// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
 */
10 #include <linux/clk-provider.h>
11 #include <linux/clk/clk-conf.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/spinlock.h>
15 #include <linux/err.h>
16 #include <linux/list.h>
17 #include <linux/slab.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/sched.h>
23 #include <linux/clkdev.h>
27 static DEFINE_SPINLOCK(enable_lock
);
28 static DEFINE_MUTEX(prepare_lock
);
30 static struct task_struct
*prepare_owner
;
31 static struct task_struct
*enable_owner
;
33 static int prepare_refcnt
;
34 static int enable_refcnt
;
36 static HLIST_HEAD(clk_root_list
);
37 static HLIST_HEAD(clk_orphan_list
);
38 static LIST_HEAD(clk_notifier_list
);
40 /*** private data structures ***/
44 const struct clk_ops
*ops
;
48 struct clk_core
*parent
;
49 const char **parent_names
;
50 struct clk_core
**parents
;
54 unsigned long req_rate
;
55 unsigned long new_rate
;
56 struct clk_core
*new_parent
;
57 struct clk_core
*new_child
;
61 unsigned int enable_count
;
62 unsigned int prepare_count
;
63 unsigned int protect_count
;
64 unsigned long min_rate
;
65 unsigned long max_rate
;
66 unsigned long accuracy
;
69 struct hlist_head children
;
70 struct hlist_node child_node
;
71 struct hlist_head clks
;
72 unsigned int notifier_count
;
73 #ifdef CONFIG_DEBUG_FS
74 struct dentry
*dentry
;
75 struct hlist_node debug_node
;
80 #define CREATE_TRACE_POINTS
81 #include <trace/events/clk.h>
84 struct clk_core
*core
;
88 unsigned long min_rate
;
89 unsigned long max_rate
;
90 unsigned int exclusive_count
;
91 struct hlist_node clks_node
;
95 static int clk_pm_runtime_get(struct clk_core
*core
)
99 if (!core
->rpm_enabled
)
102 ret
= pm_runtime_get_sync(core
->dev
);
103 return ret
< 0 ? ret
: 0;
106 static void clk_pm_runtime_put(struct clk_core
*core
)
108 if (!core
->rpm_enabled
)
111 pm_runtime_put_sync(core
->dev
);
115 static void clk_prepare_lock(void)
117 if (!mutex_trylock(&prepare_lock
)) {
118 if (prepare_owner
== current
) {
122 mutex_lock(&prepare_lock
);
124 WARN_ON_ONCE(prepare_owner
!= NULL
);
125 WARN_ON_ONCE(prepare_refcnt
!= 0);
126 prepare_owner
= current
;
130 static void clk_prepare_unlock(void)
132 WARN_ON_ONCE(prepare_owner
!= current
);
133 WARN_ON_ONCE(prepare_refcnt
== 0);
135 if (--prepare_refcnt
)
137 prepare_owner
= NULL
;
138 mutex_unlock(&prepare_lock
);
141 static unsigned long clk_enable_lock(void)
142 __acquires(enable_lock
)
147 * On UP systems, spin_trylock_irqsave() always returns true, even if
148 * we already hold the lock. So, in that case, we rely only on
149 * reference counting.
151 if (!IS_ENABLED(CONFIG_SMP
) ||
152 !spin_trylock_irqsave(&enable_lock
, flags
)) {
153 if (enable_owner
== current
) {
155 __acquire(enable_lock
);
156 if (!IS_ENABLED(CONFIG_SMP
))
157 local_save_flags(flags
);
160 spin_lock_irqsave(&enable_lock
, flags
);
162 WARN_ON_ONCE(enable_owner
!= NULL
);
163 WARN_ON_ONCE(enable_refcnt
!= 0);
164 enable_owner
= current
;
169 static void clk_enable_unlock(unsigned long flags
)
170 __releases(enable_lock
)
172 WARN_ON_ONCE(enable_owner
!= current
);
173 WARN_ON_ONCE(enable_refcnt
== 0);
175 if (--enable_refcnt
) {
176 __release(enable_lock
);
180 spin_unlock_irqrestore(&enable_lock
, flags
);
183 static bool clk_core_rate_is_protected(struct clk_core
*core
)
185 return core
->protect_count
;
188 static bool clk_core_is_prepared(struct clk_core
*core
)
193 * .is_prepared is optional for clocks that can prepare
194 * fall back to software usage counter if it is missing
196 if (!core
->ops
->is_prepared
)
197 return core
->prepare_count
;
199 if (!clk_pm_runtime_get(core
)) {
200 ret
= core
->ops
->is_prepared(core
->hw
);
201 clk_pm_runtime_put(core
);
207 static bool clk_core_is_enabled(struct clk_core
*core
)
212 * .is_enabled is only mandatory for clocks that gate
213 * fall back to software usage counter if .is_enabled is missing
215 if (!core
->ops
->is_enabled
)
216 return core
->enable_count
;
219 * Check if clock controller's device is runtime active before
220 * calling .is_enabled callback. If not, assume that clock is
221 * disabled, because we might be called from atomic context, from
222 * which pm_runtime_get() is not allowed.
223 * This function is called mainly from clk_disable_unused_subtree,
224 * which ensures proper runtime pm activation of controller before
225 * taking enable spinlock, but the below check is needed if one tries
226 * to call it from other places.
228 if (core
->rpm_enabled
) {
229 pm_runtime_get_noresume(core
->dev
);
230 if (!pm_runtime_active(core
->dev
)) {
236 ret
= core
->ops
->is_enabled(core
->hw
);
238 if (core
->rpm_enabled
)
239 pm_runtime_put(core
->dev
);
244 /*** helper functions ***/
246 const char *__clk_get_name(const struct clk
*clk
)
248 return !clk
? NULL
: clk
->core
->name
;
250 EXPORT_SYMBOL_GPL(__clk_get_name
);
252 const char *clk_hw_get_name(const struct clk_hw
*hw
)
254 return hw
->core
->name
;
256 EXPORT_SYMBOL_GPL(clk_hw_get_name
);
258 struct clk_hw
*__clk_get_hw(struct clk
*clk
)
260 return !clk
? NULL
: clk
->core
->hw
;
262 EXPORT_SYMBOL_GPL(__clk_get_hw
);
264 unsigned int clk_hw_get_num_parents(const struct clk_hw
*hw
)
266 return hw
->core
->num_parents
;
268 EXPORT_SYMBOL_GPL(clk_hw_get_num_parents
);
270 struct clk_hw
*clk_hw_get_parent(const struct clk_hw
*hw
)
272 return hw
->core
->parent
? hw
->core
->parent
->hw
: NULL
;
274 EXPORT_SYMBOL_GPL(clk_hw_get_parent
);
276 static struct clk_core
*__clk_lookup_subtree(const char *name
,
277 struct clk_core
*core
)
279 struct clk_core
*child
;
280 struct clk_core
*ret
;
282 if (!strcmp(core
->name
, name
))
285 hlist_for_each_entry(child
, &core
->children
, child_node
) {
286 ret
= __clk_lookup_subtree(name
, child
);
294 static struct clk_core
*clk_core_lookup(const char *name
)
296 struct clk_core
*root_clk
;
297 struct clk_core
*ret
;
302 /* search the 'proper' clk tree first */
303 hlist_for_each_entry(root_clk
, &clk_root_list
, child_node
) {
304 ret
= __clk_lookup_subtree(name
, root_clk
);
309 /* if not found, then search the orphan tree */
310 hlist_for_each_entry(root_clk
, &clk_orphan_list
, child_node
) {
311 ret
= __clk_lookup_subtree(name
, root_clk
);
319 static struct clk_core
*clk_core_get_parent_by_index(struct clk_core
*core
,
322 if (!core
|| index
>= core
->num_parents
)
325 if (!core
->parents
[index
])
326 core
->parents
[index
] =
327 clk_core_lookup(core
->parent_names
[index
]);
329 return core
->parents
[index
];
333 clk_hw_get_parent_by_index(const struct clk_hw
*hw
, unsigned int index
)
335 struct clk_core
*parent
;
337 parent
= clk_core_get_parent_by_index(hw
->core
, index
);
339 return !parent
? NULL
: parent
->hw
;
341 EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index
);
343 unsigned int __clk_get_enable_count(struct clk
*clk
)
345 return !clk
? 0 : clk
->core
->enable_count
;
348 static unsigned long clk_core_get_rate_nolock(struct clk_core
*core
)
359 if (!core
->num_parents
)
369 unsigned long clk_hw_get_rate(const struct clk_hw
*hw
)
371 return clk_core_get_rate_nolock(hw
->core
);
373 EXPORT_SYMBOL_GPL(clk_hw_get_rate
);
375 static unsigned long __clk_get_accuracy(struct clk_core
*core
)
380 return core
->accuracy
;
383 unsigned long __clk_get_flags(struct clk
*clk
)
385 return !clk
? 0 : clk
->core
->flags
;
387 EXPORT_SYMBOL_GPL(__clk_get_flags
);
389 unsigned long clk_hw_get_flags(const struct clk_hw
*hw
)
391 return hw
->core
->flags
;
393 EXPORT_SYMBOL_GPL(clk_hw_get_flags
);
395 bool clk_hw_is_prepared(const struct clk_hw
*hw
)
397 return clk_core_is_prepared(hw
->core
);
399 EXPORT_SYMBOL_GPL(clk_hw_is_prepared
);
401 bool clk_hw_rate_is_protected(const struct clk_hw
*hw
)
403 return clk_core_rate_is_protected(hw
->core
);
405 EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected
);
407 bool clk_hw_is_enabled(const struct clk_hw
*hw
)
409 return clk_core_is_enabled(hw
->core
);
411 EXPORT_SYMBOL_GPL(clk_hw_is_enabled
);
413 bool __clk_is_enabled(struct clk
*clk
)
418 return clk_core_is_enabled(clk
->core
);
420 EXPORT_SYMBOL_GPL(__clk_is_enabled
);
422 static bool mux_is_better_rate(unsigned long rate
, unsigned long now
,
423 unsigned long best
, unsigned long flags
)
425 if (flags
& CLK_MUX_ROUND_CLOSEST
)
426 return abs(now
- rate
) < abs(best
- rate
);
428 return now
<= rate
&& now
> best
;
431 int clk_mux_determine_rate_flags(struct clk_hw
*hw
,
432 struct clk_rate_request
*req
,
435 struct clk_core
*core
= hw
->core
, *parent
, *best_parent
= NULL
;
436 int i
, num_parents
, ret
;
437 unsigned long best
= 0;
438 struct clk_rate_request parent_req
= *req
;
440 /* if NO_REPARENT flag set, pass through to current parent */
441 if (core
->flags
& CLK_SET_RATE_NO_REPARENT
) {
442 parent
= core
->parent
;
443 if (core
->flags
& CLK_SET_RATE_PARENT
) {
444 ret
= __clk_determine_rate(parent
? parent
->hw
: NULL
,
449 best
= parent_req
.rate
;
451 best
= clk_core_get_rate_nolock(parent
);
453 best
= clk_core_get_rate_nolock(core
);
459 /* find the parent that can provide the fastest rate <= rate */
460 num_parents
= core
->num_parents
;
461 for (i
= 0; i
< num_parents
; i
++) {
462 parent
= clk_core_get_parent_by_index(core
, i
);
466 if (core
->flags
& CLK_SET_RATE_PARENT
) {
468 ret
= __clk_determine_rate(parent
->hw
, &parent_req
);
472 parent_req
.rate
= clk_core_get_rate_nolock(parent
);
475 if (mux_is_better_rate(req
->rate
, parent_req
.rate
,
477 best_parent
= parent
;
478 best
= parent_req
.rate
;
487 req
->best_parent_hw
= best_parent
->hw
;
488 req
->best_parent_rate
= best
;
493 EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags
);
495 struct clk
*__clk_lookup(const char *name
)
497 struct clk_core
*core
= clk_core_lookup(name
);
499 return !core
? NULL
: core
->hw
->clk
;
502 static void clk_core_get_boundaries(struct clk_core
*core
,
503 unsigned long *min_rate
,
504 unsigned long *max_rate
)
506 struct clk
*clk_user
;
508 *min_rate
= core
->min_rate
;
509 *max_rate
= core
->max_rate
;
511 hlist_for_each_entry(clk_user
, &core
->clks
, clks_node
)
512 *min_rate
= max(*min_rate
, clk_user
->min_rate
);
514 hlist_for_each_entry(clk_user
, &core
->clks
, clks_node
)
515 *max_rate
= min(*max_rate
, clk_user
->max_rate
);
518 void clk_hw_set_rate_range(struct clk_hw
*hw
, unsigned long min_rate
,
519 unsigned long max_rate
)
521 hw
->core
->min_rate
= min_rate
;
522 hw
->core
->max_rate
= max_rate
;
524 EXPORT_SYMBOL_GPL(clk_hw_set_rate_range
);
/*
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	/* No special rounding flags: "fastest rate <= target" policy. */
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
538 int __clk_mux_determine_rate_closest(struct clk_hw
*hw
,
539 struct clk_rate_request
*req
)
541 return clk_mux_determine_rate_flags(hw
, req
, CLK_MUX_ROUND_CLOSEST
);
543 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest
);
547 static void clk_core_rate_unprotect(struct clk_core
*core
)
549 lockdep_assert_held(&prepare_lock
);
554 if (WARN(core
->protect_count
== 0,
555 "%s already unprotected\n", core
->name
))
558 if (--core
->protect_count
> 0)
561 clk_core_rate_unprotect(core
->parent
);
564 static int clk_core_rate_nuke_protect(struct clk_core
*core
)
568 lockdep_assert_held(&prepare_lock
);
573 if (core
->protect_count
== 0)
576 ret
= core
->protect_count
;
577 core
->protect_count
= 1;
578 clk_core_rate_unprotect(core
);
584 * clk_rate_exclusive_put - release exclusivity over clock rate control
585 * @clk: the clk over which the exclusivity is released
587 * clk_rate_exclusive_put() completes a critical section during which a clock
588 * consumer cannot tolerate any other consumer making any operation on the
589 * clock which could result in a rate change or rate glitch. Exclusive clocks
590 * cannot have their rate changed, either directly or indirectly due to changes
591 * further up the parent chain of clocks. As a result, clocks up parent chain
592 * also get under exclusive control of the calling consumer.
594 * If exlusivity is claimed more than once on clock, even by the same consumer,
595 * the rate effectively gets locked as exclusivity can't be preempted.
597 * Calls to clk_rate_exclusive_put() must be balanced with calls to
598 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
601 void clk_rate_exclusive_put(struct clk
*clk
)
609 * if there is something wrong with this consumer protect count, stop
610 * here before messing with the provider
612 if (WARN_ON(clk
->exclusive_count
<= 0))
615 clk_core_rate_unprotect(clk
->core
);
616 clk
->exclusive_count
--;
618 clk_prepare_unlock();
620 EXPORT_SYMBOL_GPL(clk_rate_exclusive_put
);
622 static void clk_core_rate_protect(struct clk_core
*core
)
624 lockdep_assert_held(&prepare_lock
);
629 if (core
->protect_count
== 0)
630 clk_core_rate_protect(core
->parent
);
632 core
->protect_count
++;
635 static void clk_core_rate_restore_protect(struct clk_core
*core
, int count
)
637 lockdep_assert_held(&prepare_lock
);
645 clk_core_rate_protect(core
);
646 core
->protect_count
= count
;
650 * clk_rate_exclusive_get - get exclusivity over the clk rate control
651 * @clk: the clk over which the exclusity of rate control is requested
653 * clk_rate_exlusive_get() begins a critical section during which a clock
654 * consumer cannot tolerate any other consumer making any operation on the
655 * clock which could result in a rate change or rate glitch. Exclusive clocks
656 * cannot have their rate changed, either directly or indirectly due to changes
657 * further up the parent chain of clocks. As a result, clocks up parent chain
658 * also get under exclusive control of the calling consumer.
660 * If exlusivity is claimed more than once on clock, even by the same consumer,
661 * the rate effectively gets locked as exclusivity can't be preempted.
663 * Calls to clk_rate_exclusive_get() should be balanced with calls to
664 * clk_rate_exclusive_put(). Calls to this function may sleep.
665 * Returns 0 on success, -EERROR otherwise
667 int clk_rate_exclusive_get(struct clk
*clk
)
673 clk_core_rate_protect(clk
->core
);
674 clk
->exclusive_count
++;
675 clk_prepare_unlock();
679 EXPORT_SYMBOL_GPL(clk_rate_exclusive_get
);
681 static void clk_core_unprepare(struct clk_core
*core
)
683 lockdep_assert_held(&prepare_lock
);
688 if (WARN(core
->prepare_count
== 0,
689 "%s already unprepared\n", core
->name
))
692 if (WARN(core
->prepare_count
== 1 && core
->flags
& CLK_IS_CRITICAL
,
693 "Unpreparing critical %s\n", core
->name
))
696 if (core
->flags
& CLK_SET_RATE_GATE
)
697 clk_core_rate_unprotect(core
);
699 if (--core
->prepare_count
> 0)
702 WARN(core
->enable_count
> 0, "Unpreparing enabled %s\n", core
->name
);
704 trace_clk_unprepare(core
);
706 if (core
->ops
->unprepare
)
707 core
->ops
->unprepare(core
->hw
);
709 clk_pm_runtime_put(core
);
711 trace_clk_unprepare_complete(core
);
712 clk_core_unprepare(core
->parent
);
715 static void clk_core_unprepare_lock(struct clk_core
*core
)
718 clk_core_unprepare(core
);
719 clk_prepare_unlock();
723 * clk_unprepare - undo preparation of a clock source
724 * @clk: the clk being unprepared
726 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
727 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
728 * if the operation may sleep. One example is a clk which is accessed over
729 * I2c. In the complex case a clk gate operation may require a fast and a slow
730 * part. It is this reason that clk_unprepare and clk_disable are not mutually
731 * exclusive. In fact clk_disable must be called before clk_unprepare.
733 void clk_unprepare(struct clk
*clk
)
735 if (IS_ERR_OR_NULL(clk
))
738 clk_core_unprepare_lock(clk
->core
);
740 EXPORT_SYMBOL_GPL(clk_unprepare
);
742 static int clk_core_prepare(struct clk_core
*core
)
746 lockdep_assert_held(&prepare_lock
);
751 if (core
->prepare_count
== 0) {
752 ret
= clk_pm_runtime_get(core
);
756 ret
= clk_core_prepare(core
->parent
);
760 trace_clk_prepare(core
);
762 if (core
->ops
->prepare
)
763 ret
= core
->ops
->prepare(core
->hw
);
765 trace_clk_prepare_complete(core
);
771 core
->prepare_count
++;
774 * CLK_SET_RATE_GATE is a special case of clock protection
775 * Instead of a consumer claiming exclusive rate control, it is
776 * actually the provider which prevents any consumer from making any
777 * operation which could result in a rate change or rate glitch while
778 * the clock is prepared.
780 if (core
->flags
& CLK_SET_RATE_GATE
)
781 clk_core_rate_protect(core
);
785 clk_core_unprepare(core
->parent
);
787 clk_pm_runtime_put(core
);
791 static int clk_core_prepare_lock(struct clk_core
*core
)
796 ret
= clk_core_prepare(core
);
797 clk_prepare_unlock();
803 * clk_prepare - prepare a clock source
804 * @clk: the clk being prepared
806 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
807 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
808 * operation may sleep. One example is a clk which is accessed over I2c. In
809 * the complex case a clk ungate operation may require a fast and a slow part.
810 * It is this reason that clk_prepare and clk_enable are not mutually
811 * exclusive. In fact clk_prepare must be called before clk_enable.
812 * Returns 0 on success, -EERROR otherwise.
814 int clk_prepare(struct clk
*clk
)
819 return clk_core_prepare_lock(clk
->core
);
821 EXPORT_SYMBOL_GPL(clk_prepare
);
823 static void clk_core_disable(struct clk_core
*core
)
825 lockdep_assert_held(&enable_lock
);
830 if (WARN(core
->enable_count
== 0, "%s already disabled\n", core
->name
))
833 if (WARN(core
->enable_count
== 1 && core
->flags
& CLK_IS_CRITICAL
,
834 "Disabling critical %s\n", core
->name
))
837 if (--core
->enable_count
> 0)
840 trace_clk_disable_rcuidle(core
);
842 if (core
->ops
->disable
)
843 core
->ops
->disable(core
->hw
);
845 trace_clk_disable_complete_rcuidle(core
);
847 clk_core_disable(core
->parent
);
850 static void clk_core_disable_lock(struct clk_core
*core
)
854 flags
= clk_enable_lock();
855 clk_core_disable(core
);
856 clk_enable_unlock(flags
);
860 * clk_disable - gate a clock
861 * @clk: the clk being gated
863 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
864 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
865 * clk if the operation is fast and will never sleep. One example is a
866 * SoC-internal clk which is controlled via simple register writes. In the
867 * complex case a clk gate operation may require a fast and a slow part. It is
868 * this reason that clk_unprepare and clk_disable are not mutually exclusive.
869 * In fact clk_disable must be called before clk_unprepare.
871 void clk_disable(struct clk
*clk
)
873 if (IS_ERR_OR_NULL(clk
))
876 clk_core_disable_lock(clk
->core
);
878 EXPORT_SYMBOL_GPL(clk_disable
);
880 static int clk_core_enable(struct clk_core
*core
)
884 lockdep_assert_held(&enable_lock
);
889 if (WARN(core
->prepare_count
== 0,
890 "Enabling unprepared %s\n", core
->name
))
893 if (core
->enable_count
== 0) {
894 ret
= clk_core_enable(core
->parent
);
899 trace_clk_enable_rcuidle(core
);
901 if (core
->ops
->enable
)
902 ret
= core
->ops
->enable(core
->hw
);
904 trace_clk_enable_complete_rcuidle(core
);
907 clk_core_disable(core
->parent
);
912 core
->enable_count
++;
916 static int clk_core_enable_lock(struct clk_core
*core
)
921 flags
= clk_enable_lock();
922 ret
= clk_core_enable(core
);
923 clk_enable_unlock(flags
);
929 * clk_gate_restore_context - restore context for poweroff
930 * @hw: the clk_hw pointer of clock whose state is to be restored
932 * The clock gate restore context function enables or disables
933 * the gate clocks based on the enable_count. This is done in cases
934 * where the clock context is lost and based on the enable_count
935 * the clock either needs to be enabled/disabled. This
936 * helps restore the state of gate clocks.
938 void clk_gate_restore_context(struct clk_hw
*hw
)
940 struct clk_core
*core
= hw
->core
;
942 if (core
->enable_count
)
943 core
->ops
->enable(hw
);
945 core
->ops
->disable(hw
);
947 EXPORT_SYMBOL_GPL(clk_gate_restore_context
);
949 static int clk_core_save_context(struct clk_core
*core
)
951 struct clk_core
*child
;
954 hlist_for_each_entry(child
, &core
->children
, child_node
) {
955 ret
= clk_core_save_context(child
);
960 if (core
->ops
&& core
->ops
->save_context
)
961 ret
= core
->ops
->save_context(core
->hw
);
966 static void clk_core_restore_context(struct clk_core
*core
)
968 struct clk_core
*child
;
970 if (core
->ops
&& core
->ops
->restore_context
)
971 core
->ops
->restore_context(core
->hw
);
973 hlist_for_each_entry(child
, &core
->children
, child_node
)
974 clk_core_restore_context(child
);
978 * clk_save_context - save clock context for poweroff
980 * Saves the context of the clock register for powerstates in which the
981 * contents of the registers will be lost. Occurs deep within the suspend
982 * code. Returns 0 on success.
984 int clk_save_context(void)
986 struct clk_core
*clk
;
989 hlist_for_each_entry(clk
, &clk_root_list
, child_node
) {
990 ret
= clk_core_save_context(clk
);
995 hlist_for_each_entry(clk
, &clk_orphan_list
, child_node
) {
996 ret
= clk_core_save_context(clk
);
1003 EXPORT_SYMBOL_GPL(clk_save_context
);
1006 * clk_restore_context - restore clock context after poweroff
1008 * Restore the saved clock context upon resume.
1011 void clk_restore_context(void)
1013 struct clk_core
*core
;
1015 hlist_for_each_entry(core
, &clk_root_list
, child_node
)
1016 clk_core_restore_context(core
);
1018 hlist_for_each_entry(core
, &clk_orphan_list
, child_node
)
1019 clk_core_restore_context(core
);
1021 EXPORT_SYMBOL_GPL(clk_restore_context
);
1024 * clk_enable - ungate a clock
1025 * @clk: the clk being ungated
1027 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
1028 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
1029 * if the operation will never sleep. One example is a SoC-internal clk which
1030 * is controlled via simple register writes. In the complex case a clk ungate
1031 * operation may require a fast and a slow part. It is this reason that
1032 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
1033 * must be called before clk_enable. Returns 0 on success, -EERROR
1036 int clk_enable(struct clk
*clk
)
1041 return clk_core_enable_lock(clk
->core
);
1043 EXPORT_SYMBOL_GPL(clk_enable
);
1045 static int clk_core_prepare_enable(struct clk_core
*core
)
1049 ret
= clk_core_prepare_lock(core
);
1053 ret
= clk_core_enable_lock(core
);
1055 clk_core_unprepare_lock(core
);
1060 static void clk_core_disable_unprepare(struct clk_core
*core
)
1062 clk_core_disable_lock(core
);
1063 clk_core_unprepare_lock(core
);
1066 static void clk_unprepare_unused_subtree(struct clk_core
*core
)
1068 struct clk_core
*child
;
1070 lockdep_assert_held(&prepare_lock
);
1072 hlist_for_each_entry(child
, &core
->children
, child_node
)
1073 clk_unprepare_unused_subtree(child
);
1075 if (core
->prepare_count
)
1078 if (core
->flags
& CLK_IGNORE_UNUSED
)
1081 if (clk_pm_runtime_get(core
))
1084 if (clk_core_is_prepared(core
)) {
1085 trace_clk_unprepare(core
);
1086 if (core
->ops
->unprepare_unused
)
1087 core
->ops
->unprepare_unused(core
->hw
);
1088 else if (core
->ops
->unprepare
)
1089 core
->ops
->unprepare(core
->hw
);
1090 trace_clk_unprepare_complete(core
);
1093 clk_pm_runtime_put(core
);
1096 static void clk_disable_unused_subtree(struct clk_core
*core
)
1098 struct clk_core
*child
;
1099 unsigned long flags
;
1101 lockdep_assert_held(&prepare_lock
);
1103 hlist_for_each_entry(child
, &core
->children
, child_node
)
1104 clk_disable_unused_subtree(child
);
1106 if (core
->flags
& CLK_OPS_PARENT_ENABLE
)
1107 clk_core_prepare_enable(core
->parent
);
1109 if (clk_pm_runtime_get(core
))
1112 flags
= clk_enable_lock();
1114 if (core
->enable_count
)
1117 if (core
->flags
& CLK_IGNORE_UNUSED
)
1121 * some gate clocks have special needs during the disable-unused
1122 * sequence. call .disable_unused if available, otherwise fall
1125 if (clk_core_is_enabled(core
)) {
1126 trace_clk_disable(core
);
1127 if (core
->ops
->disable_unused
)
1128 core
->ops
->disable_unused(core
->hw
);
1129 else if (core
->ops
->disable
)
1130 core
->ops
->disable(core
->hw
);
1131 trace_clk_disable_complete(core
);
1135 clk_enable_unlock(flags
);
1136 clk_pm_runtime_put(core
);
1138 if (core
->flags
& CLK_OPS_PARENT_ENABLE
)
1139 clk_core_disable_unprepare(core
->parent
);
1142 static bool clk_ignore_unused
;
1143 static int __init
clk_ignore_unused_setup(char *__unused
)
1145 clk_ignore_unused
= true;
1148 __setup("clk_ignore_unused", clk_ignore_unused_setup
);
1150 static int clk_disable_unused(void)
1152 struct clk_core
*core
;
1154 if (clk_ignore_unused
) {
1155 pr_warn("clk: Not disabling unused clocks\n");
1161 hlist_for_each_entry(core
, &clk_root_list
, child_node
)
1162 clk_disable_unused_subtree(core
);
1164 hlist_for_each_entry(core
, &clk_orphan_list
, child_node
)
1165 clk_disable_unused_subtree(core
);
1167 hlist_for_each_entry(core
, &clk_root_list
, child_node
)
1168 clk_unprepare_unused_subtree(core
);
1170 hlist_for_each_entry(core
, &clk_orphan_list
, child_node
)
1171 clk_unprepare_unused_subtree(core
);
1173 clk_prepare_unlock();
1177 late_initcall_sync(clk_disable_unused
);
1179 static int clk_core_determine_round_nolock(struct clk_core
*core
,
1180 struct clk_rate_request
*req
)
1184 lockdep_assert_held(&prepare_lock
);
1190 * At this point, core protection will be disabled if
1191 * - if the provider is not protected at all
1192 * - if the calling consumer is the only one which has exclusivity
1195 if (clk_core_rate_is_protected(core
)) {
1196 req
->rate
= core
->rate
;
1197 } else if (core
->ops
->determine_rate
) {
1198 return core
->ops
->determine_rate(core
->hw
, req
);
1199 } else if (core
->ops
->round_rate
) {
1200 rate
= core
->ops
->round_rate(core
->hw
, req
->rate
,
1201 &req
->best_parent_rate
);
1213 static void clk_core_init_rate_req(struct clk_core
* const core
,
1214 struct clk_rate_request
*req
)
1216 struct clk_core
*parent
;
1218 if (WARN_ON(!core
|| !req
))
1221 parent
= core
->parent
;
1223 req
->best_parent_hw
= parent
->hw
;
1224 req
->best_parent_rate
= parent
->rate
;
1226 req
->best_parent_hw
= NULL
;
1227 req
->best_parent_rate
= 0;
1231 static bool clk_core_can_round(struct clk_core
* const core
)
1233 if (core
->ops
->determine_rate
|| core
->ops
->round_rate
)
1239 static int clk_core_round_rate_nolock(struct clk_core
*core
,
1240 struct clk_rate_request
*req
)
1242 lockdep_assert_held(&prepare_lock
);
1249 clk_core_init_rate_req(core
, req
);
1251 if (clk_core_can_round(core
))
1252 return clk_core_determine_round_nolock(core
, req
);
1253 else if (core
->flags
& CLK_SET_RATE_PARENT
)
1254 return clk_core_round_rate_nolock(core
->parent
, req
);
1256 req
->rate
= core
->rate
;
1261 * __clk_determine_rate - get the closest rate actually supported by a clock
1262 * @hw: determine the rate of this clock
1263 * @req: target rate request
1265 * Useful for clk_ops such as .set_rate and .determine_rate.
1267 int __clk_determine_rate(struct clk_hw
*hw
, struct clk_rate_request
*req
)
1274 return clk_core_round_rate_nolock(hw
->core
, req
);
1276 EXPORT_SYMBOL_GPL(__clk_determine_rate
);
1278 unsigned long clk_hw_round_rate(struct clk_hw
*hw
, unsigned long rate
)
1281 struct clk_rate_request req
;
1283 clk_core_get_boundaries(hw
->core
, &req
.min_rate
, &req
.max_rate
);
1286 ret
= clk_core_round_rate_nolock(hw
->core
, &req
);
1292 EXPORT_SYMBOL_GPL(clk_hw_round_rate
);
1295 * clk_round_rate - round the given rate for a clk
1296 * @clk: the clk for which we are rounding a rate
1297 * @rate: the rate which is to be rounded
1299 * Takes in a rate as input and rounds it to a rate that the clk can actually
1300 * use which is then returned. If clk doesn't support round_rate operation
1301 * then the parent rate is returned.
1303 long clk_round_rate(struct clk
*clk
, unsigned long rate
)
1305 struct clk_rate_request req
;
1313 if (clk
->exclusive_count
)
1314 clk_core_rate_unprotect(clk
->core
);
1316 clk_core_get_boundaries(clk
->core
, &req
.min_rate
, &req
.max_rate
);
1319 ret
= clk_core_round_rate_nolock(clk
->core
, &req
);
1321 if (clk
->exclusive_count
)
1322 clk_core_rate_protect(clk
->core
);
1324 clk_prepare_unlock();
1331 EXPORT_SYMBOL_GPL(clk_round_rate
);
1334 * __clk_notify - call clk notifier chain
1335 * @core: clk that is changing rate
1336 * @msg: clk notifier type (see include/linux/clk.h)
1337 * @old_rate: old clk rate
1338 * @new_rate: new clk rate
1340 * Triggers a notifier call chain on the clk rate-change notification
1341 * for 'clk'. Passes a pointer to the struct clk and the previous
1342 * and current rates to the notifier callback. Intended to be called by
1343 * internal clock code only. Returns NOTIFY_DONE from the last driver
1344 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
1345 * a driver returns that.
1347 static int __clk_notify(struct clk_core
*core
, unsigned long msg
,
1348 unsigned long old_rate
, unsigned long new_rate
)
1350 struct clk_notifier
*cn
;
1351 struct clk_notifier_data cnd
;
1352 int ret
= NOTIFY_DONE
;
1354 cnd
.old_rate
= old_rate
;
1355 cnd
.new_rate
= new_rate
;
1357 list_for_each_entry(cn
, &clk_notifier_list
, node
) {
1358 if (cn
->clk
->core
== core
) {
1360 ret
= srcu_notifier_call_chain(&cn
->notifier_head
, msg
,
1362 if (ret
& NOTIFY_STOP_MASK
)
1371 * __clk_recalc_accuracies
1372 * @core: first clk in the subtree
1374 * Walks the subtree of clks starting with clk and recalculates accuracies as
1375 * it goes. Note that if a clk does not implement the .recalc_accuracy
1376 * callback then it is assumed that the clock will take on the accuracy of its
1379 static void __clk_recalc_accuracies(struct clk_core
*core
)
1381 unsigned long parent_accuracy
= 0;
1382 struct clk_core
*child
;
1384 lockdep_assert_held(&prepare_lock
);
1387 parent_accuracy
= core
->parent
->accuracy
;
1389 if (core
->ops
->recalc_accuracy
)
1390 core
->accuracy
= core
->ops
->recalc_accuracy(core
->hw
,
1393 core
->accuracy
= parent_accuracy
;
1395 hlist_for_each_entry(child
, &core
->children
, child_node
)
1396 __clk_recalc_accuracies(child
);
1399 static long clk_core_get_accuracy(struct clk_core
*core
)
1401 unsigned long accuracy
;
1404 if (core
&& (core
->flags
& CLK_GET_ACCURACY_NOCACHE
))
1405 __clk_recalc_accuracies(core
);
1407 accuracy
= __clk_get_accuracy(core
);
1408 clk_prepare_unlock();
1414 * clk_get_accuracy - return the accuracy of clk
1415 * @clk: the clk whose accuracy is being returned
1417 * Simply returns the cached accuracy of the clk, unless
1418 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
1420 * If clk is NULL then returns 0.
1422 long clk_get_accuracy(struct clk
*clk
)
1427 return clk_core_get_accuracy(clk
->core
);
1429 EXPORT_SYMBOL_GPL(clk_get_accuracy
);
1431 static unsigned long clk_recalc(struct clk_core
*core
,
1432 unsigned long parent_rate
)
1434 unsigned long rate
= parent_rate
;
1436 if (core
->ops
->recalc_rate
&& !clk_pm_runtime_get(core
)) {
1437 rate
= core
->ops
->recalc_rate(core
->hw
, parent_rate
);
1438 clk_pm_runtime_put(core
);
1444 * __clk_recalc_rates
1445 * @core: first clk in the subtree
1446 * @msg: notification type (see include/linux/clk.h)
1448 * Walks the subtree of clks starting with clk and recalculates rates as it
1449 * goes. Note that if a clk does not implement the .recalc_rate callback then
1450 * it is assumed that the clock will take on the rate of its parent.
1452 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
1455 static void __clk_recalc_rates(struct clk_core
*core
, unsigned long msg
)
1457 unsigned long old_rate
;
1458 unsigned long parent_rate
= 0;
1459 struct clk_core
*child
;
1461 lockdep_assert_held(&prepare_lock
);
1463 old_rate
= core
->rate
;
1466 parent_rate
= core
->parent
->rate
;
1468 core
->rate
= clk_recalc(core
, parent_rate
);
1471 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
1472 * & ABORT_RATE_CHANGE notifiers
1474 if (core
->notifier_count
&& msg
)
1475 __clk_notify(core
, msg
, old_rate
, core
->rate
);
1477 hlist_for_each_entry(child
, &core
->children
, child_node
)
1478 __clk_recalc_rates(child
, msg
);
1481 static unsigned long clk_core_get_rate(struct clk_core
*core
)
1487 if (core
&& (core
->flags
& CLK_GET_RATE_NOCACHE
))
1488 __clk_recalc_rates(core
, 0);
1490 rate
= clk_core_get_rate_nolock(core
);
1491 clk_prepare_unlock();
1497 * clk_get_rate - return the rate of clk
1498 * @clk: the clk whose rate is being returned
1500 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1501 * is set, which means a recalc_rate will be issued.
1502 * If clk is NULL then returns 0.
1504 unsigned long clk_get_rate(struct clk
*clk
)
1509 return clk_core_get_rate(clk
->core
);
1511 EXPORT_SYMBOL_GPL(clk_get_rate
);
1513 static int clk_fetch_parent_index(struct clk_core
*core
,
1514 struct clk_core
*parent
)
1521 for (i
= 0; i
< core
->num_parents
; i
++) {
1522 if (core
->parents
[i
] == parent
)
1525 if (core
->parents
[i
])
1528 /* Fallback to comparing globally unique names */
1529 if (!strcmp(parent
->name
, core
->parent_names
[i
])) {
1530 core
->parents
[i
] = parent
;
1539 * Update the orphan status of @core and all its children.
1541 static void clk_core_update_orphan_status(struct clk_core
*core
, bool is_orphan
)
1543 struct clk_core
*child
;
1545 core
->orphan
= is_orphan
;
1547 hlist_for_each_entry(child
, &core
->children
, child_node
)
1548 clk_core_update_orphan_status(child
, is_orphan
);
1551 static void clk_reparent(struct clk_core
*core
, struct clk_core
*new_parent
)
1553 bool was_orphan
= core
->orphan
;
1555 hlist_del(&core
->child_node
);
1558 bool becomes_orphan
= new_parent
->orphan
;
1560 /* avoid duplicate POST_RATE_CHANGE notifications */
1561 if (new_parent
->new_child
== core
)
1562 new_parent
->new_child
= NULL
;
1564 hlist_add_head(&core
->child_node
, &new_parent
->children
);
1566 if (was_orphan
!= becomes_orphan
)
1567 clk_core_update_orphan_status(core
, becomes_orphan
);
1569 hlist_add_head(&core
->child_node
, &clk_orphan_list
);
1571 clk_core_update_orphan_status(core
, true);
1574 core
->parent
= new_parent
;
1577 static struct clk_core
*__clk_set_parent_before(struct clk_core
*core
,
1578 struct clk_core
*parent
)
1580 unsigned long flags
;
1581 struct clk_core
*old_parent
= core
->parent
;
1584 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
1586 * 2. Migrate prepare state between parents and prevent race with
1589 * If the clock is not prepared, then a race with
1590 * clk_enable/disable() is impossible since we already have the
1591 * prepare lock (future calls to clk_enable() need to be preceded by
1594 * If the clock is prepared, migrate the prepared state to the new
1595 * parent and also protect against a race with clk_enable() by
1596 * forcing the clock and the new parent on. This ensures that all
1597 * future calls to clk_enable() are practically NOPs with respect to
1598 * hardware and software states.
1600 * See also: Comment for clk_set_parent() below.
1603 /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
1604 if (core
->flags
& CLK_OPS_PARENT_ENABLE
) {
1605 clk_core_prepare_enable(old_parent
);
1606 clk_core_prepare_enable(parent
);
1609 /* migrate prepare count if > 0 */
1610 if (core
->prepare_count
) {
1611 clk_core_prepare_enable(parent
);
1612 clk_core_enable_lock(core
);
1615 /* update the clk tree topology */
1616 flags
= clk_enable_lock();
1617 clk_reparent(core
, parent
);
1618 clk_enable_unlock(flags
);
1623 static void __clk_set_parent_after(struct clk_core
*core
,
1624 struct clk_core
*parent
,
1625 struct clk_core
*old_parent
)
1628 * Finish the migration of prepare state and undo the changes done
1629 * for preventing a race with clk_enable().
1631 if (core
->prepare_count
) {
1632 clk_core_disable_lock(core
);
1633 clk_core_disable_unprepare(old_parent
);
1636 /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
1637 if (core
->flags
& CLK_OPS_PARENT_ENABLE
) {
1638 clk_core_disable_unprepare(parent
);
1639 clk_core_disable_unprepare(old_parent
);
1643 static int __clk_set_parent(struct clk_core
*core
, struct clk_core
*parent
,
1646 unsigned long flags
;
1648 struct clk_core
*old_parent
;
1650 old_parent
= __clk_set_parent_before(core
, parent
);
1652 trace_clk_set_parent(core
, parent
);
1654 /* change clock input source */
1655 if (parent
&& core
->ops
->set_parent
)
1656 ret
= core
->ops
->set_parent(core
->hw
, p_index
);
1658 trace_clk_set_parent_complete(core
, parent
);
1661 flags
= clk_enable_lock();
1662 clk_reparent(core
, old_parent
);
1663 clk_enable_unlock(flags
);
1664 __clk_set_parent_after(core
, old_parent
, parent
);
1669 __clk_set_parent_after(core
, parent
, old_parent
);
1675 * __clk_speculate_rates
1676 * @core: first clk in the subtree
1677 * @parent_rate: the "future" rate of clk's parent
1679 * Walks the subtree of clks starting with clk, speculating rates as it
1680 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1682 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1683 * pre-rate change notifications and returns early if no clks in the
1684 * subtree have subscribed to the notifications. Note that if a clk does not
1685 * implement the .recalc_rate callback then it is assumed that the clock will
1686 * take on the rate of its parent.
1688 static int __clk_speculate_rates(struct clk_core
*core
,
1689 unsigned long parent_rate
)
1691 struct clk_core
*child
;
1692 unsigned long new_rate
;
1693 int ret
= NOTIFY_DONE
;
1695 lockdep_assert_held(&prepare_lock
);
1697 new_rate
= clk_recalc(core
, parent_rate
);
1699 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1700 if (core
->notifier_count
)
1701 ret
= __clk_notify(core
, PRE_RATE_CHANGE
, core
->rate
, new_rate
);
1703 if (ret
& NOTIFY_STOP_MASK
) {
1704 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1705 __func__
, core
->name
, ret
);
1709 hlist_for_each_entry(child
, &core
->children
, child_node
) {
1710 ret
= __clk_speculate_rates(child
, new_rate
);
1711 if (ret
& NOTIFY_STOP_MASK
)
1719 static void clk_calc_subtree(struct clk_core
*core
, unsigned long new_rate
,
1720 struct clk_core
*new_parent
, u8 p_index
)
1722 struct clk_core
*child
;
1724 core
->new_rate
= new_rate
;
1725 core
->new_parent
= new_parent
;
1726 core
->new_parent_index
= p_index
;
1727 /* include clk in new parent's PRE_RATE_CHANGE notifications */
1728 core
->new_child
= NULL
;
1729 if (new_parent
&& new_parent
!= core
->parent
)
1730 new_parent
->new_child
= core
;
1732 hlist_for_each_entry(child
, &core
->children
, child_node
) {
1733 child
->new_rate
= clk_recalc(child
, new_rate
);
1734 clk_calc_subtree(child
, child
->new_rate
, NULL
, 0);
1739 * calculate the new rates returning the topmost clock that has to be
1742 static struct clk_core
*clk_calc_new_rates(struct clk_core
*core
,
1745 struct clk_core
*top
= core
;
1746 struct clk_core
*old_parent
, *parent
;
1747 unsigned long best_parent_rate
= 0;
1748 unsigned long new_rate
;
1749 unsigned long min_rate
;
1750 unsigned long max_rate
;
1755 if (IS_ERR_OR_NULL(core
))
1758 /* save parent rate, if it exists */
1759 parent
= old_parent
= core
->parent
;
1761 best_parent_rate
= parent
->rate
;
1763 clk_core_get_boundaries(core
, &min_rate
, &max_rate
);
1765 /* find the closest rate and parent clk/rate */
1766 if (clk_core_can_round(core
)) {
1767 struct clk_rate_request req
;
1770 req
.min_rate
= min_rate
;
1771 req
.max_rate
= max_rate
;
1773 clk_core_init_rate_req(core
, &req
);
1775 ret
= clk_core_determine_round_nolock(core
, &req
);
1779 best_parent_rate
= req
.best_parent_rate
;
1780 new_rate
= req
.rate
;
1781 parent
= req
.best_parent_hw
? req
.best_parent_hw
->core
: NULL
;
1783 if (new_rate
< min_rate
|| new_rate
> max_rate
)
1785 } else if (!parent
|| !(core
->flags
& CLK_SET_RATE_PARENT
)) {
1786 /* pass-through clock without adjustable parent */
1787 core
->new_rate
= core
->rate
;
1790 /* pass-through clock with adjustable parent */
1791 top
= clk_calc_new_rates(parent
, rate
);
1792 new_rate
= parent
->new_rate
;
1796 /* some clocks must be gated to change parent */
1797 if (parent
!= old_parent
&&
1798 (core
->flags
& CLK_SET_PARENT_GATE
) && core
->prepare_count
) {
1799 pr_debug("%s: %s not gated but wants to reparent\n",
1800 __func__
, core
->name
);
1804 /* try finding the new parent index */
1805 if (parent
&& core
->num_parents
> 1) {
1806 p_index
= clk_fetch_parent_index(core
, parent
);
1808 pr_debug("%s: clk %s can not be parent of clk %s\n",
1809 __func__
, parent
->name
, core
->name
);
1814 if ((core
->flags
& CLK_SET_RATE_PARENT
) && parent
&&
1815 best_parent_rate
!= parent
->rate
)
1816 top
= clk_calc_new_rates(parent
, best_parent_rate
);
1819 clk_calc_subtree(core
, new_rate
, parent
, p_index
);
1825 * Notify about rate changes in a subtree. Always walk down the whole tree
1826 * so that in case of an error we can walk down the whole tree again and
1829 static struct clk_core
*clk_propagate_rate_change(struct clk_core
*core
,
1830 unsigned long event
)
1832 struct clk_core
*child
, *tmp_clk
, *fail_clk
= NULL
;
1833 int ret
= NOTIFY_DONE
;
1835 if (core
->rate
== core
->new_rate
)
1838 if (core
->notifier_count
) {
1839 ret
= __clk_notify(core
, event
, core
->rate
, core
->new_rate
);
1840 if (ret
& NOTIFY_STOP_MASK
)
1844 hlist_for_each_entry(child
, &core
->children
, child_node
) {
1845 /* Skip children who will be reparented to another clock */
1846 if (child
->new_parent
&& child
->new_parent
!= core
)
1848 tmp_clk
= clk_propagate_rate_change(child
, event
);
1853 /* handle the new child who might not be in core->children yet */
1854 if (core
->new_child
) {
1855 tmp_clk
= clk_propagate_rate_change(core
->new_child
, event
);
1864 * walk down a subtree and set the new rates notifying the rate
1867 static void clk_change_rate(struct clk_core
*core
)
1869 struct clk_core
*child
;
1870 struct hlist_node
*tmp
;
1871 unsigned long old_rate
;
1872 unsigned long best_parent_rate
= 0;
1873 bool skip_set_rate
= false;
1874 struct clk_core
*old_parent
;
1875 struct clk_core
*parent
= NULL
;
1877 old_rate
= core
->rate
;
1879 if (core
->new_parent
) {
1880 parent
= core
->new_parent
;
1881 best_parent_rate
= core
->new_parent
->rate
;
1882 } else if (core
->parent
) {
1883 parent
= core
->parent
;
1884 best_parent_rate
= core
->parent
->rate
;
1887 if (clk_pm_runtime_get(core
))
1890 if (core
->flags
& CLK_SET_RATE_UNGATE
) {
1891 unsigned long flags
;
1893 clk_core_prepare(core
);
1894 flags
= clk_enable_lock();
1895 clk_core_enable(core
);
1896 clk_enable_unlock(flags
);
1899 if (core
->new_parent
&& core
->new_parent
!= core
->parent
) {
1900 old_parent
= __clk_set_parent_before(core
, core
->new_parent
);
1901 trace_clk_set_parent(core
, core
->new_parent
);
1903 if (core
->ops
->set_rate_and_parent
) {
1904 skip_set_rate
= true;
1905 core
->ops
->set_rate_and_parent(core
->hw
, core
->new_rate
,
1907 core
->new_parent_index
);
1908 } else if (core
->ops
->set_parent
) {
1909 core
->ops
->set_parent(core
->hw
, core
->new_parent_index
);
1912 trace_clk_set_parent_complete(core
, core
->new_parent
);
1913 __clk_set_parent_after(core
, core
->new_parent
, old_parent
);
1916 if (core
->flags
& CLK_OPS_PARENT_ENABLE
)
1917 clk_core_prepare_enable(parent
);
1919 trace_clk_set_rate(core
, core
->new_rate
);
1921 if (!skip_set_rate
&& core
->ops
->set_rate
)
1922 core
->ops
->set_rate(core
->hw
, core
->new_rate
, best_parent_rate
);
1924 trace_clk_set_rate_complete(core
, core
->new_rate
);
1926 core
->rate
= clk_recalc(core
, best_parent_rate
);
1928 if (core
->flags
& CLK_SET_RATE_UNGATE
) {
1929 unsigned long flags
;
1931 flags
= clk_enable_lock();
1932 clk_core_disable(core
);
1933 clk_enable_unlock(flags
);
1934 clk_core_unprepare(core
);
1937 if (core
->flags
& CLK_OPS_PARENT_ENABLE
)
1938 clk_core_disable_unprepare(parent
);
1940 if (core
->notifier_count
&& old_rate
!= core
->rate
)
1941 __clk_notify(core
, POST_RATE_CHANGE
, old_rate
, core
->rate
);
1943 if (core
->flags
& CLK_RECALC_NEW_RATES
)
1944 (void)clk_calc_new_rates(core
, core
->new_rate
);
1947 * Use safe iteration, as change_rate can actually swap parents
1948 * for certain clock types.
1950 hlist_for_each_entry_safe(child
, tmp
, &core
->children
, child_node
) {
1951 /* Skip children who will be reparented to another clock */
1952 if (child
->new_parent
&& child
->new_parent
!= core
)
1954 clk_change_rate(child
);
1957 /* handle the new child who might not be in core->children yet */
1958 if (core
->new_child
)
1959 clk_change_rate(core
->new_child
);
1961 clk_pm_runtime_put(core
);
1964 static unsigned long clk_core_req_round_rate_nolock(struct clk_core
*core
,
1965 unsigned long req_rate
)
1968 struct clk_rate_request req
;
1970 lockdep_assert_held(&prepare_lock
);
1975 /* simulate what the rate would be if it could be freely set */
1976 cnt
= clk_core_rate_nuke_protect(core
);
1980 clk_core_get_boundaries(core
, &req
.min_rate
, &req
.max_rate
);
1981 req
.rate
= req_rate
;
1983 ret
= clk_core_round_rate_nolock(core
, &req
);
1985 /* restore the protection */
1986 clk_core_rate_restore_protect(core
, cnt
);
1988 return ret
? 0 : req
.rate
;
1991 static int clk_core_set_rate_nolock(struct clk_core
*core
,
1992 unsigned long req_rate
)
1994 struct clk_core
*top
, *fail_clk
;
2001 rate
= clk_core_req_round_rate_nolock(core
, req_rate
);
2003 /* bail early if nothing to do */
2004 if (rate
== clk_core_get_rate_nolock(core
))
2007 /* fail on a direct rate set of a protected provider */
2008 if (clk_core_rate_is_protected(core
))
2011 /* calculate new rates and get the topmost changed clock */
2012 top
= clk_calc_new_rates(core
, req_rate
);
2016 ret
= clk_pm_runtime_get(core
);
2020 /* notify that we are about to change rates */
2021 fail_clk
= clk_propagate_rate_change(top
, PRE_RATE_CHANGE
);
2023 pr_debug("%s: failed to set %s rate\n", __func__
,
2025 clk_propagate_rate_change(top
, ABORT_RATE_CHANGE
);
2030 /* change the rates */
2031 clk_change_rate(top
);
2033 core
->req_rate
= req_rate
;
2035 clk_pm_runtime_put(core
);
2041 * clk_set_rate - specify a new rate for clk
2042 * @clk: the clk whose rate is being changed
2043 * @rate: the new rate for clk
2045 * In the simplest case clk_set_rate will only adjust the rate of clk.
2047 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
2048 * propagate up to clk's parent; whether or not this happens depends on the
2049 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
2050 * after calling .round_rate then upstream parent propagation is ignored. If
2051 * *parent_rate comes back with a new rate for clk's parent then we propagate
2052 * up to clk's parent and set its rate. Upward propagation will continue
2053 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
2054 * .round_rate stops requesting changes to clk's parent_rate.
2056 * Rate changes are accomplished via tree traversal that also recalculates the
2057 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
2059 * Returns 0 on success, -EERROR otherwise.
2061 int clk_set_rate(struct clk
*clk
, unsigned long rate
)
2068 /* prevent racing with updates to the clock topology */
2071 if (clk
->exclusive_count
)
2072 clk_core_rate_unprotect(clk
->core
);
2074 ret
= clk_core_set_rate_nolock(clk
->core
, rate
);
2076 if (clk
->exclusive_count
)
2077 clk_core_rate_protect(clk
->core
);
2079 clk_prepare_unlock();
2083 EXPORT_SYMBOL_GPL(clk_set_rate
);
2086 * clk_set_rate_exclusive - specify a new rate get exclusive control
2087 * @clk: the clk whose rate is being changed
2088 * @rate: the new rate for clk
2090 * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
2091 * within a critical section
2093 * This can be used initially to ensure that at least 1 consumer is
2094 * statisfied when several consumers are competing for exclusivity over the
2095 * same clock provider.
2097 * The exclusivity is not applied if setting the rate failed.
2099 * Calls to clk_rate_exclusive_get() should be balanced with calls to
2100 * clk_rate_exclusive_put().
2102 * Returns 0 on success, -EERROR otherwise.
2104 int clk_set_rate_exclusive(struct clk
*clk
, unsigned long rate
)
2111 /* prevent racing with updates to the clock topology */
2115 * The temporary protection removal is not here, on purpose
2116 * This function is meant to be used instead of clk_rate_protect,
2117 * so before the consumer code path protect the clock provider
2120 ret
= clk_core_set_rate_nolock(clk
->core
, rate
);
2122 clk_core_rate_protect(clk
->core
);
2123 clk
->exclusive_count
++;
2126 clk_prepare_unlock();
2130 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive
);
2133 * clk_set_rate_range - set a rate range for a clock source
2134 * @clk: clock source
2135 * @min: desired minimum clock rate in Hz, inclusive
2136 * @max: desired maximum clock rate in Hz, inclusive
2138 * Returns success (0) or negative errno.
2140 int clk_set_rate_range(struct clk
*clk
, unsigned long min
, unsigned long max
)
2143 unsigned long old_min
, old_max
, rate
;
2149 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
2150 __func__
, clk
->core
->name
, clk
->dev_id
, clk
->con_id
,
2157 if (clk
->exclusive_count
)
2158 clk_core_rate_unprotect(clk
->core
);
2160 /* Save the current values in case we need to rollback the change */
2161 old_min
= clk
->min_rate
;
2162 old_max
= clk
->max_rate
;
2163 clk
->min_rate
= min
;
2164 clk
->max_rate
= max
;
2166 rate
= clk_core_get_rate_nolock(clk
->core
);
2167 if (rate
< min
|| rate
> max
) {
2170 * We are in bit of trouble here, current rate is outside the
2171 * the requested range. We are going try to request appropriate
2172 * range boundary but there is a catch. It may fail for the
2173 * usual reason (clock broken, clock protected, etc) but also
2175 * - round_rate() was not favorable and fell on the wrong
2176 * side of the boundary
2177 * - the determine_rate() callback does not really check for
2178 * this corner case when determining the rate
2186 ret
= clk_core_set_rate_nolock(clk
->core
, rate
);
2188 /* rollback the changes */
2189 clk
->min_rate
= old_min
;
2190 clk
->max_rate
= old_max
;
2194 if (clk
->exclusive_count
)
2195 clk_core_rate_protect(clk
->core
);
2197 clk_prepare_unlock();
2201 EXPORT_SYMBOL_GPL(clk_set_rate_range
);
2204 * clk_set_min_rate - set a minimum clock rate for a clock source
2205 * @clk: clock source
2206 * @rate: desired minimum clock rate in Hz, inclusive
2208 * Returns success (0) or negative errno.
2210 int clk_set_min_rate(struct clk
*clk
, unsigned long rate
)
2215 return clk_set_rate_range(clk
, rate
, clk
->max_rate
);
2217 EXPORT_SYMBOL_GPL(clk_set_min_rate
);
2220 * clk_set_max_rate - set a maximum clock rate for a clock source
2221 * @clk: clock source
2222 * @rate: desired maximum clock rate in Hz, inclusive
2224 * Returns success (0) or negative errno.
2226 int clk_set_max_rate(struct clk
*clk
, unsigned long rate
)
2231 return clk_set_rate_range(clk
, clk
->min_rate
, rate
);
2233 EXPORT_SYMBOL_GPL(clk_set_max_rate
);
2236 * clk_get_parent - return the parent of a clk
2237 * @clk: the clk whose parent gets returned
2239 * Simply returns clk->parent. Returns NULL if clk is NULL.
2241 struct clk
*clk_get_parent(struct clk
*clk
)
2249 /* TODO: Create a per-user clk and change callers to call clk_put */
2250 parent
= !clk
->core
->parent
? NULL
: clk
->core
->parent
->hw
->clk
;
2251 clk_prepare_unlock();
2255 EXPORT_SYMBOL_GPL(clk_get_parent
);
2257 static struct clk_core
*__clk_init_parent(struct clk_core
*core
)
2261 if (core
->num_parents
> 1 && core
->ops
->get_parent
)
2262 index
= core
->ops
->get_parent(core
->hw
);
2264 return clk_core_get_parent_by_index(core
, index
);
2267 static void clk_core_reparent(struct clk_core
*core
,
2268 struct clk_core
*new_parent
)
2270 clk_reparent(core
, new_parent
);
2271 __clk_recalc_accuracies(core
);
2272 __clk_recalc_rates(core
, POST_RATE_CHANGE
);
2275 void clk_hw_reparent(struct clk_hw
*hw
, struct clk_hw
*new_parent
)
2280 clk_core_reparent(hw
->core
, !new_parent
? NULL
: new_parent
->core
);
2284 * clk_has_parent - check if a clock is a possible parent for another
2285 * @clk: clock source
2286 * @parent: parent clock source
2288 * This function can be used in drivers that need to check that a clock can be
2289 * the parent of another without actually changing the parent.
2291 * Returns true if @parent is a possible parent for @clk, false otherwise.
2293 bool clk_has_parent(struct clk
*clk
, struct clk
*parent
)
2295 struct clk_core
*core
, *parent_core
;
2297 /* NULL clocks should be nops, so return success if either is NULL. */
2298 if (!clk
|| !parent
)
2302 parent_core
= parent
->core
;
2304 /* Optimize for the case where the parent is already the parent. */
2305 if (core
->parent
== parent_core
)
2308 return match_string(core
->parent_names
, core
->num_parents
,
2309 parent_core
->name
) >= 0;
2311 EXPORT_SYMBOL_GPL(clk_has_parent
);
2313 static int clk_core_set_parent_nolock(struct clk_core
*core
,
2314 struct clk_core
*parent
)
2318 unsigned long p_rate
= 0;
2320 lockdep_assert_held(&prepare_lock
);
2325 if (core
->parent
== parent
)
2328 /* verify ops for for multi-parent clks */
2329 if (core
->num_parents
> 1 && !core
->ops
->set_parent
)
2332 /* check that we are allowed to re-parent if the clock is in use */
2333 if ((core
->flags
& CLK_SET_PARENT_GATE
) && core
->prepare_count
)
2336 if (clk_core_rate_is_protected(core
))
2339 /* try finding the new parent index */
2341 p_index
= clk_fetch_parent_index(core
, parent
);
2343 pr_debug("%s: clk %s can not be parent of clk %s\n",
2344 __func__
, parent
->name
, core
->name
);
2347 p_rate
= parent
->rate
;
2350 ret
= clk_pm_runtime_get(core
);
2354 /* propagate PRE_RATE_CHANGE notifications */
2355 ret
= __clk_speculate_rates(core
, p_rate
);
2357 /* abort if a driver objects */
2358 if (ret
& NOTIFY_STOP_MASK
)
2361 /* do the re-parent */
2362 ret
= __clk_set_parent(core
, parent
, p_index
);
2364 /* propagate rate an accuracy recalculation accordingly */
2366 __clk_recalc_rates(core
, ABORT_RATE_CHANGE
);
2368 __clk_recalc_rates(core
, POST_RATE_CHANGE
);
2369 __clk_recalc_accuracies(core
);
2373 clk_pm_runtime_put(core
);
2379 * clk_set_parent - switch the parent of a mux clk
2380 * @clk: the mux clk whose input we are switching
2381 * @parent: the new input to clk
2383 * Re-parent clk to use parent as its new input source. If clk is in
2384 * prepared state, the clk will get enabled for the duration of this call. If
2385 * that's not acceptable for a specific clk (Eg: the consumer can't handle
2386 * that, the reparenting is glitchy in hardware, etc), use the
2387 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
2389 * After successfully changing clk's parent clk_set_parent will update the
2390 * clk topology, sysfs topology and propagate rate recalculation via
2391 * __clk_recalc_rates.
2393 * Returns 0 on success, -EERROR otherwise.
2395 int clk_set_parent(struct clk
*clk
, struct clk
*parent
)
2404 if (clk
->exclusive_count
)
2405 clk_core_rate_unprotect(clk
->core
);
2407 ret
= clk_core_set_parent_nolock(clk
->core
,
2408 parent
? parent
->core
: NULL
);
2410 if (clk
->exclusive_count
)
2411 clk_core_rate_protect(clk
->core
);
2413 clk_prepare_unlock();
2417 EXPORT_SYMBOL_GPL(clk_set_parent
);
2419 static int clk_core_set_phase_nolock(struct clk_core
*core
, int degrees
)
2423 lockdep_assert_held(&prepare_lock
);
2428 if (clk_core_rate_is_protected(core
))
2431 trace_clk_set_phase(core
, degrees
);
2433 if (core
->ops
->set_phase
) {
2434 ret
= core
->ops
->set_phase(core
->hw
, degrees
);
2436 core
->phase
= degrees
;
2439 trace_clk_set_phase_complete(core
, degrees
);
2445 * clk_set_phase - adjust the phase shift of a clock signal
2446 * @clk: clock signal source
2447 * @degrees: number of degrees the signal is shifted
2449 * Shifts the phase of a clock signal by the specified
2450 * degrees. Returns 0 on success, -EERROR otherwise.
2452 * This function makes no distinction about the input or reference
2453 * signal that we adjust the clock signal phase against. For example
2454 * phase locked-loop clock signal generators we may shift phase with
2455 * respect to feedback clock signal input, but for other cases the
2456 * clock phase may be shifted with respect to some other, unspecified
2459 * Additionally the concept of phase shift does not propagate through
2460 * the clock tree hierarchy, which sets it apart from clock rates and
2461 * clock accuracy. A parent clock phase attribute does not have an
2462 * impact on the phase attribute of a child clock.
2464 int clk_set_phase(struct clk
*clk
, int degrees
)
2471 /* sanity check degrees */
2478 if (clk
->exclusive_count
)
2479 clk_core_rate_unprotect(clk
->core
);
2481 ret
= clk_core_set_phase_nolock(clk
->core
, degrees
);
2483 if (clk
->exclusive_count
)
2484 clk_core_rate_protect(clk
->core
);
2486 clk_prepare_unlock();
2490 EXPORT_SYMBOL_GPL(clk_set_phase
);
2492 static int clk_core_get_phase(struct clk_core
*core
)
2497 /* Always try to update cached phase if possible */
2498 if (core
->ops
->get_phase
)
2499 core
->phase
= core
->ops
->get_phase(core
->hw
);
2501 clk_prepare_unlock();
2507 * clk_get_phase - return the phase shift of a clock signal
2508 * @clk: clock signal source
2510 * Returns the phase shift of a clock node in degrees, otherwise returns
2513 int clk_get_phase(struct clk
*clk
)
2518 return clk_core_get_phase(clk
->core
);
2520 EXPORT_SYMBOL_GPL(clk_get_phase
);
2522 static void clk_core_reset_duty_cycle_nolock(struct clk_core
*core
)
2524 /* Assume a default value of 50% */
2529 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core
*core
);
2531 static int clk_core_update_duty_cycle_nolock(struct clk_core
*core
)
2533 struct clk_duty
*duty
= &core
->duty
;
2536 if (!core
->ops
->get_duty_cycle
)
2537 return clk_core_update_duty_cycle_parent_nolock(core
);
2539 ret
= core
->ops
->get_duty_cycle(core
->hw
, duty
);
2543 /* Don't trust the clock provider too much */
2544 if (duty
->den
== 0 || duty
->num
> duty
->den
) {
2552 clk_core_reset_duty_cycle_nolock(core
);
2556 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core
*core
)
2561 core
->flags
& CLK_DUTY_CYCLE_PARENT
) {
2562 ret
= clk_core_update_duty_cycle_nolock(core
->parent
);
2563 memcpy(&core
->duty
, &core
->parent
->duty
, sizeof(core
->duty
));
2565 clk_core_reset_duty_cycle_nolock(core
);
2571 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core
*core
,
2572 struct clk_duty
*duty
);
2574 static int clk_core_set_duty_cycle_nolock(struct clk_core
*core
,
2575 struct clk_duty
*duty
)
2579 lockdep_assert_held(&prepare_lock
);
2581 if (clk_core_rate_is_protected(core
))
2584 trace_clk_set_duty_cycle(core
, duty
);
2586 if (!core
->ops
->set_duty_cycle
)
2587 return clk_core_set_duty_cycle_parent_nolock(core
, duty
);
2589 ret
= core
->ops
->set_duty_cycle(core
->hw
, duty
);
2591 memcpy(&core
->duty
, duty
, sizeof(*duty
));
2593 trace_clk_set_duty_cycle_complete(core
, duty
);
2598 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core
*core
,
2599 struct clk_duty
*duty
)
2604 core
->flags
& (CLK_DUTY_CYCLE_PARENT
| CLK_SET_RATE_PARENT
)) {
2605 ret
= clk_core_set_duty_cycle_nolock(core
->parent
, duty
);
2606 memcpy(&core
->duty
, &core
->parent
->duty
, sizeof(core
->duty
));
2613 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
2614 * @clk: clock signal source
2615 * @num: numerator of the duty cycle ratio to be applied
2616 * @den: denominator of the duty cycle ratio to be applied
2618 * Apply the duty cycle ratio if the ratio is valid and the clock can
2619 * perform this operation
2621 * Returns (0) on success, a negative errno otherwise.
2623 int clk_set_duty_cycle(struct clk
*clk
, unsigned int num
, unsigned int den
)
2626 struct clk_duty duty
;
2631 /* sanity check the ratio */
2632 if (den
== 0 || num
> den
)
2640 if (clk
->exclusive_count
)
2641 clk_core_rate_unprotect(clk
->core
);
2643 ret
= clk_core_set_duty_cycle_nolock(clk
->core
, &duty
);
2645 if (clk
->exclusive_count
)
2646 clk_core_rate_protect(clk
->core
);
2648 clk_prepare_unlock();
2652 EXPORT_SYMBOL_GPL(clk_set_duty_cycle
);
2654 static int clk_core_get_scaled_duty_cycle(struct clk_core
*core
,
2657 struct clk_duty
*duty
= &core
->duty
;
2662 ret
= clk_core_update_duty_cycle_nolock(core
);
2664 ret
= mult_frac(scale
, duty
->num
, duty
->den
);
2666 clk_prepare_unlock();
2672 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
2673 * @clk: clock signal source
2674 * @scale: scaling factor to be applied to represent the ratio as an integer
2676 * Returns the duty cycle ratio of a clock node multiplied by the provided
2677 * scaling factor, or negative errno on error.
2679 int clk_get_scaled_duty_cycle(struct clk
*clk
, unsigned int scale
)
2684 return clk_core_get_scaled_duty_cycle(clk
->core
, scale
);
2686 EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle
);
2689 * clk_is_match - check if two clk's point to the same hardware clock
2690 * @p: clk compared against q
2691 * @q: clk compared against p
2693 * Returns true if the two struct clk pointers both point to the same hardware
2694 * clock node. Put differently, returns true if struct clk *p and struct clk *q
2695 * share the same struct clk_core object.
2697 * Returns false otherwise. Note that two NULL clks are treated as matching.
2699 bool clk_is_match(const struct clk
*p
, const struct clk
*q
)
2701 /* trivial case: identical struct clk's or both NULL */
2705 /* true if clk->core pointers match. Avoid dereferencing garbage */
2706 if (!IS_ERR_OR_NULL(p
) && !IS_ERR_OR_NULL(q
))
2707 if (p
->core
== q
->core
)
2712 EXPORT_SYMBOL_GPL(clk_is_match
);
2714 /*** debugfs support ***/
2716 #ifdef CONFIG_DEBUG_FS
2717 #include <linux/debugfs.h>
2719 static struct dentry
*rootdir
;
2720 static int inited
= 0;
2721 static DEFINE_MUTEX(clk_debug_lock
);
2722 static HLIST_HEAD(clk_debug_list
);
2724 static struct hlist_head
*all_lists
[] = {
2730 static struct hlist_head
*orphan_list
[] = {
2735 static void clk_summary_show_one(struct seq_file
*s
, struct clk_core
*c
,
2741 seq_printf(s
, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
2743 30 - level
* 3, c
->name
,
2744 c
->enable_count
, c
->prepare_count
, c
->protect_count
,
2745 clk_core_get_rate(c
), clk_core_get_accuracy(c
),
2746 clk_core_get_phase(c
),
2747 clk_core_get_scaled_duty_cycle(c
, 100000));
2750 static void clk_summary_show_subtree(struct seq_file
*s
, struct clk_core
*c
,
2753 struct clk_core
*child
;
2758 clk_summary_show_one(s
, c
, level
);
2760 hlist_for_each_entry(child
, &c
->children
, child_node
)
2761 clk_summary_show_subtree(s
, child
, level
+ 1);
2764 static int clk_summary_show(struct seq_file
*s
, void *data
)
2767 struct hlist_head
**lists
= (struct hlist_head
**)s
->private;
2769 seq_puts(s
, " enable prepare protect duty\n");
2770 seq_puts(s
, " clock count count count rate accuracy phase cycle\n");
2771 seq_puts(s
, "---------------------------------------------------------------------------------------------\n");
2775 for (; *lists
; lists
++)
2776 hlist_for_each_entry(c
, *lists
, child_node
)
2777 clk_summary_show_subtree(s
, c
, 0);
2779 clk_prepare_unlock();
2783 DEFINE_SHOW_ATTRIBUTE(clk_summary
);
2785 static void clk_dump_one(struct seq_file
*s
, struct clk_core
*c
, int level
)
2790 /* This should be JSON format, i.e. elements separated with a comma */
2791 seq_printf(s
, "\"%s\": { ", c
->name
);
2792 seq_printf(s
, "\"enable_count\": %d,", c
->enable_count
);
2793 seq_printf(s
, "\"prepare_count\": %d,", c
->prepare_count
);
2794 seq_printf(s
, "\"protect_count\": %d,", c
->protect_count
);
2795 seq_printf(s
, "\"rate\": %lu,", clk_core_get_rate(c
));
2796 seq_printf(s
, "\"accuracy\": %lu,", clk_core_get_accuracy(c
));
2797 seq_printf(s
, "\"phase\": %d,", clk_core_get_phase(c
));
2798 seq_printf(s
, "\"duty_cycle\": %u",
2799 clk_core_get_scaled_duty_cycle(c
, 100000));
2802 static void clk_dump_subtree(struct seq_file
*s
, struct clk_core
*c
, int level
)
2804 struct clk_core
*child
;
2809 clk_dump_one(s
, c
, level
);
2811 hlist_for_each_entry(child
, &c
->children
, child_node
) {
2813 clk_dump_subtree(s
, child
, level
+ 1);
2819 static int clk_dump_show(struct seq_file
*s
, void *data
)
2822 bool first_node
= true;
2823 struct hlist_head
**lists
= (struct hlist_head
**)s
->private;
2828 for (; *lists
; lists
++) {
2829 hlist_for_each_entry(c
, *lists
, child_node
) {
2833 clk_dump_subtree(s
, c
, 0);
2837 clk_prepare_unlock();
2842 DEFINE_SHOW_ATTRIBUTE(clk_dump
);
2844 static const struct {
2848 #define ENTRY(f) { f, #f }
2849 ENTRY(CLK_SET_RATE_GATE
),
2850 ENTRY(CLK_SET_PARENT_GATE
),
2851 ENTRY(CLK_SET_RATE_PARENT
),
2852 ENTRY(CLK_IGNORE_UNUSED
),
2853 ENTRY(CLK_IS_BASIC
),
2854 ENTRY(CLK_GET_RATE_NOCACHE
),
2855 ENTRY(CLK_SET_RATE_NO_REPARENT
),
2856 ENTRY(CLK_GET_ACCURACY_NOCACHE
),
2857 ENTRY(CLK_RECALC_NEW_RATES
),
2858 ENTRY(CLK_SET_RATE_UNGATE
),
2859 ENTRY(CLK_IS_CRITICAL
),
2860 ENTRY(CLK_OPS_PARENT_ENABLE
),
2861 ENTRY(CLK_DUTY_CYCLE_PARENT
),
2865 static int clk_flags_show(struct seq_file
*s
, void *data
)
2867 struct clk_core
*core
= s
->private;
2868 unsigned long flags
= core
->flags
;
2871 for (i
= 0; flags
&& i
< ARRAY_SIZE(clk_flags
); i
++) {
2872 if (flags
& clk_flags
[i
].flag
) {
2873 seq_printf(s
, "%s\n", clk_flags
[i
].name
);
2874 flags
&= ~clk_flags
[i
].flag
;
2879 seq_printf(s
, "0x%lx\n", flags
);
2884 DEFINE_SHOW_ATTRIBUTE(clk_flags
);
2886 static int possible_parents_show(struct seq_file
*s
, void *data
)
2888 struct clk_core
*core
= s
->private;
2891 for (i
= 0; i
< core
->num_parents
- 1; i
++)
2892 seq_printf(s
, "%s ", core
->parent_names
[i
]);
2894 seq_printf(s
, "%s\n", core
->parent_names
[i
]);
2898 DEFINE_SHOW_ATTRIBUTE(possible_parents
);
2900 static int clk_duty_cycle_show(struct seq_file
*s
, void *data
)
2902 struct clk_core
*core
= s
->private;
2903 struct clk_duty
*duty
= &core
->duty
;
2905 seq_printf(s
, "%u/%u\n", duty
->num
, duty
->den
);
2909 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle
);
2911 static void clk_debug_create_one(struct clk_core
*core
, struct dentry
*pdentry
)
2913 struct dentry
*root
;
2915 if (!core
|| !pdentry
)
2918 root
= debugfs_create_dir(core
->name
, pdentry
);
2919 core
->dentry
= root
;
2921 debugfs_create_ulong("clk_rate", 0444, root
, &core
->rate
);
2922 debugfs_create_ulong("clk_accuracy", 0444, root
, &core
->accuracy
);
2923 debugfs_create_u32("clk_phase", 0444, root
, &core
->phase
);
2924 debugfs_create_file("clk_flags", 0444, root
, core
, &clk_flags_fops
);
2925 debugfs_create_u32("clk_prepare_count", 0444, root
, &core
->prepare_count
);
2926 debugfs_create_u32("clk_enable_count", 0444, root
, &core
->enable_count
);
2927 debugfs_create_u32("clk_protect_count", 0444, root
, &core
->protect_count
);
2928 debugfs_create_u32("clk_notifier_count", 0444, root
, &core
->notifier_count
);
2929 debugfs_create_file("clk_duty_cycle", 0444, root
, core
,
2930 &clk_duty_cycle_fops
);
2932 if (core
->num_parents
> 1)
2933 debugfs_create_file("clk_possible_parents", 0444, root
, core
,
2934 &possible_parents_fops
);
2936 if (core
->ops
->debug_init
)
2937 core
->ops
->debug_init(core
->hw
, core
->dentry
);
2941 * clk_debug_register - add a clk node to the debugfs clk directory
2942 * @core: the clk being added to the debugfs clk directory
2944 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
2945 * initialized. Otherwise it bails out early since the debugfs clk directory
2946 * will be created lazily by clk_debug_init as part of a late_initcall.
2948 static void clk_debug_register(struct clk_core
*core
)
2950 mutex_lock(&clk_debug_lock
);
2951 hlist_add_head(&core
->debug_node
, &clk_debug_list
);
2953 clk_debug_create_one(core
, rootdir
);
2954 mutex_unlock(&clk_debug_lock
);
2958 * clk_debug_unregister - remove a clk node from the debugfs clk directory
2959 * @core: the clk being removed from the debugfs clk directory
2961 * Dynamically removes a clk and all its child nodes from the
2962 * debugfs clk directory if clk->dentry points to debugfs created by
2963 * clk_debug_register in __clk_core_init.
2965 static void clk_debug_unregister(struct clk_core
*core
)
2967 mutex_lock(&clk_debug_lock
);
2968 hlist_del_init(&core
->debug_node
);
2969 debugfs_remove_recursive(core
->dentry
);
2970 core
->dentry
= NULL
;
2971 mutex_unlock(&clk_debug_lock
);
2975 * clk_debug_init - lazily populate the debugfs clk directory
2977 * clks are often initialized very early during boot before memory can be
2978 * dynamically allocated and well before debugfs is setup. This function
2979 * populates the debugfs clk directory once at boot-time when we know that
2980 * debugfs is setup. It should only be called once at boot-time, all other clks
2981 * added dynamically will be done so with clk_debug_register.
2983 static int __init
clk_debug_init(void)
2985 struct clk_core
*core
;
2987 rootdir
= debugfs_create_dir("clk", NULL
);
2989 debugfs_create_file("clk_summary", 0444, rootdir
, &all_lists
,
2991 debugfs_create_file("clk_dump", 0444, rootdir
, &all_lists
,
2993 debugfs_create_file("clk_orphan_summary", 0444, rootdir
, &orphan_list
,
2995 debugfs_create_file("clk_orphan_dump", 0444, rootdir
, &orphan_list
,
2998 mutex_lock(&clk_debug_lock
);
2999 hlist_for_each_entry(core
, &clk_debug_list
, debug_node
)
3000 clk_debug_create_one(core
, rootdir
);
3003 mutex_unlock(&clk_debug_lock
);
3007 late_initcall(clk_debug_init
);
3009 static inline void clk_debug_register(struct clk_core
*core
) { }
3010 static inline void clk_debug_reparent(struct clk_core
*core
,
3011 struct clk_core
*new_parent
)
3014 static inline void clk_debug_unregister(struct clk_core
*core
)
3020 * __clk_core_init - initialize the data structures in a struct clk_core
3021 * @core: clk_core being initialized
3023 * Initializes the lists in struct clk_core, queries the hardware for the
3024 * parent and rate and sets them both.
3026 static int __clk_core_init(struct clk_core
*core
)
3029 struct clk_core
*orphan
;
3030 struct hlist_node
*tmp2
;
3038 ret
= clk_pm_runtime_get(core
);
3042 /* check to see if a clock with this name is already registered */
3043 if (clk_core_lookup(core
->name
)) {
3044 pr_debug("%s: clk %s already initialized\n",
3045 __func__
, core
->name
);
3050 /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */
3051 if (core
->ops
->set_rate
&&
3052 !((core
->ops
->round_rate
|| core
->ops
->determine_rate
) &&
3053 core
->ops
->recalc_rate
)) {
3054 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
3055 __func__
, core
->name
);
3060 if (core
->ops
->set_parent
&& !core
->ops
->get_parent
) {
3061 pr_err("%s: %s must implement .get_parent & .set_parent\n",
3062 __func__
, core
->name
);
3067 if (core
->num_parents
> 1 && !core
->ops
->get_parent
) {
3068 pr_err("%s: %s must implement .get_parent as it has multi parents\n",
3069 __func__
, core
->name
);
3074 if (core
->ops
->set_rate_and_parent
&&
3075 !(core
->ops
->set_parent
&& core
->ops
->set_rate
)) {
3076 pr_err("%s: %s must implement .set_parent & .set_rate\n",
3077 __func__
, core
->name
);
3082 /* throw a WARN if any entries in parent_names are NULL */
3083 for (i
= 0; i
< core
->num_parents
; i
++)
3084 WARN(!core
->parent_names
[i
],
3085 "%s: invalid NULL in %s's .parent_names\n",
3086 __func__
, core
->name
);
3088 core
->parent
= __clk_init_parent(core
);
3091 * Populate core->parent if parent has already been clk_core_init'd. If
3092 * parent has not yet been clk_core_init'd then place clk in the orphan
3093 * list. If clk doesn't have any parents then place it in the root
3096 * Every time a new clk is clk_init'd then we walk the list of orphan
3097 * clocks and re-parent any that are children of the clock currently
3101 hlist_add_head(&core
->child_node
,
3102 &core
->parent
->children
);
3103 core
->orphan
= core
->parent
->orphan
;
3104 } else if (!core
->num_parents
) {
3105 hlist_add_head(&core
->child_node
, &clk_root_list
);
3106 core
->orphan
= false;
3108 hlist_add_head(&core
->child_node
, &clk_orphan_list
);
3109 core
->orphan
= true;
3113 * optional platform-specific magic
3115 * The .init callback is not used by any of the basic clock types, but
3116 * exists for weird hardware that must perform initialization magic.
3117 * Please consider other ways of solving initialization problems before
3118 * using this callback, as its use is discouraged.
3120 if (core
->ops
->init
)
3121 core
->ops
->init(core
->hw
);
3124 * Set clk's accuracy. The preferred method is to use
3125 * .recalc_accuracy. For simple clocks and lazy developers the default
3126 * fallback is to use the parent's accuracy. If a clock doesn't have a
3127 * parent (or is orphaned) then accuracy is set to zero (perfect
3130 if (core
->ops
->recalc_accuracy
)
3131 core
->accuracy
= core
->ops
->recalc_accuracy(core
->hw
,
3132 __clk_get_accuracy(core
->parent
));
3133 else if (core
->parent
)
3134 core
->accuracy
= core
->parent
->accuracy
;
3140 * Since a phase is by definition relative to its parent, just
3141 * query the current clock phase, or just assume it's in phase.
3143 if (core
->ops
->get_phase
)
3144 core
->phase
= core
->ops
->get_phase(core
->hw
);
3149 * Set clk's duty cycle.
3151 clk_core_update_duty_cycle_nolock(core
);
3154 * Set clk's rate. The preferred method is to use .recalc_rate. For
3155 * simple clocks and lazy developers the default fallback is to use the
3156 * parent's rate. If a clock doesn't have a parent (or is orphaned)
3157 * then rate is set to zero.
3159 if (core
->ops
->recalc_rate
)
3160 rate
= core
->ops
->recalc_rate(core
->hw
,
3161 clk_core_get_rate_nolock(core
->parent
));
3162 else if (core
->parent
)
3163 rate
= core
->parent
->rate
;
3166 core
->rate
= core
->req_rate
= rate
;
3169 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
3170 * don't get accidentally disabled when walking the orphan tree and
3171 * reparenting clocks
3173 if (core
->flags
& CLK_IS_CRITICAL
) {
3174 unsigned long flags
;
3176 clk_core_prepare(core
);
3178 flags
= clk_enable_lock();
3179 clk_core_enable(core
);
3180 clk_enable_unlock(flags
);
3184 * walk the list of orphan clocks and reparent any that newly finds a
3187 hlist_for_each_entry_safe(orphan
, tmp2
, &clk_orphan_list
, child_node
) {
3188 struct clk_core
*parent
= __clk_init_parent(orphan
);
3191 * We need to use __clk_set_parent_before() and _after() to
3192 * to properly migrate any prepare/enable count of the orphan
3193 * clock. This is important for CLK_IS_CRITICAL clocks, which
3194 * are enabled during init but might not have a parent yet.
3197 /* update the clk tree topology */
3198 __clk_set_parent_before(orphan
, parent
);
3199 __clk_set_parent_after(orphan
, parent
, NULL
);
3200 __clk_recalc_accuracies(orphan
);
3201 __clk_recalc_rates(orphan
, 0);
3205 kref_init(&core
->ref
);
3207 clk_pm_runtime_put(core
);
3209 clk_prepare_unlock();
3212 clk_debug_register(core
);
3218 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
3219 * @core: clk to add consumer to
3220 * @clk: consumer to link to a clk
3222 static void clk_core_link_consumer(struct clk_core
*core
, struct clk
*clk
)
3225 hlist_add_head(&clk
->clks_node
, &core
->clks
);
3226 clk_prepare_unlock();
3230 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
3231 * @clk: consumer to unlink
3233 static void clk_core_unlink_consumer(struct clk
*clk
)
3235 lockdep_assert_held(&prepare_lock
);
3236 hlist_del(&clk
->clks_node
);
3240 * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
3241 * @core: clk to allocate a consumer for
3242 * @dev_id: string describing device name
3243 * @con_id: connection ID string on device
3245 * Returns: clk consumer left unlinked from the consumer list
3247 static struct clk
*alloc_clk(struct clk_core
*core
, const char *dev_id
,
3252 clk
= kzalloc(sizeof(*clk
), GFP_KERNEL
);
3254 return ERR_PTR(-ENOMEM
);
3257 clk
->dev_id
= dev_id
;
3258 clk
->con_id
= kstrdup_const(con_id
, GFP_KERNEL
);
3259 clk
->max_rate
= ULONG_MAX
;
3265 * free_clk - Free a clk consumer
3266 * @clk: clk consumer to free
3268 * Note, this assumes the clk has been unlinked from the clk_core consumer
3271 static void free_clk(struct clk
*clk
)
3273 kfree_const(clk
->con_id
);
3278 * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given
3280 * @dev: clk consumer device
3281 * @hw: clk_hw associated with the clk being consumed
3282 * @dev_id: string describing device name
3283 * @con_id: connection ID string on device
3285 * This is the main function used to create a clk pointer for use by clk
3286 * consumers. It connects a consumer to the clk_core and clk_hw structures
3287 * used by the framework and clk provider respectively.
3289 struct clk
*clk_hw_create_clk(struct device
*dev
, struct clk_hw
*hw
,
3290 const char *dev_id
, const char *con_id
)
3293 struct clk_core
*core
;
3295 /* This is to allow this function to be chained to others */
3296 if (IS_ERR_OR_NULL(hw
))
3297 return ERR_CAST(hw
);
3300 clk
= alloc_clk(core
, dev_id
, con_id
);
3305 if (!try_module_get(core
->owner
)) {
3307 return ERR_PTR(-ENOENT
);
3310 kref_get(&core
->ref
);
3311 clk_core_link_consumer(core
, clk
);
3317 * clk_register - allocate a new clock, register it and return an opaque cookie
3318 * @dev: device that is registering this clock
3319 * @hw: link to hardware-specific clock data
3321 * clk_register is the primary interface for populating the clock tree with new
3322 * clock nodes. It returns a pointer to the newly allocated struct clk which
3323 * cannot be dereferenced by driver code but may be used in conjunction with the
3324 * rest of the clock API. In the event of an error clk_register will return an
3325 * error code; drivers must test for an error code after calling clk_register.
3327 struct clk
*clk_register(struct device
*dev
, struct clk_hw
*hw
)
3330 struct clk_core
*core
;
3332 core
= kzalloc(sizeof(*core
), GFP_KERNEL
);
3338 core
->name
= kstrdup_const(hw
->init
->name
, GFP_KERNEL
);
3344 if (WARN_ON(!hw
->init
->ops
)) {
3348 core
->ops
= hw
->init
->ops
;
3350 if (dev
&& pm_runtime_enabled(dev
))
3351 core
->rpm_enabled
= true;
3353 if (dev
&& dev
->driver
)
3354 core
->owner
= dev
->driver
->owner
;
3356 core
->flags
= hw
->init
->flags
;
3357 core
->num_parents
= hw
->init
->num_parents
;
3359 core
->max_rate
= ULONG_MAX
;
3362 /* allocate local copy in case parent_names is __initdata */
3363 core
->parent_names
= kcalloc(core
->num_parents
, sizeof(char *),
3366 if (!core
->parent_names
) {
3368 goto fail_parent_names
;
3372 /* copy each string name in case parent_names is __initdata */
3373 for (i
= 0; i
< core
->num_parents
; i
++) {
3374 core
->parent_names
[i
] = kstrdup_const(hw
->init
->parent_names
[i
],
3376 if (!core
->parent_names
[i
]) {
3378 goto fail_parent_names_copy
;
3382 /* avoid unnecessary string look-ups of clk_core's possible parents. */
3383 core
->parents
= kcalloc(core
->num_parents
, sizeof(*core
->parents
),
3385 if (!core
->parents
) {
3390 INIT_HLIST_HEAD(&core
->clks
);
3393 * Don't call clk_hw_create_clk() here because that would pin the
3394 * provider module to itself and prevent it from ever being removed.
3396 hw
->clk
= alloc_clk(core
, NULL
, NULL
);
3397 if (IS_ERR(hw
->clk
)) {
3398 ret
= PTR_ERR(hw
->clk
);
3402 clk_core_link_consumer(hw
->core
, hw
->clk
);
3404 ret
= __clk_core_init(core
);
3409 clk_core_unlink_consumer(hw
->clk
);
3410 clk_prepare_unlock();
3416 kfree(core
->parents
);
3417 fail_parent_names_copy
:
3419 kfree_const(core
->parent_names
[i
]);
3420 kfree(core
->parent_names
);
3423 kfree_const(core
->name
);
3427 return ERR_PTR(ret
);
3429 EXPORT_SYMBOL_GPL(clk_register
);
3432 * clk_hw_register - register a clk_hw and return an error code
3433 * @dev: device that is registering this clock
3434 * @hw: link to hardware-specific clock data
3436 * clk_hw_register is the primary interface for populating the clock tree with
3437 * new clock nodes. It returns an integer equal to zero indicating success or
3438 * less than zero indicating failure. Drivers must test for an error code after
3439 * calling clk_hw_register().
3441 int clk_hw_register(struct device
*dev
, struct clk_hw
*hw
)
3443 return PTR_ERR_OR_ZERO(clk_register(dev
, hw
));
3445 EXPORT_SYMBOL_GPL(clk_hw_register
);
3447 /* Free memory allocated for a clock. */
3448 static void __clk_release(struct kref
*ref
)
3450 struct clk_core
*core
= container_of(ref
, struct clk_core
, ref
);
3451 int i
= core
->num_parents
;
3453 lockdep_assert_held(&prepare_lock
);
3455 kfree(core
->parents
);
3457 kfree_const(core
->parent_names
[i
]);
3459 kfree(core
->parent_names
);
3460 kfree_const(core
->name
);
3465 * Empty clk_ops for unregistered clocks. These are used temporarily
3466 * after clk_unregister() was called on a clock and until last clock
3467 * consumer calls clk_put() and the struct clk object is freed.
3469 static int clk_nodrv_prepare_enable(struct clk_hw
*hw
)
3474 static void clk_nodrv_disable_unprepare(struct clk_hw
*hw
)
3479 static int clk_nodrv_set_rate(struct clk_hw
*hw
, unsigned long rate
,
3480 unsigned long parent_rate
)
3485 static int clk_nodrv_set_parent(struct clk_hw
*hw
, u8 index
)
3490 static const struct clk_ops clk_nodrv_ops
= {
3491 .enable
= clk_nodrv_prepare_enable
,
3492 .disable
= clk_nodrv_disable_unprepare
,
3493 .prepare
= clk_nodrv_prepare_enable
,
3494 .unprepare
= clk_nodrv_disable_unprepare
,
3495 .set_rate
= clk_nodrv_set_rate
,
3496 .set_parent
= clk_nodrv_set_parent
,
3500 * clk_unregister - unregister a currently registered clock
3501 * @clk: clock to unregister
3503 void clk_unregister(struct clk
*clk
)
3505 unsigned long flags
;
3507 if (!clk
|| WARN_ON_ONCE(IS_ERR(clk
)))
3510 clk_debug_unregister(clk
->core
);
3514 if (clk
->core
->ops
== &clk_nodrv_ops
) {
3515 pr_err("%s: unregistered clock: %s\n", __func__
,
3520 * Assign empty clock ops for consumers that might still hold
3521 * a reference to this clock.
3523 flags
= clk_enable_lock();
3524 clk
->core
->ops
= &clk_nodrv_ops
;
3525 clk_enable_unlock(flags
);
3527 if (!hlist_empty(&clk
->core
->children
)) {
3528 struct clk_core
*child
;
3529 struct hlist_node
*t
;
3531 /* Reparent all children to the orphan list. */
3532 hlist_for_each_entry_safe(child
, t
, &clk
->core
->children
,
3534 clk_core_set_parent_nolock(child
, NULL
);
3537 hlist_del_init(&clk
->core
->child_node
);
3539 if (clk
->core
->prepare_count
)
3540 pr_warn("%s: unregistering prepared clock: %s\n",
3541 __func__
, clk
->core
->name
);
3543 if (clk
->core
->protect_count
)
3544 pr_warn("%s: unregistering protected clock: %s\n",
3545 __func__
, clk
->core
->name
);
3547 kref_put(&clk
->core
->ref
, __clk_release
);
3549 clk_prepare_unlock();
3551 EXPORT_SYMBOL_GPL(clk_unregister
);
3554 * clk_hw_unregister - unregister a currently registered clk_hw
3555 * @hw: hardware-specific clock data to unregister
3557 void clk_hw_unregister(struct clk_hw
*hw
)
3559 clk_unregister(hw
->clk
);
3561 EXPORT_SYMBOL_GPL(clk_hw_unregister
);
3563 static void devm_clk_release(struct device
*dev
, void *res
)
3565 clk_unregister(*(struct clk
**)res
);
3568 static void devm_clk_hw_release(struct device
*dev
, void *res
)
3570 clk_hw_unregister(*(struct clk_hw
**)res
);
3574 * devm_clk_register - resource managed clk_register()
3575 * @dev: device that is registering this clock
3576 * @hw: link to hardware-specific clock data
3578 * Managed clk_register(). Clocks returned from this function are
3579 * automatically clk_unregister()ed on driver detach. See clk_register() for
3582 struct clk
*devm_clk_register(struct device
*dev
, struct clk_hw
*hw
)
3587 clkp
= devres_alloc(devm_clk_release
, sizeof(*clkp
), GFP_KERNEL
);
3589 return ERR_PTR(-ENOMEM
);
3591 clk
= clk_register(dev
, hw
);
3594 devres_add(dev
, clkp
);
3601 EXPORT_SYMBOL_GPL(devm_clk_register
);
3604 * devm_clk_hw_register - resource managed clk_hw_register()
3605 * @dev: device that is registering this clock
3606 * @hw: link to hardware-specific clock data
3608 * Managed clk_hw_register(). Clocks registered by this function are
3609 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
3610 * for more information.
3612 int devm_clk_hw_register(struct device
*dev
, struct clk_hw
*hw
)
3614 struct clk_hw
**hwp
;
3617 hwp
= devres_alloc(devm_clk_hw_release
, sizeof(*hwp
), GFP_KERNEL
);
3621 ret
= clk_hw_register(dev
, hw
);
3624 devres_add(dev
, hwp
);
3631 EXPORT_SYMBOL_GPL(devm_clk_hw_register
);
3633 static int devm_clk_match(struct device
*dev
, void *res
, void *data
)
3635 struct clk
*c
= res
;
3641 static int devm_clk_hw_match(struct device
*dev
, void *res
, void *data
)
3643 struct clk_hw
*hw
= res
;
3651 * devm_clk_unregister - resource managed clk_unregister()
3652 * @clk: clock to unregister
3654 * Deallocate a clock allocated with devm_clk_register(). Normally
3655 * this function will not need to be called and the resource management
3656 * code will ensure that the resource is freed.
3658 void devm_clk_unregister(struct device
*dev
, struct clk
*clk
)
3660 WARN_ON(devres_release(dev
, devm_clk_release
, devm_clk_match
, clk
));
3662 EXPORT_SYMBOL_GPL(devm_clk_unregister
);
3665 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
3666 * @dev: device that is unregistering the hardware-specific clock data
3667 * @hw: link to hardware-specific clock data
3669 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
3670 * this function will not need to be called and the resource management
3671 * code will ensure that the resource is freed.
3673 void devm_clk_hw_unregister(struct device
*dev
, struct clk_hw
*hw
)
3675 WARN_ON(devres_release(dev
, devm_clk_hw_release
, devm_clk_hw_match
,
3678 EXPORT_SYMBOL_GPL(devm_clk_hw_unregister
);
3684 void __clk_put(struct clk
*clk
)
3686 struct module
*owner
;
3688 if (!clk
|| WARN_ON_ONCE(IS_ERR(clk
)))
3694 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
3695 * given user should be balanced with calls to clk_rate_exclusive_put()
3696 * and by that same consumer
3698 if (WARN_ON(clk
->exclusive_count
)) {
3699 /* We voiced our concern, let's sanitize the situation */
3700 clk
->core
->protect_count
-= (clk
->exclusive_count
- 1);
3701 clk_core_rate_unprotect(clk
->core
);
3702 clk
->exclusive_count
= 0;
3705 hlist_del(&clk
->clks_node
);
3706 if (clk
->min_rate
> clk
->core
->req_rate
||
3707 clk
->max_rate
< clk
->core
->req_rate
)
3708 clk_core_set_rate_nolock(clk
->core
, clk
->core
->req_rate
);
3710 owner
= clk
->core
->owner
;
3711 kref_put(&clk
->core
->ref
, __clk_release
);
3713 clk_prepare_unlock();
3720 /*** clk rate change notifiers ***/
3723 * clk_notifier_register - add a clk rate change notifier
3724 * @clk: struct clk * to watch
3725 * @nb: struct notifier_block * with callback info
3727 * Request notification when clk's rate changes. This uses an SRCU
3728 * notifier because we want it to block and notifier unregistrations are
3729 * uncommon. The callbacks associated with the notifier must not
3730 * re-enter into the clk framework by calling any top-level clk APIs;
3731 * this will cause a nested prepare_lock mutex.
3733 * In all notification cases (pre, post and abort rate change) the original
3734 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
3735 * and the new frequency is passed via struct clk_notifier_data.new_rate.
3737 * clk_notifier_register() must be called from non-atomic context.
3738 * Returns -EINVAL if called with null arguments, -ENOMEM upon
3739 * allocation failure; otherwise, passes along the return value of
3740 * srcu_notifier_chain_register().
3742 int clk_notifier_register(struct clk
*clk
, struct notifier_block
*nb
)
3744 struct clk_notifier
*cn
;
3752 /* search the list of notifiers for this clk */
3753 list_for_each_entry(cn
, &clk_notifier_list
, node
)
3757 /* if clk wasn't in the notifier list, allocate new clk_notifier */
3758 if (cn
->clk
!= clk
) {
3759 cn
= kzalloc(sizeof(*cn
), GFP_KERNEL
);
3764 srcu_init_notifier_head(&cn
->notifier_head
);
3766 list_add(&cn
->node
, &clk_notifier_list
);
3769 ret
= srcu_notifier_chain_register(&cn
->notifier_head
, nb
);
3771 clk
->core
->notifier_count
++;
3774 clk_prepare_unlock();
3778 EXPORT_SYMBOL_GPL(clk_notifier_register
);
3781 * clk_notifier_unregister - remove a clk rate change notifier
3782 * @clk: struct clk *
3783 * @nb: struct notifier_block * with callback info
3785 * Request no further notification for changes to 'clk' and frees memory
3786 * allocated in clk_notifier_register.
3788 * Returns -EINVAL if called with null arguments; otherwise, passes
3789 * along the return value of srcu_notifier_chain_unregister().
3791 int clk_notifier_unregister(struct clk
*clk
, struct notifier_block
*nb
)
3793 struct clk_notifier
*cn
= NULL
;
3801 list_for_each_entry(cn
, &clk_notifier_list
, node
)
3805 if (cn
->clk
== clk
) {
3806 ret
= srcu_notifier_chain_unregister(&cn
->notifier_head
, nb
);
3808 clk
->core
->notifier_count
--;
3810 /* XXX the notifier code should handle this better */
3811 if (!cn
->notifier_head
.head
) {
3812 srcu_cleanup_notifier_head(&cn
->notifier_head
);
3813 list_del(&cn
->node
);
3821 clk_prepare_unlock();
3825 EXPORT_SYMBOL_GPL(clk_notifier_unregister
);
3829 * struct of_clk_provider - Clock provider registration structure
3830 * @link: Entry in global list of clock providers
3831 * @node: Pointer to device tree node of clock provider
3832 * @get: Get clock callback. Returns NULL or a struct clk for the
3833 * given clock specifier
3834 * @data: context pointer to be passed into @get callback
3836 struct of_clk_provider
{
3837 struct list_head link
;
3839 struct device_node
*node
;
3840 struct clk
*(*get
)(struct of_phandle_args
*clkspec
, void *data
);
3841 struct clk_hw
*(*get_hw
)(struct of_phandle_args
*clkspec
, void *data
);
3845 static const struct of_device_id __clk_of_table_sentinel
3846 __used
__section(__clk_of_table_end
);
3848 static LIST_HEAD(of_clk_providers
);
3849 static DEFINE_MUTEX(of_clk_mutex
);
3851 struct clk
*of_clk_src_simple_get(struct of_phandle_args
*clkspec
,
3856 EXPORT_SYMBOL_GPL(of_clk_src_simple_get
);
3858 struct clk_hw
*of_clk_hw_simple_get(struct of_phandle_args
*clkspec
, void *data
)
3862 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get
);
3864 struct clk
*of_clk_src_onecell_get(struct of_phandle_args
*clkspec
, void *data
)
3866 struct clk_onecell_data
*clk_data
= data
;
3867 unsigned int idx
= clkspec
->args
[0];
3869 if (idx
>= clk_data
->clk_num
) {
3870 pr_err("%s: invalid clock index %u\n", __func__
, idx
);
3871 return ERR_PTR(-EINVAL
);
3874 return clk_data
->clks
[idx
];
3876 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get
);
3879 of_clk_hw_onecell_get(struct of_phandle_args
*clkspec
, void *data
)
3881 struct clk_hw_onecell_data
*hw_data
= data
;
3882 unsigned int idx
= clkspec
->args
[0];
3884 if (idx
>= hw_data
->num
) {
3885 pr_err("%s: invalid index %u\n", __func__
, idx
);
3886 return ERR_PTR(-EINVAL
);
3889 return hw_data
->hws
[idx
];
3891 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get
);
3894 * of_clk_add_provider() - Register a clock provider for a node
3895 * @np: Device node pointer associated with clock provider
3896 * @clk_src_get: callback for decoding clock
3897 * @data: context pointer for @clk_src_get callback.
3899 int of_clk_add_provider(struct device_node
*np
,
3900 struct clk
*(*clk_src_get
)(struct of_phandle_args
*clkspec
,
3904 struct of_clk_provider
*cp
;
3907 cp
= kzalloc(sizeof(*cp
), GFP_KERNEL
);
3911 cp
->node
= of_node_get(np
);
3913 cp
->get
= clk_src_get
;
3915 mutex_lock(&of_clk_mutex
);
3916 list_add(&cp
->link
, &of_clk_providers
);
3917 mutex_unlock(&of_clk_mutex
);
3918 pr_debug("Added clock from %pOF\n", np
);
3920 ret
= of_clk_set_defaults(np
, true);
3922 of_clk_del_provider(np
);
3926 EXPORT_SYMBOL_GPL(of_clk_add_provider
);
3929 * of_clk_add_hw_provider() - Register a clock provider for a node
3930 * @np: Device node pointer associated with clock provider
3931 * @get: callback for decoding clk_hw
3932 * @data: context pointer for @get callback.
3934 int of_clk_add_hw_provider(struct device_node
*np
,
3935 struct clk_hw
*(*get
)(struct of_phandle_args
*clkspec
,
3939 struct of_clk_provider
*cp
;
3942 cp
= kzalloc(sizeof(*cp
), GFP_KERNEL
);
3946 cp
->node
= of_node_get(np
);
3950 mutex_lock(&of_clk_mutex
);
3951 list_add(&cp
->link
, &of_clk_providers
);
3952 mutex_unlock(&of_clk_mutex
);
3953 pr_debug("Added clk_hw provider from %pOF\n", np
);
3955 ret
= of_clk_set_defaults(np
, true);
3957 of_clk_del_provider(np
);
3961 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider
);
3963 static void devm_of_clk_release_provider(struct device
*dev
, void *res
)
3965 of_clk_del_provider(*(struct device_node
**)res
);
3969 * We allow a child device to use its parent device as the clock provider node
3970 * for cases like MFD sub-devices where the child device driver wants to use
3971 * devm_*() APIs but not list the device in DT as a sub-node.
3973 static struct device_node
*get_clk_provider_node(struct device
*dev
)
3975 struct device_node
*np
, *parent_np
;
3978 parent_np
= dev
->parent
? dev
->parent
->of_node
: NULL
;
3980 if (!of_find_property(np
, "#clock-cells", NULL
))
3981 if (of_find_property(parent_np
, "#clock-cells", NULL
))
3988 * devm_of_clk_add_hw_provider() - Managed clk provider node registration
3989 * @dev: Device acting as the clock provider (used for DT node and lifetime)
3990 * @get: callback for decoding clk_hw
3991 * @data: context pointer for @get callback
3993 * Registers clock provider for given device's node. If the device has no DT
3994 * node or if the device node lacks of clock provider information (#clock-cells)
3995 * then the parent device's node is scanned for this information. If parent node
3996 * has the #clock-cells then it is used in registration. Provider is
3997 * automatically released at device exit.
3999 * Return: 0 on success or an errno on failure.
4001 int devm_of_clk_add_hw_provider(struct device
*dev
,
4002 struct clk_hw
*(*get
)(struct of_phandle_args
*clkspec
,
4006 struct device_node
**ptr
, *np
;
4009 ptr
= devres_alloc(devm_of_clk_release_provider
, sizeof(*ptr
),
4014 np
= get_clk_provider_node(dev
);
4015 ret
= of_clk_add_hw_provider(np
, get
, data
);
4018 devres_add(dev
, ptr
);
4025 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider
);
4028 * of_clk_del_provider() - Remove a previously registered clock provider
4029 * @np: Device node pointer associated with clock provider
4031 void of_clk_del_provider(struct device_node
*np
)
4033 struct of_clk_provider
*cp
;
4035 mutex_lock(&of_clk_mutex
);
4036 list_for_each_entry(cp
, &of_clk_providers
, link
) {
4037 if (cp
->node
== np
) {
4038 list_del(&cp
->link
);
4039 of_node_put(cp
->node
);
4044 mutex_unlock(&of_clk_mutex
);
4046 EXPORT_SYMBOL_GPL(of_clk_del_provider
);
/*
 * devres match callback: true when the devres-stored provider node equals
 * the node passed in @data.
 */
static int devm_clk_provider_match(struct device *dev, void *res, void *data)
{
	struct device_node **np = res;

	/* A devres entry without a node is a framework bug, not a match. */
	if (WARN_ON(!np || !*np))
		return 0;

	return *np == data;
}
4059 * devm_of_clk_del_provider() - Remove clock provider registered using devm
4060 * @dev: Device to whose lifetime the clock provider was bound
4062 void devm_of_clk_del_provider(struct device
*dev
)
4065 struct device_node
*np
= get_clk_provider_node(dev
);
4067 ret
= devres_release(dev
, devm_of_clk_release_provider
,
4068 devm_clk_provider_match
, np
);
4072 EXPORT_SYMBOL(devm_of_clk_del_provider
);
4075 * Beware the return values when np is valid, but no clock provider is found.
4076 * If name == NULL, the function returns -ENOENT.
4077 * If name != NULL, the function returns -EINVAL. This is because
4078 * of_parse_phandle_with_args() is called even if of_property_match_string()
4081 static int of_parse_clkspec(const struct device_node
*np
, int index
,
4082 const char *name
, struct of_phandle_args
*out_args
)
4086 /* Walk up the tree of devices looking for a clock property that matches */
4089 * For named clocks, first look up the name in the
4090 * "clock-names" property. If it cannot be found, then index
4091 * will be an error code and of_parse_phandle_with_args() will
4095 index
= of_property_match_string(np
, "clock-names", name
);
4096 ret
= of_parse_phandle_with_args(np
, "clocks", "#clock-cells",
4100 if (name
&& index
>= 0)
4104 * No matching clock found on this node. If the parent node
4105 * has a "clock-ranges" property, then we can try one of its
4109 if (np
&& !of_get_property(np
, "clock-ranges", NULL
))
4117 static struct clk_hw
*
4118 __of_clk_get_hw_from_provider(struct of_clk_provider
*provider
,
4119 struct of_phandle_args
*clkspec
)
4123 if (provider
->get_hw
)
4124 return provider
->get_hw(clkspec
, provider
->data
);
4126 clk
= provider
->get(clkspec
, provider
->data
);
4128 return ERR_CAST(clk
);
4129 return __clk_get_hw(clk
);
4132 static struct clk_hw
*
4133 of_clk_get_hw_from_clkspec(struct of_phandle_args
*clkspec
)
4135 struct of_clk_provider
*provider
;
4136 struct clk_hw
*hw
= ERR_PTR(-EPROBE_DEFER
);
4139 return ERR_PTR(-EINVAL
);
4141 mutex_lock(&of_clk_mutex
);
4142 list_for_each_entry(provider
, &of_clk_providers
, link
) {
4143 if (provider
->node
== clkspec
->np
) {
4144 hw
= __of_clk_get_hw_from_provider(provider
, clkspec
);
4149 mutex_unlock(&of_clk_mutex
);
4155 * of_clk_get_from_provider() - Lookup a clock from a clock provider
4156 * @clkspec: pointer to a clock specifier data structure
4158 * This function looks up a struct clk from the registered list of clock
4159 * providers, an input is a clock specifier data structure as returned
4160 * from the of_parse_phandle_with_args() function call.
4162 struct clk
*of_clk_get_from_provider(struct of_phandle_args
*clkspec
)
4164 struct clk_hw
*hw
= of_clk_get_hw_from_clkspec(clkspec
);
4166 return clk_hw_create_clk(NULL
, hw
, NULL
, __func__
);
4168 EXPORT_SYMBOL_GPL(of_clk_get_from_provider
);
4170 struct clk_hw
*of_clk_get_hw(struct device_node
*np
, int index
,
4175 struct of_phandle_args clkspec
;
4177 ret
= of_parse_clkspec(np
, index
, con_id
, &clkspec
);
4179 return ERR_PTR(ret
);
4181 hw
= of_clk_get_hw_from_clkspec(&clkspec
);
4182 of_node_put(clkspec
.np
);
4187 static struct clk
*__of_clk_get(struct device_node
*np
,
4188 int index
, const char *dev_id
,
4191 struct clk_hw
*hw
= of_clk_get_hw(np
, index
, con_id
);
4193 return clk_hw_create_clk(NULL
, hw
, dev_id
, con_id
);
4196 struct clk
*of_clk_get(struct device_node
*np
, int index
)
4198 return __of_clk_get(np
, index
, np
->full_name
, NULL
);
4200 EXPORT_SYMBOL(of_clk_get
);
4203 * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
4204 * @np: pointer to clock consumer node
4205 * @name: name of consumer's clock input, or NULL for the first clock reference
4207 * This function parses the clocks and clock-names properties,
4208 * and uses them to look up the struct clk from the registered list of clock
4211 struct clk
*of_clk_get_by_name(struct device_node
*np
, const char *name
)
4214 return ERR_PTR(-ENOENT
);
4216 return __of_clk_get(np
, 0, np
->full_name
, name
);
4218 EXPORT_SYMBOL(of_clk_get_by_name
);
/**
 * of_clk_get_parent_count() - Count the number of clocks a device node has
 * @np: device node to count
 *
 * Returns: The number of clocks that are possible parents of this node
 */
unsigned int of_clk_get_parent_count(struct device_node *np)
{
	int count;

	count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
	/* Errors (missing/malformed property) count as zero parents. */
	if (count < 0)
		return 0;

	return count;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
4238 const char *of_clk_get_parent_name(struct device_node
*np
, int index
)
4240 struct of_phandle_args clkspec
;
4241 struct property
*prop
;
4242 const char *clk_name
;
4249 rc
= of_parse_phandle_with_args(np
, "clocks", "#clock-cells", index
,
4254 index
= clkspec
.args_count
? clkspec
.args
[0] : 0;
4257 /* if there is an indices property, use it to transfer the index
4258 * specified into an array offset for the clock-output-names property.
4260 of_property_for_each_u32(clkspec
.np
, "clock-indices", prop
, vp
, pv
) {
4267 /* We went off the end of 'clock-indices' without finding it */
4271 if (of_property_read_string_index(clkspec
.np
, "clock-output-names",
4275 * Best effort to get the name if the clock has been
4276 * registered with the framework. If the clock isn't
4277 * registered, we return the node name as the name of
4278 * the clock as long as #clock-cells = 0.
4280 clk
= of_clk_get_from_provider(&clkspec
);
4282 if (clkspec
.args_count
== 0)
4283 clk_name
= clkspec
.np
->name
;
4287 clk_name
= __clk_get_name(clk
);
4293 of_node_put(clkspec
.np
);
4296 EXPORT_SYMBOL_GPL(of_clk_get_parent_name
);
4299 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
4301 * @np: Device node pointer associated with clock provider
4302 * @parents: pointer to char array that hold the parents' names
4303 * @size: size of the @parents array
4305 * Return: number of parents for the clock node.
4307 int of_clk_parent_fill(struct device_node
*np
, const char **parents
,
4312 while (i
< size
&& (parents
[i
] = of_clk_get_parent_name(np
, i
)) != NULL
)
4317 EXPORT_SYMBOL_GPL(of_clk_parent_fill
);
4319 struct clock_provider
{
4320 void (*clk_init_cb
)(struct device_node
*);
4321 struct device_node
*np
;
4322 struct list_head node
;
4326 * This function looks for a parent clock. If there is one, then it
4327 * checks that the provider for this parent clock was initialized, in
4328 * this case the parent clock will be ready.
4330 static int parent_ready(struct device_node
*np
)
4335 struct clk
*clk
= of_clk_get(np
, i
);
4337 /* this parent is ready we can check the next one */
4344 /* at least one parent is not ready, we exit now */
4345 if (PTR_ERR(clk
) == -EPROBE_DEFER
)
4349 * Here we make assumption that the device tree is
4350 * written correctly. So an error means that there is
4351 * no more parent. As we didn't exit yet, then the
4352 * previous parent are ready. If there is no clock
4353 * parent, no need to wait for them, then we can
4354 * consider their absence as being ready
4361 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
4362 * @np: Device node pointer associated with clock provider
4363 * @index: clock index
4364 * @flags: pointer to top-level framework flags
4366 * Detects if the clock-critical property exists and, if so, sets the
4367 * corresponding CLK_IS_CRITICAL flag.
4369 * Do not use this function. It exists only for legacy Device Tree
4370 * bindings, such as the one-clock-per-node style that are outdated.
4371 * Those bindings typically put all clock data into .dts and the Linux
4372 * driver has no clock data, thus making it impossible to set this flag
4373 * correctly from the driver. Only those drivers may call
4374 * of_clk_detect_critical from their setup functions.
4376 * Return: error code or zero on success
4378 int of_clk_detect_critical(struct device_node
*np
,
4379 int index
, unsigned long *flags
)
4381 struct property
*prop
;
4388 of_property_for_each_u32(np
, "clock-critical", prop
, cur
, idx
)
4390 *flags
|= CLK_IS_CRITICAL
;
4396 * of_clk_init() - Scan and init clock providers from the DT
4397 * @matches: array of compatible values and init functions for providers.
4399 * This function scans the device tree for matching clock providers
4400 * and calls their initialization functions. It also does it by trying
4401 * to follow the dependencies.
4403 void __init
of_clk_init(const struct of_device_id
*matches
)
4405 const struct of_device_id
*match
;
4406 struct device_node
*np
;
4407 struct clock_provider
*clk_provider
, *next
;
4410 LIST_HEAD(clk_provider_list
);
4413 matches
= &__clk_of_table
;
4415 /* First prepare the list of the clocks providers */
4416 for_each_matching_node_and_match(np
, matches
, &match
) {
4417 struct clock_provider
*parent
;
4419 if (!of_device_is_available(np
))
4422 parent
= kzalloc(sizeof(*parent
), GFP_KERNEL
);
4424 list_for_each_entry_safe(clk_provider
, next
,
4425 &clk_provider_list
, node
) {
4426 list_del(&clk_provider
->node
);
4427 of_node_put(clk_provider
->np
);
4428 kfree(clk_provider
);
4434 parent
->clk_init_cb
= match
->data
;
4435 parent
->np
= of_node_get(np
);
4436 list_add_tail(&parent
->node
, &clk_provider_list
);
4439 while (!list_empty(&clk_provider_list
)) {
4440 is_init_done
= false;
4441 list_for_each_entry_safe(clk_provider
, next
,
4442 &clk_provider_list
, node
) {
4443 if (force
|| parent_ready(clk_provider
->np
)) {
4445 /* Don't populate platform devices */
4446 of_node_set_flag(clk_provider
->np
,
4449 clk_provider
->clk_init_cb(clk_provider
->np
);
4450 of_clk_set_defaults(clk_provider
->np
, true);
4452 list_del(&clk_provider
->node
);
4453 of_node_put(clk_provider
->np
);
4454 kfree(clk_provider
);
4455 is_init_done
= true;
4460 * We didn't manage to initialize any of the
4461 * remaining providers during the last loop, so now we
4462 * initialize all the remaining ones unconditionally
4463 * in case the clock parent was not mandatory