* Must not be called from within atomic context.
*/
int clk_prepare(struct clk *clk);
+
+/**
+ * clk_unprepare - undo preparation of a clock source
+ * @clk: clock source
+ *
+ * This undoes a previously prepared clock. The caller must balance
+ * the number of prepare and unprepare calls.
+ *
+ * Must not be called from within atomic context.
+ */
+void clk_unprepare(struct clk *clk);
+
int __must_check clk_bulk_prepare(int num_clks,
const struct clk_bulk_data *clks);
+void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
/**
* clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
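(For context, not part of the patch: a minimal caller sketch of the API documented above. The `priv->clk` handle and the error-handling shape are illustrative assumptions; the clk_*() calls themselves are the documented interface.)

	int ret;

	ret = clk_prepare(priv->clk);		/* may sleep; never from atomic context */
	if (ret)
		return ret;

	ret = clk_enable(priv->clk);		/* enable/disable may run in atomic context */
	if (ret) {
		clk_unprepare(priv->clk);	/* undo the matching prepare */
		return ret;
	}

	/* ... clock in use ... */

	clk_disable(priv->clk);
	clk_unprepare(priv->clk);		/* prepare/unprepare now balanced */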
return 0;
}
-static inline int __must_check
-clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
+static inline void clk_unprepare(struct clk *clk)
{
might_sleep();
- return 0;
}
-static inline bool clk_is_enabled_when_prepared(struct clk *clk)
-{
- return false;
-}
-#endif
-
-/**
- * clk_unprepare - undo preparation of a clock source
- * @clk: clock source
- *
- * This undoes a previously prepared clock. The caller must balance
- * the number of prepare and unprepare calls.
- *
- * Must not be called from within atomic context.
- */
-#ifdef CONFIG_HAVE_CLK_PREPARE
-void clk_unprepare(struct clk *clk);
-void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
-#else
-static inline void clk_unprepare(struct clk *clk)
+static inline int __must_check
+clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
{
might_sleep();
+ return 0;
}
+
static inline void clk_bulk_unprepare(int num_clks,
const struct clk_bulk_data *clks)
{
might_sleep();
}
+
+static inline bool clk_is_enabled_when_prepared(struct clk *clk)
+{
+ return false;
+}
#endif
#ifdef CONFIG_HAVE_CLK
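(For context, not part of the patch: a sketch of the bulk variants declared above. The device pointer `dev` and the clock ids "bus"/"core" are illustrative assumptions; clk_bulk_get() fills in the clk handles for the whole array.)

	struct clk_bulk_data clks[] = {
		{ .id = "bus" },
		{ .id = "core" },
	};
	int ret;

	ret = clk_bulk_get(dev, ARRAY_SIZE(clks), clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare(ARRAY_SIZE(clks), clks);	/* may sleep */
	if (ret)
		goto put;

	/* ... enable/use/disable the clocks ... */

	clk_bulk_unprepare(ARRAY_SIZE(clks), clks);
put:
	clk_bulk_put(ARRAY_SIZE(clks), clks);
	return ret;

Note that when CONFIG_HAVE_CLK_PREPARE is unset, the static inline stubs above keep the same calling rules in force: they still call might_sleep(), so calling from atomic context is flagged even on platforms where prepare is a no-op.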