1 // SPDX-License-Identifier: GPL-2.0
3 * System Control and Management Interface (SCMI) Performance Protocol
5 * Copyright (C) 2018-2023 ARM Ltd.
8 #define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
10 #include <linux/bits.h>
11 #include <linux/hashtable.h>
13 #include <linux/log2.h>
14 #include <linux/module.h>
16 #include <linux/platform_device.h>
17 #include <linux/pm_opp.h>
18 #include <linux/scmi_protocol.h>
19 #include <linux/sort.h>
20 #include <linux/xarray.h>
22 #include <trace/events/scmi.h>
24 #include "protocols.h"
/* SCMI Performance protocol message IDs (SCMI spec, Performance chapter). */
enum scmi_performance_protocol_cmd {
	PERF_DOMAIN_ATTRIBUTES = 0x3,
	PERF_DESCRIBE_LEVELS = 0x4,
	PERF_LIMITS_SET = 0x5,
	PERF_LIMITS_GET = 0x6,
	/* PERF_LEVEL_SET/GET are referenced throughout this file; they were
	 * missing from this truncated copy and are restored here. */
	PERF_LEVEL_SET = 0x7,
	PERF_LEVEL_GET = 0x8,
	PERF_NOTIFY_LIMITS = 0x9,
	PERF_NOTIFY_LEVEL = 0xa,
	PERF_DESCRIBE_FASTCHANNEL = 0xb,
	PERF_DOMAIN_NAME_GET = 0xc,
};
54 struct hlist_node hash
;
57 struct scmi_msg_resp_perf_attributes
{
60 #define POWER_SCALE_IN_MILLIWATT(x) ((x) & BIT(0))
61 #define POWER_SCALE_IN_MICROWATT(x) ((x) & BIT(1))
62 __le32 stats_addr_low
;
63 __le32 stats_addr_high
;
67 struct scmi_msg_resp_perf_domain_attributes
{
69 #define SUPPORTS_SET_LIMITS(x) ((x) & BIT(31))
70 #define SUPPORTS_SET_PERF_LVL(x) ((x) & BIT(30))
71 #define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29))
72 #define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28))
73 #define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27))
74 #define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(26))
75 #define SUPPORTS_LEVEL_INDEXING(x) ((x) & BIT(25))
77 __le32 sustained_freq_khz
;
78 __le32 sustained_perf_level
;
79 u8 name
[SCMI_SHORT_NAME_MAX_SIZE
];
82 struct scmi_msg_perf_describe_levels
{
87 struct scmi_perf_set_limits
{
93 struct scmi_perf_get_limits
{
98 struct scmi_perf_set_level
{
103 struct scmi_perf_notify_level_or_limits
{
105 __le32 notify_enable
;
108 struct scmi_perf_limits_notify_payld
{
115 struct scmi_perf_level_notify_payld
{
118 __le32 performance_level
;
121 struct scmi_msg_resp_perf_describe_levels
{
123 __le16 num_remaining
;
127 __le16 transition_latency_us
;
132 struct scmi_msg_resp_perf_describe_levels_v4
{
134 __le16 num_remaining
;
138 __le16 transition_latency_us
;
140 __le32 indicative_freq
;
145 struct perf_dom_info
{
149 bool perf_limit_notify
;
150 bool perf_level_notify
;
151 bool perf_fastchannels
;
152 bool level_indexing_mode
;
154 u32 sustained_freq_khz
;
155 u32 sustained_perf_level
;
157 char name
[SCMI_MAX_STR_SIZE
];
158 struct scmi_opp opp
[MAX_OPPS
];
159 struct scmi_fc_info
*fc_info
;
160 struct xarray opps_by_idx
;
161 struct xarray opps_by_lvl
;
162 DECLARE_HASHTABLE(opps_by_freq
, ilog2(MAX_OPPS
));
/*
 * Find the scmi_opp with the given indicative frequency in hashtable __htp,
 * evaluating to the matching entry (or an unmatched iterator result when
 * absent). The statement-expression wrapper was missing from this truncated
 * copy; without it the macro cannot yield a value.
 */
#define LOOKUP_BY_FREQ(__htp, __freq)					\
({									\
		/* u32 cast is needed to pick right hash func */	\
		u32 f_ = (u32)(__freq);					\
		struct scmi_opp *_opp;					\
									\
		hash_for_each_possible((__htp), _opp, hash, f_)		\
			if (_opp->indicative_freq == f_)		\
				break;					\
									\
		_opp;							\
})
177 struct scmi_perf_info
{
180 enum scmi_power_scale power_scale
;
183 struct perf_dom_info
*dom_info
;
186 static enum scmi_performance_protocol_cmd evt_2_cmd
[] = {
191 static int scmi_perf_attributes_get(const struct scmi_protocol_handle
*ph
,
192 struct scmi_perf_info
*pi
)
196 struct scmi_msg_resp_perf_attributes
*attr
;
198 ret
= ph
->xops
->xfer_get_init(ph
, PROTOCOL_ATTRIBUTES
, 0,
205 ret
= ph
->xops
->do_xfer(ph
, t
);
207 u16 flags
= le16_to_cpu(attr
->flags
);
209 pi
->num_domains
= le16_to_cpu(attr
->num_domains
);
211 if (POWER_SCALE_IN_MILLIWATT(flags
))
212 pi
->power_scale
= SCMI_POWER_MILLIWATTS
;
213 if (PROTOCOL_REV_MAJOR(pi
->version
) >= 0x3)
214 if (POWER_SCALE_IN_MICROWATT(flags
))
215 pi
->power_scale
= SCMI_POWER_MICROWATTS
;
217 pi
->stats_addr
= le32_to_cpu(attr
->stats_addr_low
) |
218 (u64
)le32_to_cpu(attr
->stats_addr_high
) << 32;
219 pi
->stats_size
= le32_to_cpu(attr
->stats_size
);
222 ph
->xops
->xfer_put(ph
, t
);
226 static void scmi_perf_xa_destroy(void *data
)
229 struct scmi_perf_info
*pinfo
= data
;
231 for (domain
= 0; domain
< pinfo
->num_domains
; domain
++) {
232 xa_destroy(&((pinfo
->dom_info
+ domain
)->opps_by_idx
));
233 xa_destroy(&((pinfo
->dom_info
+ domain
)->opps_by_lvl
));
238 scmi_perf_domain_attributes_get(const struct scmi_protocol_handle
*ph
,
239 struct perf_dom_info
*dom_info
,
245 struct scmi_msg_resp_perf_domain_attributes
*attr
;
247 ret
= ph
->xops
->xfer_get_init(ph
, PERF_DOMAIN_ATTRIBUTES
,
248 sizeof(dom_info
->id
), sizeof(*attr
), &t
);
252 put_unaligned_le32(dom_info
->id
, t
->tx
.buf
);
255 ret
= ph
->xops
->do_xfer(ph
, t
);
257 flags
= le32_to_cpu(attr
->flags
);
259 dom_info
->set_limits
= SUPPORTS_SET_LIMITS(flags
);
260 dom_info
->set_perf
= SUPPORTS_SET_PERF_LVL(flags
);
261 dom_info
->perf_limit_notify
= SUPPORTS_PERF_LIMIT_NOTIFY(flags
);
262 dom_info
->perf_level_notify
= SUPPORTS_PERF_LEVEL_NOTIFY(flags
);
263 dom_info
->perf_fastchannels
= SUPPORTS_PERF_FASTCHANNELS(flags
);
264 if (PROTOCOL_REV_MAJOR(version
) >= 0x4)
265 dom_info
->level_indexing_mode
=
266 SUPPORTS_LEVEL_INDEXING(flags
);
267 dom_info
->sustained_freq_khz
=
268 le32_to_cpu(attr
->sustained_freq_khz
);
269 dom_info
->sustained_perf_level
=
270 le32_to_cpu(attr
->sustained_perf_level
);
271 if (!dom_info
->sustained_freq_khz
||
272 !dom_info
->sustained_perf_level
)
273 /* CPUFreq converts to kHz, hence default 1000 */
274 dom_info
->mult_factor
= 1000;
276 dom_info
->mult_factor
=
277 (dom_info
->sustained_freq_khz
* 1000) /
278 dom_info
->sustained_perf_level
;
279 strscpy(dom_info
->name
, attr
->name
, SCMI_SHORT_NAME_MAX_SIZE
);
282 ph
->xops
->xfer_put(ph
, t
);
285 * If supported overwrite short name with the extended one;
286 * on error just carry on and use already provided short name.
288 if (!ret
&& PROTOCOL_REV_MAJOR(version
) >= 0x3 &&
289 SUPPORTS_EXTENDED_NAMES(flags
))
290 ph
->hops
->extended_name_get(ph
, PERF_DOMAIN_NAME_GET
,
291 dom_info
->id
, dom_info
->name
,
294 if (dom_info
->level_indexing_mode
) {
295 xa_init(&dom_info
->opps_by_idx
);
296 xa_init(&dom_info
->opps_by_lvl
);
297 hash_init(dom_info
->opps_by_freq
);
303 static int opp_cmp_func(const void *opp1
, const void *opp2
)
305 const struct scmi_opp
*t1
= opp1
, *t2
= opp2
;
307 return t1
->perf
- t2
->perf
;
310 struct scmi_perf_ipriv
{
312 struct perf_dom_info
*perf_dom
;
315 static void iter_perf_levels_prepare_message(void *message
,
316 unsigned int desc_index
,
319 struct scmi_msg_perf_describe_levels
*msg
= message
;
320 const struct scmi_perf_ipriv
*p
= priv
;
322 msg
->domain
= cpu_to_le32(p
->perf_dom
->id
);
323 /* Set the number of OPPs to be skipped/already read */
324 msg
->level_index
= cpu_to_le32(desc_index
);
327 static int iter_perf_levels_update_state(struct scmi_iterator_state
*st
,
328 const void *response
, void *priv
)
330 const struct scmi_msg_resp_perf_describe_levels
*r
= response
;
332 st
->num_returned
= le16_to_cpu(r
->num_returned
);
333 st
->num_remaining
= le16_to_cpu(r
->num_remaining
);
339 process_response_opp(struct scmi_opp
*opp
, unsigned int loop_idx
,
340 const struct scmi_msg_resp_perf_describe_levels
*r
)
342 opp
->perf
= le32_to_cpu(r
->opp
[loop_idx
].perf_val
);
343 opp
->power
= le32_to_cpu(r
->opp
[loop_idx
].power
);
344 opp
->trans_latency_us
=
345 le16_to_cpu(r
->opp
[loop_idx
].transition_latency_us
);
349 process_response_opp_v4(struct perf_dom_info
*dom
, struct scmi_opp
*opp
,
350 unsigned int loop_idx
,
351 const struct scmi_msg_resp_perf_describe_levels_v4
*r
)
353 opp
->perf
= le32_to_cpu(r
->opp
[loop_idx
].perf_val
);
354 opp
->power
= le32_to_cpu(r
->opp
[loop_idx
].power
);
355 opp
->trans_latency_us
=
356 le16_to_cpu(r
->opp
[loop_idx
].transition_latency_us
);
358 /* Note that PERF v4 reports always five 32-bit words */
359 opp
->indicative_freq
= le32_to_cpu(r
->opp
[loop_idx
].indicative_freq
);
360 if (dom
->level_indexing_mode
) {
361 opp
->level_index
= le32_to_cpu(r
->opp
[loop_idx
].level_index
);
363 xa_store(&dom
->opps_by_idx
, opp
->level_index
, opp
, GFP_KERNEL
);
364 xa_store(&dom
->opps_by_lvl
, opp
->perf
, opp
, GFP_KERNEL
);
365 hash_add(dom
->opps_by_freq
, &opp
->hash
, opp
->indicative_freq
);
370 iter_perf_levels_process_response(const struct scmi_protocol_handle
*ph
,
371 const void *response
,
372 struct scmi_iterator_state
*st
, void *priv
)
374 struct scmi_opp
*opp
;
375 struct scmi_perf_ipriv
*p
= priv
;
377 opp
= &p
->perf_dom
->opp
[st
->desc_index
+ st
->loop_idx
];
378 if (PROTOCOL_REV_MAJOR(p
->version
) <= 0x3)
379 process_response_opp(opp
, st
->loop_idx
, response
);
381 process_response_opp_v4(p
->perf_dom
, opp
, st
->loop_idx
,
383 p
->perf_dom
->opp_count
++;
385 dev_dbg(ph
->dev
, "Level %d Power %d Latency %dus Ifreq %d Index %d\n",
386 opp
->perf
, opp
->power
, opp
->trans_latency_us
,
387 opp
->indicative_freq
, opp
->level_index
);
393 scmi_perf_describe_levels_get(const struct scmi_protocol_handle
*ph
,
394 struct perf_dom_info
*perf_dom
, u32 version
)
398 struct scmi_iterator_ops ops
= {
399 .prepare_message
= iter_perf_levels_prepare_message
,
400 .update_state
= iter_perf_levels_update_state
,
401 .process_response
= iter_perf_levels_process_response
,
403 struct scmi_perf_ipriv ppriv
= {
405 .perf_dom
= perf_dom
,
408 iter
= ph
->hops
->iter_response_init(ph
, &ops
, MAX_OPPS
,
409 PERF_DESCRIBE_LEVELS
,
410 sizeof(struct scmi_msg_perf_describe_levels
),
413 return PTR_ERR(iter
);
415 ret
= ph
->hops
->iter_response_run(iter
);
419 if (perf_dom
->opp_count
)
420 sort(perf_dom
->opp
, perf_dom
->opp_count
,
421 sizeof(struct scmi_opp
), opp_cmp_func
, NULL
);
426 static int scmi_perf_msg_limits_set(const struct scmi_protocol_handle
*ph
,
427 u32 domain
, u32 max_perf
, u32 min_perf
)
431 struct scmi_perf_set_limits
*limits
;
433 ret
= ph
->xops
->xfer_get_init(ph
, PERF_LIMITS_SET
,
434 sizeof(*limits
), 0, &t
);
439 limits
->domain
= cpu_to_le32(domain
);
440 limits
->max_level
= cpu_to_le32(max_perf
);
441 limits
->min_level
= cpu_to_le32(min_perf
);
443 ret
= ph
->xops
->do_xfer(ph
, t
);
445 ph
->xops
->xfer_put(ph
, t
);
449 static inline struct perf_dom_info
*
450 scmi_perf_domain_lookup(const struct scmi_protocol_handle
*ph
, u32 domain
)
452 struct scmi_perf_info
*pi
= ph
->get_priv(ph
);
454 if (domain
>= pi
->num_domains
)
455 return ERR_PTR(-EINVAL
);
457 return pi
->dom_info
+ domain
;
460 static int __scmi_perf_limits_set(const struct scmi_protocol_handle
*ph
,
461 struct perf_dom_info
*dom
, u32 max_perf
,
464 if (dom
->fc_info
&& dom
->fc_info
[PERF_FC_LIMIT
].set_addr
) {
465 struct scmi_fc_info
*fci
= &dom
->fc_info
[PERF_FC_LIMIT
];
467 trace_scmi_fc_call(SCMI_PROTOCOL_PERF
, PERF_LIMITS_SET
,
468 dom
->id
, min_perf
, max_perf
);
469 iowrite32(max_perf
, fci
->set_addr
);
470 iowrite32(min_perf
, fci
->set_addr
+ 4);
471 ph
->hops
->fastchannel_db_ring(fci
->set_db
);
475 return scmi_perf_msg_limits_set(ph
, dom
->id
, max_perf
, min_perf
);
478 static int scmi_perf_limits_set(const struct scmi_protocol_handle
*ph
,
479 u32 domain
, u32 max_perf
, u32 min_perf
)
481 struct scmi_perf_info
*pi
= ph
->get_priv(ph
);
482 struct perf_dom_info
*dom
;
484 dom
= scmi_perf_domain_lookup(ph
, domain
);
488 if (PROTOCOL_REV_MAJOR(pi
->version
) >= 0x3 && !max_perf
&& !min_perf
)
491 if (dom
->level_indexing_mode
) {
492 struct scmi_opp
*opp
;
495 opp
= xa_load(&dom
->opps_by_lvl
, min_perf
);
499 min_perf
= opp
->level_index
;
503 opp
= xa_load(&dom
->opps_by_lvl
, max_perf
);
507 max_perf
= opp
->level_index
;
511 return __scmi_perf_limits_set(ph
, dom
, max_perf
, min_perf
);
514 static int scmi_perf_msg_limits_get(const struct scmi_protocol_handle
*ph
,
515 u32 domain
, u32
*max_perf
, u32
*min_perf
)
519 struct scmi_perf_get_limits
*limits
;
521 ret
= ph
->xops
->xfer_get_init(ph
, PERF_LIMITS_GET
,
522 sizeof(__le32
), 0, &t
);
526 put_unaligned_le32(domain
, t
->tx
.buf
);
528 ret
= ph
->xops
->do_xfer(ph
, t
);
532 *max_perf
= le32_to_cpu(limits
->max_level
);
533 *min_perf
= le32_to_cpu(limits
->min_level
);
536 ph
->xops
->xfer_put(ph
, t
);
540 static int __scmi_perf_limits_get(const struct scmi_protocol_handle
*ph
,
541 struct perf_dom_info
*dom
, u32
*max_perf
,
544 if (dom
->fc_info
&& dom
->fc_info
[PERF_FC_LIMIT
].get_addr
) {
545 struct scmi_fc_info
*fci
= &dom
->fc_info
[PERF_FC_LIMIT
];
547 *max_perf
= ioread32(fci
->get_addr
);
548 *min_perf
= ioread32(fci
->get_addr
+ 4);
549 trace_scmi_fc_call(SCMI_PROTOCOL_PERF
, PERF_LIMITS_GET
,
550 dom
->id
, *min_perf
, *max_perf
);
554 return scmi_perf_msg_limits_get(ph
, dom
->id
, max_perf
, min_perf
);
557 static int scmi_perf_limits_get(const struct scmi_protocol_handle
*ph
,
558 u32 domain
, u32
*max_perf
, u32
*min_perf
)
561 struct perf_dom_info
*dom
;
563 dom
= scmi_perf_domain_lookup(ph
, domain
);
567 ret
= __scmi_perf_limits_get(ph
, dom
, max_perf
, min_perf
);
571 if (dom
->level_indexing_mode
) {
572 struct scmi_opp
*opp
;
574 opp
= xa_load(&dom
->opps_by_idx
, *min_perf
);
578 *min_perf
= opp
->perf
;
580 opp
= xa_load(&dom
->opps_by_idx
, *max_perf
);
584 *max_perf
= opp
->perf
;
590 static int scmi_perf_msg_level_set(const struct scmi_protocol_handle
*ph
,
591 u32 domain
, u32 level
, bool poll
)
595 struct scmi_perf_set_level
*lvl
;
597 ret
= ph
->xops
->xfer_get_init(ph
, PERF_LEVEL_SET
, sizeof(*lvl
), 0, &t
);
601 t
->hdr
.poll_completion
= poll
;
603 lvl
->domain
= cpu_to_le32(domain
);
604 lvl
->level
= cpu_to_le32(level
);
606 ret
= ph
->xops
->do_xfer(ph
, t
);
608 ph
->xops
->xfer_put(ph
, t
);
612 static int __scmi_perf_level_set(const struct scmi_protocol_handle
*ph
,
613 struct perf_dom_info
*dom
, u32 level
,
616 if (dom
->fc_info
&& dom
->fc_info
[PERF_FC_LEVEL
].set_addr
) {
617 struct scmi_fc_info
*fci
= &dom
->fc_info
[PERF_FC_LEVEL
];
619 trace_scmi_fc_call(SCMI_PROTOCOL_PERF
, PERF_LEVEL_SET
,
621 iowrite32(level
, fci
->set_addr
);
622 ph
->hops
->fastchannel_db_ring(fci
->set_db
);
626 return scmi_perf_msg_level_set(ph
, dom
->id
, level
, poll
);
629 static int scmi_perf_level_set(const struct scmi_protocol_handle
*ph
,
630 u32 domain
, u32 level
, bool poll
)
632 struct perf_dom_info
*dom
;
634 dom
= scmi_perf_domain_lookup(ph
, domain
);
638 if (dom
->level_indexing_mode
) {
639 struct scmi_opp
*opp
;
641 opp
= xa_load(&dom
->opps_by_lvl
, level
);
645 level
= opp
->level_index
;
648 return __scmi_perf_level_set(ph
, dom
, level
, poll
);
651 static int scmi_perf_msg_level_get(const struct scmi_protocol_handle
*ph
,
652 u32 domain
, u32
*level
, bool poll
)
657 ret
= ph
->xops
->xfer_get_init(ph
, PERF_LEVEL_GET
,
658 sizeof(u32
), sizeof(u32
), &t
);
662 t
->hdr
.poll_completion
= poll
;
663 put_unaligned_le32(domain
, t
->tx
.buf
);
665 ret
= ph
->xops
->do_xfer(ph
, t
);
667 *level
= get_unaligned_le32(t
->rx
.buf
);
669 ph
->xops
->xfer_put(ph
, t
);
673 static int __scmi_perf_level_get(const struct scmi_protocol_handle
*ph
,
674 struct perf_dom_info
*dom
, u32
*level
,
677 if (dom
->fc_info
&& dom
->fc_info
[PERF_FC_LEVEL
].get_addr
) {
678 *level
= ioread32(dom
->fc_info
[PERF_FC_LEVEL
].get_addr
);
679 trace_scmi_fc_call(SCMI_PROTOCOL_PERF
, PERF_LEVEL_GET
,
684 return scmi_perf_msg_level_get(ph
, dom
->id
, level
, poll
);
687 static int scmi_perf_level_get(const struct scmi_protocol_handle
*ph
,
688 u32 domain
, u32
*level
, bool poll
)
691 struct perf_dom_info
*dom
;
693 dom
= scmi_perf_domain_lookup(ph
, domain
);
697 ret
= __scmi_perf_level_get(ph
, dom
, level
, poll
);
701 if (dom
->level_indexing_mode
) {
702 struct scmi_opp
*opp
;
704 opp
= xa_load(&dom
->opps_by_idx
, *level
);
714 static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle
*ph
,
715 u32 domain
, int message_id
,
720 struct scmi_perf_notify_level_or_limits
*notify
;
722 ret
= ph
->xops
->xfer_get_init(ph
, message_id
, sizeof(*notify
), 0, &t
);
727 notify
->domain
= cpu_to_le32(domain
);
728 notify
->notify_enable
= enable
? cpu_to_le32(BIT(0)) : 0;
730 ret
= ph
->xops
->do_xfer(ph
, t
);
732 ph
->xops
->xfer_put(ph
, t
);
736 static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle
*ph
,
737 u32 domain
, struct scmi_fc_info
**p_fc
)
739 struct scmi_fc_info
*fc
;
741 fc
= devm_kcalloc(ph
->dev
, PERF_FC_MAX
, sizeof(*fc
), GFP_KERNEL
);
745 ph
->hops
->fastchannel_init(ph
, PERF_DESCRIBE_FASTCHANNEL
,
746 PERF_LEVEL_SET
, 4, domain
,
747 &fc
[PERF_FC_LEVEL
].set_addr
,
748 &fc
[PERF_FC_LEVEL
].set_db
);
750 ph
->hops
->fastchannel_init(ph
, PERF_DESCRIBE_FASTCHANNEL
,
751 PERF_LEVEL_GET
, 4, domain
,
752 &fc
[PERF_FC_LEVEL
].get_addr
, NULL
);
754 ph
->hops
->fastchannel_init(ph
, PERF_DESCRIBE_FASTCHANNEL
,
755 PERF_LIMITS_SET
, 8, domain
,
756 &fc
[PERF_FC_LIMIT
].set_addr
,
757 &fc
[PERF_FC_LIMIT
].set_db
);
759 ph
->hops
->fastchannel_init(ph
, PERF_DESCRIBE_FASTCHANNEL
,
760 PERF_LIMITS_GET
, 8, domain
,
761 &fc
[PERF_FC_LIMIT
].get_addr
, NULL
);
766 /* Device specific ops */
767 static int scmi_dev_domain_id(struct device
*dev
)
769 struct of_phandle_args clkspec
;
771 if (of_parse_phandle_with_args(dev
->of_node
, "clocks", "#clock-cells",
775 return clkspec
.args
[0];
778 static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle
*ph
,
781 int idx
, ret
, domain
;
783 struct scmi_opp
*opp
;
784 struct perf_dom_info
*dom
;
786 domain
= scmi_dev_domain_id(dev
);
790 dom
= scmi_perf_domain_lookup(ph
, domain
);
794 for (opp
= dom
->opp
, idx
= 0; idx
< dom
->opp_count
; idx
++, opp
++) {
795 if (!dom
->level_indexing_mode
)
796 freq
= opp
->perf
* dom
->mult_factor
;
798 freq
= opp
->indicative_freq
* 1000;
800 ret
= dev_pm_opp_add(dev
, freq
, 0);
802 dev_warn(dev
, "failed to add opp %luHz\n", freq
);
805 if (!dom
->level_indexing_mode
)
806 freq
= (--opp
)->perf
* dom
->mult_factor
;
808 freq
= (--opp
)->indicative_freq
* 1000;
809 dev_pm_opp_remove(dev
, freq
);
814 dev_dbg(dev
, "[%d][%s]:: Registered OPP[%d] %lu\n",
815 domain
, dom
->name
, idx
, freq
);
821 scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle
*ph
,
825 struct perf_dom_info
*dom
;
827 domain
= scmi_dev_domain_id(dev
);
831 dom
= scmi_perf_domain_lookup(ph
, domain
);
836 return dom
->opp
[dom
->opp_count
- 1].trans_latency_us
* 1000;
839 static int scmi_dvfs_freq_set(const struct scmi_protocol_handle
*ph
, u32 domain
,
840 unsigned long freq
, bool poll
)
843 struct perf_dom_info
*dom
;
845 dom
= scmi_perf_domain_lookup(ph
, domain
);
849 if (!dom
->level_indexing_mode
) {
850 level
= freq
/ dom
->mult_factor
;
852 struct scmi_opp
*opp
;
854 opp
= LOOKUP_BY_FREQ(dom
->opps_by_freq
, freq
/ 1000);
858 level
= opp
->level_index
;
861 return __scmi_perf_level_set(ph
, dom
, level
, poll
);
864 static int scmi_dvfs_freq_get(const struct scmi_protocol_handle
*ph
, u32 domain
,
865 unsigned long *freq
, bool poll
)
869 struct perf_dom_info
*dom
;
871 dom
= scmi_perf_domain_lookup(ph
, domain
);
875 ret
= __scmi_perf_level_get(ph
, dom
, &level
, poll
);
879 if (!dom
->level_indexing_mode
) {
880 *freq
= level
* dom
->mult_factor
;
882 struct scmi_opp
*opp
;
884 opp
= xa_load(&dom
->opps_by_idx
, level
);
888 *freq
= opp
->indicative_freq
* 1000;
894 static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle
*ph
,
895 u32 domain
, unsigned long *freq
,
896 unsigned long *power
)
898 struct perf_dom_info
*dom
;
899 unsigned long opp_freq
;
900 int idx
, ret
= -EINVAL
;
901 struct scmi_opp
*opp
;
903 dom
= scmi_perf_domain_lookup(ph
, domain
);
907 for (opp
= dom
->opp
, idx
= 0; idx
< dom
->opp_count
; idx
++, opp
++) {
908 if (!dom
->level_indexing_mode
)
909 opp_freq
= opp
->perf
* dom
->mult_factor
;
911 opp_freq
= opp
->indicative_freq
* 1000;
913 if (opp_freq
< *freq
)
925 static bool scmi_fast_switch_possible(const struct scmi_protocol_handle
*ph
,
929 struct perf_dom_info
*dom
;
931 domain
= scmi_dev_domain_id(dev
);
935 dom
= scmi_perf_domain_lookup(ph
, domain
);
939 return dom
->fc_info
&& dom
->fc_info
[PERF_FC_LEVEL
].set_addr
;
942 static enum scmi_power_scale
943 scmi_power_scale_get(const struct scmi_protocol_handle
*ph
)
945 struct scmi_perf_info
*pi
= ph
->get_priv(ph
);
947 return pi
->power_scale
;
950 static const struct scmi_perf_proto_ops perf_proto_ops
= {
951 .limits_set
= scmi_perf_limits_set
,
952 .limits_get
= scmi_perf_limits_get
,
953 .level_set
= scmi_perf_level_set
,
954 .level_get
= scmi_perf_level_get
,
955 .device_domain_id
= scmi_dev_domain_id
,
956 .transition_latency_get
= scmi_dvfs_transition_latency_get
,
957 .device_opps_add
= scmi_dvfs_device_opps_add
,
958 .freq_set
= scmi_dvfs_freq_set
,
959 .freq_get
= scmi_dvfs_freq_get
,
960 .est_power_get
= scmi_dvfs_est_power_get
,
961 .fast_switch_possible
= scmi_fast_switch_possible
,
962 .power_scale_get
= scmi_power_scale_get
,
965 static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle
*ph
,
966 u8 evt_id
, u32 src_id
, bool enable
)
970 if (evt_id
>= ARRAY_SIZE(evt_2_cmd
))
973 cmd_id
= evt_2_cmd
[evt_id
];
974 ret
= scmi_perf_level_limits_notify(ph
, src_id
, cmd_id
, enable
);
976 pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
977 evt_id
, src_id
, ret
);
982 static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle
*ph
,
983 u8 evt_id
, ktime_t timestamp
,
984 const void *payld
, size_t payld_sz
,
985 void *report
, u32
*src_id
)
990 case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED
:
992 const struct scmi_perf_limits_notify_payld
*p
= payld
;
993 struct scmi_perf_limits_report
*r
= report
;
995 if (sizeof(*p
) != payld_sz
)
998 r
->timestamp
= timestamp
;
999 r
->agent_id
= le32_to_cpu(p
->agent_id
);
1000 r
->domain_id
= le32_to_cpu(p
->domain_id
);
1001 r
->range_max
= le32_to_cpu(p
->range_max
);
1002 r
->range_min
= le32_to_cpu(p
->range_min
);
1003 *src_id
= r
->domain_id
;
1007 case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED
:
1009 const struct scmi_perf_level_notify_payld
*p
= payld
;
1010 struct scmi_perf_level_report
*r
= report
;
1012 if (sizeof(*p
) != payld_sz
)
1015 r
->timestamp
= timestamp
;
1016 r
->agent_id
= le32_to_cpu(p
->agent_id
);
1017 r
->domain_id
= le32_to_cpu(p
->domain_id
);
1018 r
->performance_level
= le32_to_cpu(p
->performance_level
);
1019 *src_id
= r
->domain_id
;
1030 static int scmi_perf_get_num_sources(const struct scmi_protocol_handle
*ph
)
1032 struct scmi_perf_info
*pi
= ph
->get_priv(ph
);
1037 return pi
->num_domains
;
1040 static const struct scmi_event perf_events
[] = {
1042 .id
= SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED
,
1043 .max_payld_sz
= sizeof(struct scmi_perf_limits_notify_payld
),
1044 .max_report_sz
= sizeof(struct scmi_perf_limits_report
),
1047 .id
= SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED
,
1048 .max_payld_sz
= sizeof(struct scmi_perf_level_notify_payld
),
1049 .max_report_sz
= sizeof(struct scmi_perf_level_report
),
1053 static const struct scmi_event_ops perf_event_ops
= {
1054 .get_num_sources
= scmi_perf_get_num_sources
,
1055 .set_notify_enabled
= scmi_perf_set_notify_enabled
,
1056 .fill_custom_report
= scmi_perf_fill_custom_report
,
1059 static const struct scmi_protocol_events perf_protocol_events
= {
1060 .queue_sz
= SCMI_PROTO_QUEUE_SZ
,
1061 .ops
= &perf_event_ops
,
1062 .evts
= perf_events
,
1063 .num_events
= ARRAY_SIZE(perf_events
),
1066 static int scmi_perf_protocol_init(const struct scmi_protocol_handle
*ph
)
1070 struct scmi_perf_info
*pinfo
;
1072 ret
= ph
->xops
->version_get(ph
, &version
);
1076 dev_dbg(ph
->dev
, "Performance Version %d.%d\n",
1077 PROTOCOL_REV_MAJOR(version
), PROTOCOL_REV_MINOR(version
));
1079 pinfo
= devm_kzalloc(ph
->dev
, sizeof(*pinfo
), GFP_KERNEL
);
1083 ret
= scmi_perf_attributes_get(ph
, pinfo
);
1087 pinfo
->dom_info
= devm_kcalloc(ph
->dev
, pinfo
->num_domains
,
1088 sizeof(*pinfo
->dom_info
), GFP_KERNEL
);
1089 if (!pinfo
->dom_info
)
1092 for (domain
= 0; domain
< pinfo
->num_domains
; domain
++) {
1093 struct perf_dom_info
*dom
= pinfo
->dom_info
+ domain
;
1096 scmi_perf_domain_attributes_get(ph
, dom
, version
);
1097 scmi_perf_describe_levels_get(ph
, dom
, version
);
1099 if (dom
->perf_fastchannels
)
1100 scmi_perf_domain_init_fc(ph
, dom
->id
, &dom
->fc_info
);
1103 ret
= devm_add_action_or_reset(ph
->dev
, scmi_perf_xa_destroy
, pinfo
);
1107 pinfo
->version
= version
;
1109 return ph
->set_priv(ph
, pinfo
);
1112 static const struct scmi_protocol scmi_perf
= {
1113 .id
= SCMI_PROTOCOL_PERF
,
1114 .owner
= THIS_MODULE
,
1115 .instance_init
= &scmi_perf_protocol_init
,
1116 .ops
= &perf_proto_ops
,
1117 .events
= &perf_protocol_events
,
/* Generate module register/unregister entry points for the perf protocol. */
1120 DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf
, scmi_perf
)