drivers/powercap/dtpm_devfreq.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2021 Linaro Limited
 *
 * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
 *
 * The devfreq device, combined with the energy model and the load,
 * gives an estimate of the power consumption as well as a way of
 * limiting it.
 */
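
/*
 * The estimate implemented below (see get_pd_power_uw()) boils down to:
 *
 *   power_uw = state_power_mw * MICROWATT_PER_MILLIWATT * load / 1024
 *
 * where 'load' is the busy time normalized by _normalize_load() to a
 * 0..1024 fraction of the devfreq sampling window.
 */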
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpumask.h>
#include <linux/devfreq.h>
#include <linux/dtpm.h>
#include <linux/energy_model.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/units.h>
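
/*
 * Per-device instance: the dtpm node registered in the hierarchy, the
 * PM QoS max-frequency request used to enforce the power limit, and
 * the devfreq device it is attached to.
 */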
struct dtpm_devfreq {
        struct dtpm dtpm;
        struct dev_pm_qos_request qos_req;
        struct devfreq *devfreq;
};

static struct dtpm_devfreq *to_dtpm_devfreq(struct dtpm *dtpm)
{
        return container_of(dtpm, struct dtpm_devfreq, dtpm);
}
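
/*
 * Refresh the power range of the node: the minimum and maximum power
 * come from the first and last performance states of the energy model,
 * converted from milliwatts to microwatts as expected by the DTPM core.
 */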
static int update_pd_power_uw(struct dtpm *dtpm)
{
        struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
        struct devfreq *devfreq = dtpm_devfreq->devfreq;
        struct device *dev = devfreq->dev.parent;
        struct em_perf_domain *pd = em_pd_get(dev);

        dtpm->power_min = pd->table[0].power;
        dtpm->power_min *= MICROWATT_PER_MILLIWATT;

        dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;
        dtpm->power_max *= MICROWATT_PER_MILLIWATT;

        return 0;
}
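
/*
 * Walk the energy model table to find the highest performance state
 * whose power fits within the requested limit, cap the device frequency
 * through the PM QoS max-frequency request, and return the power that
 * was actually granted. The limit passed in is expected to have been
 * clamped to the [power_min, power_max] range by the DTPM core, so at
 * least the first state satisfies it.
 */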
static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
{
        struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
        struct devfreq *devfreq = dtpm_devfreq->devfreq;
        struct device *dev = devfreq->dev.parent;
        struct em_perf_domain *pd = em_pd_get(dev);
        unsigned long freq;
        u64 power;
        int i;

        for (i = 0; i < pd->nr_perf_states; i++) {

                power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
                if (power > power_limit)
                        break;
        }

        freq = pd->table[i - 1].frequency;

        dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);

        power_limit = pd->table[i - 1].power * MICROWATT_PER_MILLIWATT;

        return power_limit;
}
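
/*
 * Normalize the busy/total counters reported by devfreq to a 0..1024
 * scale: large counters are pre-shifted so the '<< 10' below cannot
 * overflow, busy_time ends up as the load ratio out of 1024, and
 * total_time is forced to 1024 so callers can shift by 10 instead of
 * dividing.
 */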
static void _normalize_load(struct devfreq_dev_status *status)
{
        if (status->total_time > 0xfffff) {
                status->total_time >>= 10;
                status->busy_time >>= 10;
        }

        status->busy_time <<= 10;
        status->busy_time /= status->total_time ? : 1;

        status->busy_time = status->busy_time ? : 1;
        status->total_time = 1024;
}
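
/*
 * Estimate the current power consumption: take a snapshot of the last
 * devfreq status under the devfreq lock, convert the current frequency
 * from Hz to kHz to match the energy model table, then scale the power
 * of the matching performance state by the normalized load
 * (busy_time / 1024).
 */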
static u64 get_pd_power_uw(struct dtpm *dtpm)
{
        struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
        struct devfreq *devfreq = dtpm_devfreq->devfreq;
        struct device *dev = devfreq->dev.parent;
        struct em_perf_domain *pd = em_pd_get(dev);
        struct devfreq_dev_status status;
        unsigned long freq;
        u64 power;
        int i;

        mutex_lock(&devfreq->lock);
        status = devfreq->last_status;
        mutex_unlock(&devfreq->lock);

        freq = DIV_ROUND_UP(status.current_frequency, HZ_PER_KHZ);
        _normalize_load(&status);

        for (i = 0; i < pd->nr_perf_states; i++) {

                if (pd->table[i].frequency < freq)
                        continue;

                power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
                power *= status.busy_time;
                power >>= 10;

                return power;
        }

        return 0;
}
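
/*
 * Teardown callback invoked by the DTPM core when the node goes away:
 * drop the QoS request if it is still active and free the instance.
 */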
static void pd_release(struct dtpm *dtpm)
{
        struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);

        if (dev_pm_qos_request_active(&dtpm_devfreq->qos_req))
                dev_pm_qos_remove_request(&dtpm_devfreq->qos_req);

        kfree(dtpm_devfreq);
}

static struct dtpm_ops dtpm_ops = {
        .set_power_uw = set_pd_power_limit,
        .get_power_uw = get_pd_power_uw,
        .update_power_uw = update_pd_power_uw,
        .release = pd_release,
};
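
/*
 * Hook a devfreq device into the DTPM hierarchy: make sure an energy
 * model exists (registering one from the OPP table if necessary),
 * allocate and register the dtpm node under its parent, and add the
 * max-frequency QoS request that set_pd_power_limit() will update.
 */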
static int __dtpm_devfreq_setup(struct devfreq *devfreq, struct dtpm *parent)
{
        struct device *dev = devfreq->dev.parent;
        struct dtpm_devfreq *dtpm_devfreq;
        struct em_perf_domain *pd;
        int ret = -ENOMEM;

        pd = em_pd_get(dev);
        if (!pd) {
                ret = dev_pm_opp_of_register_em(dev, NULL);
                if (ret) {
                        pr_err("No energy model available for '%s'\n", dev_name(dev));
                        return -EINVAL;
                }
        }

        dtpm_devfreq = kzalloc(sizeof(*dtpm_devfreq), GFP_KERNEL);
        if (!dtpm_devfreq)
                return -ENOMEM;

        dtpm_init(&dtpm_devfreq->dtpm, &dtpm_ops);

        dtpm_devfreq->devfreq = devfreq;

        ret = dtpm_register(dev_name(dev), &dtpm_devfreq->dtpm, parent);
        if (ret) {
                pr_err("Failed to register '%s': %d\n", dev_name(dev), ret);
                kfree(dtpm_devfreq);
                return ret;
        }

        ret = dev_pm_qos_add_request(dev, &dtpm_devfreq->qos_req,
                                     DEV_PM_QOS_MAX_FREQUENCY,
                                     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
        if (ret) {
                pr_err("Failed to add QoS request: %d\n", ret);
                goto out_dtpm_unregister;
        }

        dtpm_update_power(&dtpm_devfreq->dtpm);

        return 0;

out_dtpm_unregister:
        dtpm_unregister(&dtpm_devfreq->dtpm);

        return ret;
}
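
/*
 * Subsystem hook called by the DTPM core for each 'devfreq' node of the
 * hierarchy description: device tree nodes without a matching devfreq
 * device are silently skipped.
 */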
static int dtpm_devfreq_setup(struct dtpm *dtpm, struct device_node *np)
{
        struct devfreq *devfreq;

        devfreq = devfreq_get_devfreq_by_node(np);
        if (IS_ERR(devfreq))
                return 0;

        return __dtpm_devfreq_setup(devfreq, dtpm);
}

struct dtpm_subsys_ops dtpm_devfreq_ops = {
        .name = KBUILD_MODNAME,
        .setup = dtpm_devfreq_setup,
};