// SPDX-License-Identifier: GPL-2.0
/*
 *  Floating proportions with flexible aging period
 *
 *   Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: Given different types of events, measure the
 * proportion of each type of event over time. The proportions are measured
 * with exponentially decaying history to give smooth transitions. A formula
 * expressing the proportion of events of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * where x_{i,j} is j's number of events in the i-th last time period and x_i
 * is the total number of events in the i-th last time period.
 *
 * Note that the p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1
 *
 * This formula can be straightforwardly computed by maintaining the
 * denominator (let's call it 'd') and, for each event type, its numerator
 * (let's call it 'n_j'). When an event of type 'j' happens, we simply need
 * to do:
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iterating over all event types, we instead shift the numerator of
 * event j lazily when someone asks for the proportion of event j or when
 * event j occurs. This can be trivially implemented by remembering the last
 * period in which something happened with the proportion of type j.
 */
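/*
 * Worked example: with two event types 'a' and 'b' and per-period counts
 *
 *   i (0 = current period):  0   1   2
 *   x_{i,a}:                 2   4   8
 *   x_{i,b}:                 2   0   0
 *   x_i:                     4   4   8
 *
 * the formula above gives
 *
 *   p_a = (2/2 + 4/4 + 8/8) / (4/2 + 4/4 + 8/8) = 3/4
 *   p_b = (2/2 + 0/4 + 0/8) / (4/2 + 4/4 + 8/8) = 1/4
 *
 * and p_a + p_b = 1. With the lazy scheme, if type 'b' then sees no events
 * for three periods, its numerator is simply shifted right by 3 (divided by
 * 8) the next time it is read or incremented, instead of being halved once
 * per period.
 */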
#include <linux/flex_proportions.h>
int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
	int err;

	p->period = 0;
	/* Use 1 to avoid dealing with periods with 0 events... */
	err = percpu_counter_init(&p->events, 1, gfp);
	if (err)
		return err;
	seqcount_init(&p->sequence);
	return 0;
}
void fprop_global_destroy(struct fprop_global *p)
{
	percpu_counter_destroy(&p->events);
}
/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether
 * declaring further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
	s64 events = percpu_counter_sum(&p->events);

	/*
	 * Don't do anything if there are no events.
	 */
	if (events <= 1)
		return false;
	write_seqcount_begin(&p->sequence);
	/* Shifting by the full width or more would be undefined behaviour */
	if (periods < 64)
		events -= events >> periods;
	/* Use addition to avoid losing events happening between sum and set */
	percpu_counter_add(&p->events, -events);
	p->period += periods;
	write_seqcount_end(&p->sequence);

	return true;
}
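/*
 * Usage sketch (names are illustrative): aging is typically driven from a
 * timer or periodic worker owning a struct fprop_global 'g', e.g.
 *
 *	int missed = ...;	(periods elapsed since the last invocation)
 *
 *	if (!fprop_new_period(&g, missed))
 *		...;		(all events aged away, aging may pause)
 *
 * Each declared period roughly halves the global event counter: declaring
 * @periods periods leaves about events >> periods of the previous sum.
 */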
int fprop_local_init_single(struct fprop_local_single *pl)
{
	pl->events = 0;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}
void fprop_local_destroy_single(struct fprop_local_single *pl)
{
}
static void fprop_reflect_period_single(struct fprop_global *p,
					struct fprop_local_single *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG)
		pl->events >>= period - pl->period;
	else
		pl->events = 0;
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}
/* Event of type pl happened */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
	fprop_reflect_period_single(p, pl);
	pl->events++;
	percpu_counter_add(&p->events, 1);
}
/* Return fraction of events of type pl */
void fprop_fraction_single(struct fprop_global *p,
			   struct fprop_local_single *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_single(p, pl);
		num = pl->events;
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors.
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
	int err;

	err = percpu_counter_init(&pl->events, 0, gfp);
	if (err)
		return err;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
	percpu_counter_destroy(&pl->events);
}
static void fprop_reflect_period_percpu(struct fprop_global *p,
					struct fprop_local_percpu *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG) {
		s64 val = percpu_counter_read(&pl->events);

		/* A fast approximate read could be dominated by per-CPU error */
		if (val < (nr_cpu_ids * PROP_BATCH))
			val = percpu_counter_sum(&pl->events);

		percpu_counter_add_batch(&pl->events,
			-val + (val >> (period-pl->period)), PROP_BATCH);
	} else
		percpu_counter_set(&pl->events, 0);
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}
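/*
 * For illustration: if the local counter reads val == 40 and two periods have
 * passed (period - pl->period == 2), the add above contributes
 * -40 + (40 >> 2) == -30, leaving 10 == 40 >> 2 in the counter, the same
 * result as halving it once per missed period.
 */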
/* Event of type pl happened */
void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
		long nr)
{
	fprop_reflect_period_percpu(p, pl);
	percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
	percpu_counter_add(&p->events, nr);
}
void fprop_fraction_percpu(struct fprop_global *p,
			   struct fprop_local_percpu *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_percpu(p, pl);
		num = percpu_counter_read_positive(&pl->events);
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors.
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}
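/*
 * Usage sketch (names are illustrative): a caller that wants the fraction
 * scaled to FPROP_FRAC_BASE can do
 *
 *	unsigned long num, den, frac;
 *
 *	fprop_fraction_percpu(&g, &local, &num, &den);
 *	frac = (num << FPROP_FRAC_SHIFT) / den;
 *
 * The clamping above guarantees den > 0 and num <= den, so the division is
 * safe and frac never exceeds FPROP_FRAC_BASE.
 */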
/*
 * Like __fprop_add_percpu() except that the event is counted only if the
 * given type has a fraction smaller than @max_frac/FPROP_FRAC_BASE.
 */
void __fprop_add_percpu_max(struct fprop_global *p,
		struct fprop_local_percpu *pl, int max_frac, long nr)
{
	if (unlikely(max_frac < FPROP_FRAC_BASE)) {
		unsigned long numerator, denominator;
		s64 tmp;

		fprop_fraction_percpu(p, pl, &numerator, &denominator);
		/* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
		tmp = (u64)denominator * max_frac -
				((u64)numerator << FPROP_FRAC_SHIFT);
		if (tmp < 0) {
			/* Maximum fraction already exceeded */
			return;
		} else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
			/* Add just enough for the fraction to saturate */
			nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
					FPROP_FRAC_BASE - max_frac);
		}
	}

	__fprop_add_percpu(p, pl, nr);
}
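/*
 * Worked example, taking FPROP_FRAC_BASE == 1024 (FPROP_FRAC_SHIFT == 10) for
 * concreteness: with max_frac == 512 (a 50% cap), denominator == 100 and
 * numerator == 40,
 *
 *	tmp = 100 * 512 - (40 << 10) = 51200 - 40960 = 10240
 *
 * Adding nr == 30 would need nr * (1024 - 512) == 15360 > tmp, so nr is
 * clamped to div_u64(10240 + 512 - 1, 512) == 20; after adding 20 the
 * fraction is (40 + 20) / (100 + 20) == 1/2, exactly the allowed maximum.
 */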