/*
 * block/blk-wbt.h — writeback throttling (wbt) interface
 */
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
e34cbd30
JA
2#ifndef WB_THROTTLE_H
3#define WB_THROTTLE_H
4
5#include <linux/kernel.h>
6#include <linux/atomic.h>
7#include <linux/wait.h>
8#include <linux/timer.h>
9#include <linux/ktime.h>
10
11#include "blk-stat.h"
12
/*
 * Per-request wbt accounting flags, stored in the reserved upper bits
 * of the issue stat (see wbt_track()/wbt_stat_to_mask() below).
 */
enum wbt_flags {
	WBT_TRACKED	= 1,	/* write, tracked for throttling */
	WBT_READ	= 2,	/* read */
	WBT_KSWAPD	= 4,	/* write, from kswapd */

	WBT_NR_BITS	= 3,	/* number of bits */
};

enum {
	WBT_NUM_RWQ	= 2,	/* number of rq_wait queues in struct rq_wb */
};
24
/*
 * Enable states. Either off, or on by default (done at init time),
 * or on through manual setup in sysfs.
 */
enum {
	WBT_STATE_ON_DEFAULT	= 1,
	WBT_STATE_ON_MANUAL	= 2,
};
e34cbd30
JA
34static inline void wbt_clear_state(struct blk_issue_stat *stat)
35{
88eeca49 36 stat->stat &= ~BLK_STAT_RES_MASK;
e34cbd30
JA
37}
38
39static inline enum wbt_flags wbt_stat_to_mask(struct blk_issue_stat *stat)
40{
88eeca49 41 return (stat->stat & BLK_STAT_RES_MASK) >> BLK_STAT_RES_SHIFT;
e34cbd30
JA
42}
43
44static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags wb_acct)
45{
88eeca49 46 stat->stat |= ((u64) wb_acct) << BLK_STAT_RES_SHIFT;
e34cbd30
JA
47}
48
49static inline bool wbt_is_tracked(struct blk_issue_stat *stat)
50{
88eeca49 51 return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_TRACKED;
e34cbd30
JA
52}
53
54static inline bool wbt_is_read(struct blk_issue_stat *stat)
55{
88eeca49 56 return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_READ;
e34cbd30
JA
57}
58
e34cbd30
JA
59struct rq_wait {
60 wait_queue_head_t wait;
61 atomic_t inflight;
62};
63
64struct rq_wb {
65 /*
66 * Settings that govern how we throttle
67 */
68 unsigned int wb_background; /* background writeback */
69 unsigned int wb_normal; /* normal writeback */
70 unsigned int wb_max; /* max throughput writeback */
71 int scale_step;
72 bool scaled_max;
73
d62118b6
JA
74 short enable_state; /* WBT_STATE_* */
75
e34cbd30
JA
76 /*
77 * Number of consecutive periods where we don't have enough
78 * information to make a firm scale up/down decision.
79 */
80 unsigned int unknown_cnt;
81
82 u64 win_nsec; /* default window size */
83 u64 cur_win_nsec; /* current window size */
84
34dbad5d 85 struct blk_stat_callback *cb;
e34cbd30
JA
86
87 s64 sync_issue;
88 void *sync_cookie;
89
90 unsigned int wc;
91 unsigned int queue_depth;
92
93 unsigned long last_issue; /* last non-throttled issue */
94 unsigned long last_comp; /* last non-throttled comp */
95 unsigned long min_lat_nsec;
d8a0cbfd 96 struct request_queue *queue;
e34cbd30 97 struct rq_wait rq_wait[WBT_NUM_RWQ];
e34cbd30
JA
98};
99
100static inline unsigned int wbt_inflight(struct rq_wb *rwb)
101{
102 unsigned int i, ret = 0;
103
104 for (i = 0; i < WBT_NUM_RWQ; i++)
105 ret += atomic_read(&rwb->rq_wait[i].inflight);
106
107 return ret;
108}
109
e34cbd30
JA
110#ifdef CONFIG_BLK_WBT
111
112void __wbt_done(struct rq_wb *, enum wbt_flags);
113void wbt_done(struct rq_wb *, struct blk_issue_stat *);
114enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
8054b89f 115int wbt_init(struct request_queue *);
e34cbd30
JA
116void wbt_exit(struct request_queue *);
117void wbt_update_limits(struct rq_wb *);
118void wbt_requeue(struct rq_wb *, struct blk_issue_stat *);
119void wbt_issue(struct rq_wb *, struct blk_issue_stat *);
fa224eed 120void wbt_disable_default(struct request_queue *);
8330cdb0 121void wbt_enable_default(struct request_queue *);
e34cbd30
JA
122
123void wbt_set_queue_depth(struct rq_wb *, unsigned int);
124void wbt_set_write_cache(struct rq_wb *, bool);
125
80e091d1
JA
126u64 wbt_default_latency_nsec(struct request_queue *);
127
e34cbd30
JA
128#else
129
130static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
131{
132}
133static inline void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
134{
135}
136static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio,
137 spinlock_t *lock)
138{
139 return 0;
140}
8054b89f 141static inline int wbt_init(struct request_queue *q)
e34cbd30
JA
142{
143 return -EINVAL;
144}
145static inline void wbt_exit(struct request_queue *q)
146{
147}
148static inline void wbt_update_limits(struct rq_wb *rwb)
149{
150}
151static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
152{
153}
154static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
155{
156}
fa224eed 157static inline void wbt_disable_default(struct request_queue *q)
e34cbd30
JA
158{
159}
8330cdb0
JK
160static inline void wbt_enable_default(struct request_queue *q)
161{
162}
e34cbd30
JA
163static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
164{
165}
166static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
167{
168}
80e091d1
JA
169static inline u64 wbt_default_latency_nsec(struct request_queue *q)
170{
171 return 0;
172}
e34cbd30
JA
173
174#endif /* CONFIG_BLK_WBT */
175
176#endif