#ifndef WB_THROTTLE_H
#define WB_THROTTLE_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/ktime.h>

#include "blk-stat.h"

enum wbt_flags {
	WBT_TRACKED	= 1,	/* write, tracked for throttling */
	WBT_READ	= 2,	/* read */
	WBT_KSWAPD	= 4,	/* write, from kswapd */

	WBT_NR_BITS	= 3,	/* number of bits */
};

enum {
	WBT_NUM_RWQ	= 2,
};

static inline void wbt_clear_state(struct blk_issue_stat *stat)
{
	stat->time &= BLK_STAT_TIME_MASK;
}

static inline enum wbt_flags wbt_stat_to_mask(struct blk_issue_stat *stat)
{
	return (stat->time & BLK_STAT_MASK) >> BLK_STAT_SHIFT;
}

static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags wb_acct)
{
	stat->time |= ((u64) wb_acct) << BLK_STAT_SHIFT;
}

static inline bool wbt_is_tracked(struct blk_issue_stat *stat)
{
	return (stat->time >> BLK_STAT_SHIFT) & WBT_TRACKED;
}

static inline bool wbt_is_read(struct blk_issue_stat *stat)
{
	return (stat->time >> BLK_STAT_SHIFT) & WBT_READ;
}
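
/*
 * Example of the packing the helpers above implement (an illustrative
 * sketch; the BLK_STAT_SHIFT and BLK_STAT_*MASK constants come from
 * blk-stat.h, which reserves the top bits of stat->time for these
 * flags and keeps the issue timestamp in the low bits):
 *
 *	struct blk_issue_stat stat = { .time = 0 };
 *
 *	wbt_track(&stat, WBT_TRACKED | WBT_KSWAPD);
 *	wbt_is_tracked(&stat);		returns true
 *	wbt_is_read(&stat);		returns false
 *	wbt_clear_state(&stat);		flags dropped, timestamp kept
 */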
48 | ||
49 | struct wb_stat_ops { | |
50 | void (*get)(void *, struct blk_rq_stat *); | |
51 | bool (*is_current)(struct blk_rq_stat *); | |
52 | void (*clear)(void *); | |
53 | }; | |
54 | ||
55 | struct rq_wait { | |
56 | wait_queue_head_t wait; | |
57 | atomic_t inflight; | |
58 | }; | |
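
/*
 * Sketch of how the throttling core in blk-wbt.c is expected to use an
 * rq_wait pair (illustrative, not part of this header): a tracked write
 * that would exceed the current limit sleeps on ->wait, every admitted
 * write bumps ->inflight before dispatch, and completion drops it and
 * wakes the queue so a throttled writer can retry:
 *
 *	atomic_inc(&rqw->inflight);	on issue, if below the limit
 *	atomic_dec(&rqw->inflight);	on completion
 *	wake_up_all(&rqw->wait);	let throttled writers re-check
 */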

struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;		/* background writeback */
	unsigned int wb_normal;			/* normal writeback */
	unsigned int wb_max;			/* max throughput writeback */
	int scale_step;				/* current depth scaling step */
	bool scaled_max;			/* limits are at their maximum */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;				/* default window size */
	u64 cur_win_nsec;			/* current window size */

	struct timer_list window_timer;

	s64 sync_issue;				/* issue time of tracked sync request */
	void *sync_cookie;			/* identifies that request */

	unsigned int wc;			/* write cache enabled? */
	unsigned int queue_depth;		/* device queue depth */

	unsigned long last_issue;		/* last non-throttled issue */
	unsigned long last_comp;		/* last non-throttled comp */
	unsigned long min_lat_nsec;		/* latency target */
	struct backing_dev_info *bdi;
	struct rq_wait rq_wait[WBT_NUM_RWQ];	/* kswapd and other writes */

	struct wb_stat_ops *stat_ops;		/* stats callbacks */
	void *ops_data;				/* context for stat_ops */
};
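
/*
 * Rough relationship between the three limits above (the actual
 * calculation lives in blk-wbt.c and shrinks them as scale_step grows):
 * wb_max starts out tracking the device queue depth, wb_normal is about
 * half of wb_max, and wb_background about a quarter, so background
 * writeback always gets the smallest share of the queue.
 */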

static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
	unsigned int i, ret = 0;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		ret += atomic_read(&rwb->rq_wait[i].inflight);

	return ret;
}

struct backing_dev_info;

#ifdef CONFIG_BLK_WBT

void __wbt_done(struct rq_wb *, enum wbt_flags);
void wbt_done(struct rq_wb *, struct blk_issue_stat *);
enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
int wbt_init(struct request_queue *, struct wb_stat_ops *);
void wbt_exit(struct request_queue *);
void wbt_update_limits(struct rq_wb *);
void wbt_requeue(struct rq_wb *, struct blk_issue_stat *);
void wbt_issue(struct rq_wb *, struct blk_issue_stat *);
void wbt_disable(struct rq_wb *);

void wbt_set_queue_depth(struct rq_wb *, unsigned int);
void wbt_set_write_cache(struct rq_wb *, bool);
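
/*
 * Sketch of the expected call flow for a queue with wbt enabled, based
 * on how the block core wires these up (illustrative only; rq->issue_stat
 * is assumed to be the blk_issue_stat embedded in struct request):
 *
 *	wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);	may sleep
 *	wbt_track(&rq->issue_stat, wb_acct);	remember what was charged
 *	wbt_issue(q->rq_wb, &rq->issue_stat);	request goes to the driver
 *	wbt_done(q->rq_wb, &rq->issue_stat);	update stats, wake waiters
 */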

#else

static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
{
}
static inline void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio,
				      spinlock_t *lock)
{
	return 0;
}
static inline int wbt_init(struct request_queue *q, struct wb_stat_ops *ops)
{
	return -EINVAL;
}
static inline void wbt_exit(struct request_queue *q)
{
}
static inline void wbt_update_limits(struct rq_wb *rwb)
{
}
static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_disable(struct rq_wb *rwb)
{
}
static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
{
}
static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
{
}

#endif /* CONFIG_BLK_WBT */

#endif