drivers/md/bcache/io.c
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/* Bios with headers */

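/*
 * Note (added): a struct bbio pairs a struct bio with the single bkey
 * it is doing IO to/from, plus a submission timestamp.  The bio sits at
 * the end of the struct so that its inline bvec table can follow it;
 * container_of() on the embedded bio recovers the bbio in the helpers
 * below.
 */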
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	mempool_free(b, c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

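	/*
	 * Note (added): this bio_init() variant takes the bvec table and
	 * its size, pointing bi_io_vec at the inline vecs and capping
	 * bi_max_vecs at bucket_pages(c), so neither needs to be set up
	 * by hand afterwards.
	 */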
	bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));

	return bio;
}

void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

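	/*
	 * Note (added): the bbio's key holds exactly one pointer; decode
	 * it into a starting sector (PTR_OFFSET) and a target cache
	 * device (PTR_CACHE) before submitting.
	 */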
	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_bdev		= PTR_CACHE(c, &b->key, 0)->bdev;

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private);
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

/* IO errors */

void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */
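	/*
	 * Note (added): that is, each decay step multiplies the error
	 * count by 127/128, so it has halved after n steps when
	 * (127/128)^n = 1/2, i.e. n = log2(1/2)/log2(127/128) ~= 88.4,
	 * and one step happens per error_decay ("refresh") IOs.
	 */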

	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

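	/*
	 * Note (added): io_errors is kept in fixed point: each error adds
	 * 1 << IO_ERROR_SHIFT, and the count is shifted back down before
	 * being compared against error_limit, so the 127/128 decay above
	 * keeps sub-integer precision.
	 */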
	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}

void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      int error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = op_is_write(bio_op(bio))
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

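	/*
	 * Note (added): c->congested drifts negative as IOs blow past the
	 * latency threshold, and climbs back toward zero one fast IO at a
	 * time, so its magnitude tracks how congested the cache is.
	 */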
	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
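			/*
			 * Note (added): dividing by 1024 rather than 1000
			 * is presumably a cheap power-of-two approximation
			 * of the us -> ms conversion.
			 */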
			int ms = us / 1024;
			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}

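/*
 * Note (added): final completion path for a bbio: account errors and
 * latency, then drop the bio's reference and the closure that was
 * waiting on the IO.
 */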
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    int error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}