7addb33a GKH
From 16961b042db8cc5cf75d782b4255193ad56e1d4f Mon Sep 17 00:00:00 2001
From: Mike Snitzer <snitzer@redhat.com>
Date: Tue, 17 Dec 2013 13:19:11 -0500
Subject: dm thin: initialize dm_thin_new_mapping returned by get_next_mapping

From: Mike Snitzer <snitzer@redhat.com>

commit 16961b042db8cc5cf75d782b4255193ad56e1d4f upstream.

As additional members are added to the dm_thin_new_mapping structure
care should be taken to make sure they get initialized before use.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/md/dm-thin.c |   17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)

--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -760,13 +760,17 @@ static int ensure_next_mapping(struct po
 
 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 {
-	struct dm_thin_new_mapping *r = pool->next_mapping;
+	struct dm_thin_new_mapping *m = pool->next_mapping;
 
 	BUG_ON(!pool->next_mapping);
 
+	memset(m, 0, sizeof(struct dm_thin_new_mapping));
+	INIT_LIST_HEAD(&m->list);
+	m->bio = NULL;
+
 	pool->next_mapping = NULL;
 
-	return r;
+	return m;
 }
 
 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
@@ -778,15 +782,10 @@ static void schedule_copy(struct thin_c
 	struct pool *pool = tc->pool;
 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
-	INIT_LIST_HEAD(&m->list);
-	m->quiesced = 0;
-	m->prepared = 0;
 	m->tc = tc;
 	m->virt_block = virt_block;
 	m->data_block = data_dest;
 	m->cell = cell;
-	m->err = 0;
-	m->bio = NULL;
 
 	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
 		m->quiesced = 1;
@@ -849,15 +848,12 @@ static void schedule_zero(struct thin_c
 	struct pool *pool = tc->pool;
 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
-	INIT_LIST_HEAD(&m->list);
 	m->quiesced = 1;
 	m->prepared = 0;
 	m->tc = tc;
 	m->virt_block = virt_block;
 	m->data_block = data_block;
 	m->cell = cell;
-	m->err = 0;
-	m->bio = NULL;
 
 	/*
 	 * If the whole block of data is being overwritten or we are not
@@ -1047,7 +1043,6 @@ static void process_discard(struct thin_
 	m->data_block = lookup_result.block;
 	m->cell = cell;
 	m->cell2 = cell2;
-	m->err = 0;
 	m->bio = bio;
 
 	if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {