From e819f152104c9f7c9fe50e1aecce6f5d4bf06d65 Mon Sep 17 00:00:00 2001
From: "Eric W. Biederman" <ebiederm@xmission.com>
Date: Wed, 24 Dec 2014 07:20:01 -0600
Subject: mnt: Improve the umount_tree flags

From: "Eric W. Biederman" <ebiederm@xmission.com>

commit e819f152104c9f7c9fe50e1aecce6f5d4bf06d65 upstream.

- Remove the unneeded declaration from pnode.h
- Mark umount_tree static as it has no callers outside of namespace.c
- Define an enumeration of umount_tree's flags.
- Pass umount_tree's flags in by name

This removes the magic numbers 0, 1 and 2, making the code a little
clearer, and makes it possible for there to be lazy unmounts that do
not propagate, which is what __detach_mounts actually wants, for example.
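
For reference, the call-site changes in the diff below amount to the
following mapping from the old numeric "how" argument to the new named
flags; this is a minimal sketch reconstructed from those call sites, not
additional code introduced by the patch:

	enum umount_tree_flags {
		UMOUNT_SYNC = 1,	/* previously implied by how < 2  */
		UMOUNT_PROPAGATE = 2,	/* previously implied by how != 0 */
	};

	/* umount_tree(mnt, 0) becomes umount_tree(mnt, UMOUNT_SYNC)                  */
	/* umount_tree(mnt, 1) becomes umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC) */
	/* umount_tree(mnt, 2) becomes umount_tree(mnt, UMOUNT_PROPAGATE)             */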

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 fs/namespace.c |   31 ++++++++++++++++---------------
 fs/pnode.h     |    1 -
 2 files changed, 16 insertions(+), 16 deletions(-)

--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1323,14 +1323,15 @@ static inline void namespace_lock(void)
 	down_write(&namespace_sem);
 }
 
+enum umount_tree_flags {
+	UMOUNT_SYNC = 1,
+	UMOUNT_PROPAGATE = 2,
+};
 /*
  * mount_lock must be held
  * namespace_sem must be held for write
- * how = 0 => just this tree, don't propagate
- * how = 1 => propagate; we know that nobody else has reference to any victims
- * how = 2 => lazy umount
  */
-void umount_tree(struct mount *mnt, int how)
+static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
 {
 	HLIST_HEAD(tmp_list);
 	struct mount *p;
@@ -1344,7 +1345,7 @@ void umount_tree(struct mount *mnt, int
 	hlist_for_each_entry(p, &tmp_list, mnt_hash)
 		list_del_init(&p->mnt_child);
 
-	if (how)
+	if (how & UMOUNT_PROPAGATE)
 		propagate_umount(&tmp_list);
 
 	hlist_for_each_entry(p, &tmp_list, mnt_hash) {
@@ -1352,7 +1353,7 @@ void umount_tree(struct mount *mnt, int
 		list_del_init(&p->mnt_list);
 		__touch_mnt_namespace(p->mnt_ns);
 		p->mnt_ns = NULL;
-		if (how < 2)
+		if (how & UMOUNT_SYNC)
 			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
 		if (mnt_has_parent(p)) {
 			hlist_del_init(&p->mnt_mp_list);
@@ -1457,14 +1458,14 @@ static int do_umount(struct mount *mnt,
 
 	if (flags & MNT_DETACH) {
 		if (!list_empty(&mnt->mnt_list))
-			umount_tree(mnt, 2);
+			umount_tree(mnt, UMOUNT_PROPAGATE);
 		retval = 0;
 	} else {
 		shrink_submounts(mnt);
 		retval = -EBUSY;
 		if (!propagate_mount_busy(mnt, 2)) {
 			if (!list_empty(&mnt->mnt_list))
-				umount_tree(mnt, 1);
+				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
 			retval = 0;
 		}
 	}
@@ -1496,7 +1497,7 @@ void __detach_mounts(struct dentry *dent
 	lock_mount_hash();
 	while (!hlist_empty(&mp->m_list)) {
 		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
-		umount_tree(mnt, 2);
+		umount_tree(mnt, UMOUNT_PROPAGATE);
 	}
 	unlock_mount_hash();
 	put_mountpoint(mp);
@@ -1658,7 +1659,7 @@ struct mount *copy_tree(struct mount *mn
 out:
 	if (res) {
 		lock_mount_hash();
-		umount_tree(res, 0);
+		umount_tree(res, UMOUNT_SYNC);
 		unlock_mount_hash();
 	}
 	return q;
@@ -1682,7 +1683,7 @@ void drop_collected_mounts(struct vfsmou
 {
 	namespace_lock();
 	lock_mount_hash();
-	umount_tree(real_mount(mnt), 0);
+	umount_tree(real_mount(mnt), UMOUNT_SYNC);
 	unlock_mount_hash();
 	namespace_unlock();
 }
@@ -1865,7 +1866,7 @@ static int attach_recursive_mnt(struct m
 out_cleanup_ids:
 	while (!hlist_empty(&tree_list)) {
 		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
-		umount_tree(child, 0);
+		umount_tree(child, UMOUNT_SYNC);
 	}
 	unlock_mount_hash();
 	cleanup_group_ids(source_mnt, NULL);
@@ -2045,7 +2046,7 @@ static int do_loopback(struct path *path
 	err = graft_tree(mnt, parent, mp);
 	if (err) {
 		lock_mount_hash();
-		umount_tree(mnt, 0);
+		umount_tree(mnt, UMOUNT_SYNC);
 		unlock_mount_hash();
 	}
 out2:
@@ -2416,7 +2417,7 @@ void mark_mounts_for_expiry(struct list_
 	while (!list_empty(&graveyard)) {
 		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
 		touch_mnt_namespace(mnt->mnt_ns);
-		umount_tree(mnt, 1);
+		umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
 	}
 	unlock_mount_hash();
 	namespace_unlock();
@@ -2487,7 +2488,7 @@ static void shrink_submounts(struct moun
 			m = list_first_entry(&graveyard, struct mount,
 						mnt_expire);
 			touch_mnt_namespace(m->mnt_ns);
-			umount_tree(m, 1);
+			umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
 		}
 	}
 }
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -47,7 +47,6 @@ int get_dominating_id(struct mount *mnt,
 unsigned int mnt_get_count(struct mount *mnt);
 void mnt_set_mountpoint(struct mount *, struct mountpoint *,
 			struct mount *);
-void umount_tree(struct mount *, int);
 struct mount *copy_tree(struct mount *, struct dentry *, int);
 bool is_path_reachable(struct mount *, struct dentry *,
 			const struct path *root);