// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include <stdint.h>
#include <dirent.h>
#include <sys/types.h>
#include <sys/statvfs.h>
#include "handle.h"
#include "libfrog/paths.h"
#include "libfrog/workqueue.h"
#include "xfs_scrub.h"
#include "common.h"
#include "vfs.h"

#ifndef AT_NO_AUTOMOUNT
# define AT_NO_AUTOMOUNT 0x800
#endif

/*
 * Helper functions to assist in traversing a directory tree using regular
 * VFS calls.
 */

/* Scan a filesystem tree. */
struct scan_fs_tree {
        unsigned int nr_dirs;
        pthread_mutex_t lock;
        pthread_cond_t wakeup;
        struct stat root_sb;
        bool aborted;
        scan_fs_tree_dir_fn dir_fn;
        scan_fs_tree_dirent_fn dirent_fn;
        void *arg;
};

/* Per-work-item scan context. */
struct scan_fs_tree_dir {
        char *path;
        struct scan_fs_tree *sft;
        bool rootdir;
};

static void scan_fs_dir(struct workqueue *wq, xfs_agnumber_t agno, void *arg);

/* Increment the number of directories that are queued for processing. */
static void
inc_nr_dirs(
        struct scan_fs_tree *sft)
{
        pthread_mutex_lock(&sft->lock);
        sft->nr_dirs++;
        pthread_mutex_unlock(&sft->lock);
}

/*
 * Decrement the number of directories that are queued for processing and if
 * we ran out of dirs to process, wake up anyone who was waiting for processing
 * to finish.
 */
static void
dec_nr_dirs(
        struct scan_fs_tree *sft)
{
        pthread_mutex_lock(&sft->lock);
        sft->nr_dirs--;
        if (sft->nr_dirs == 0)
                pthread_cond_signal(&sft->wakeup);
        pthread_mutex_unlock(&sft->lock);
}

/* Queue a directory for scanning. */
static int
queue_subdir(
        struct scrub_ctx *ctx,
        struct scan_fs_tree *sft,
        struct workqueue *wq,
        const char *path,
        bool is_rootdir)
{
        struct scan_fs_tree_dir *new_sftd;
        int error;

        new_sftd = malloc(sizeof(struct scan_fs_tree_dir));
        if (!new_sftd)
                return errno;

        new_sftd->path = strdup(path);
        if (!new_sftd->path) {
                error = errno;
                goto out_sftd;
        }

        new_sftd->sft = sft;
        new_sftd->rootdir = is_rootdir;

        inc_nr_dirs(sft);
        error = -workqueue_add(wq, scan_fs_dir, 0, new_sftd);
        if (error) {
                dec_nr_dirs(sft);
                str_liberror(ctx, error, _("queueing directory scan work"));
                goto out_path;
        }

        return 0;
out_path:
        free(new_sftd->path);
out_sftd:
        free(new_sftd);
        return error;
}

/* Scan a directory sub tree. */
static void
scan_fs_dir(
        struct workqueue *wq,
        xfs_agnumber_t agno,
        void *arg)
{
        struct scrub_ctx *ctx = (struct scrub_ctx *)wq->wq_ctx;
        struct scan_fs_tree_dir *sftd = arg;
        struct scan_fs_tree *sft = sftd->sft;
        DIR *dir;
        struct dirent *dirent;
        char newpath[PATH_MAX];
        struct stat sb;
        int dir_fd;
        int error;

        /* Open the directory. */
        dir_fd = open(sftd->path, O_RDONLY | O_NOATIME | O_NOFOLLOW | O_NOCTTY);
        if (dir_fd < 0) {
                if (errno != ENOENT)
                        str_errno(ctx, sftd->path);
                goto out;
        }

        /* Caller-specific directory checks. */
        error = sft->dir_fn(ctx, sftd->path, dir_fd, sft->arg);
        if (error) {
                sft->aborted = true;
                error = close(dir_fd);
                if (error)
                        str_errno(ctx, sftd->path);
                goto out;
        }

        /* Iterate the directory entries. */
        dir = fdopendir(dir_fd);
        if (!dir) {
                str_errno(ctx, sftd->path);
                sft->aborted = true;
                close(dir_fd);
                goto out;
        }
        rewinddir(dir);
        for (dirent = readdir(dir);
             !sft->aborted && dirent != NULL;
             dirent = readdir(dir)) {
                snprintf(newpath, PATH_MAX, "%s/%s", sftd->path,
                                dirent->d_name);

                /* Get the stat info for this directory entry. */
                error = fstatat(dir_fd, dirent->d_name, &sb,
                                AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW);
                if (error) {
                        str_errno(ctx, newpath);
                        continue;
                }

                /* Ignore files on other filesystems. */
                if (sb.st_dev != sft->root_sb.st_dev)
                        continue;

                /* Caller-specific directory entry function. */
                error = sft->dirent_fn(ctx, newpath, dir_fd, dirent, &sb,
                                sft->arg);
                if (error) {
                        sft->aborted = true;
                        break;
                }

                if (scrub_excessive_errors(ctx)) {
                        sft->aborted = true;
                        break;
                }

                /* If directory, call ourselves recursively. */
                if (S_ISDIR(sb.st_mode) && strcmp(".", dirent->d_name) &&
                    strcmp("..", dirent->d_name)) {
                        error = queue_subdir(ctx, sft, wq, newpath, false);
                        if (error) {
                                str_liberror(ctx, error,
                                                _("queueing subdirectory scan"));
                                sft->aborted = true;
                                break;
                        }
                }
        }

        /* Close dir, go away. */
        error = closedir(dir);
        if (error)
                str_errno(ctx, sftd->path);

out:
        dec_nr_dirs(sft);
        free(sftd->path);
        free(sftd);
}

/*
 * Scan the entire filesystem. This function returns 0 on success; if there
 * are errors, this function will log them and return nonzero.
 */
int
scan_fs_tree(
        struct scrub_ctx *ctx,
        scan_fs_tree_dir_fn dir_fn,
        scan_fs_tree_dirent_fn dirent_fn,
        void *arg)
{
        struct workqueue wq;
        struct scan_fs_tree sft = {
                .root_sb = ctx->mnt_sb,
                .dir_fn = dir_fn,
                .dirent_fn = dirent_fn,
                .arg = arg,
        };
        int ret;

        ret = pthread_mutex_init(&sft.lock, NULL);
        if (ret) {
                str_liberror(ctx, ret, _("creating directory scan lock"));
                return ret;
        }
        ret = pthread_cond_init(&sft.wakeup, NULL);
        if (ret) {
                str_liberror(ctx, ret, _("creating directory scan signal"));
                goto out_mutex;
        }

        ret = -workqueue_create(&wq, (struct xfs_mount *)ctx,
                        scrub_nproc_workqueue(ctx));
        if (ret) {
                str_liberror(ctx, ret, _("creating directory scan workqueue"));
                goto out_cond;
        }

        ret = queue_subdir(ctx, &sft, &wq, ctx->mntpoint, true);
        if (ret) {
                str_liberror(ctx, ret, _("queueing directory scan"));
                goto out_wq;
        }

        /*
         * Wait for the wakeup to trigger, which should only happen when the
         * last worker thread decrements nr_dirs to zero. Once the worker
         * triggers the wakeup and unlocks the sft lock, it's no longer safe
         * for any worker thread to access sft, as we now own the lock and are
         * about to tear everything down.
         */
        pthread_mutex_lock(&sft.lock);
        if (sft.nr_dirs)
                pthread_cond_wait(&sft.wakeup, &sft.lock);
        assert(sft.nr_dirs == 0);
        pthread_mutex_unlock(&sft.lock);

        ret = -workqueue_terminate(&wq);
        if (ret) {
                str_liberror(ctx, ret, _("finishing directory scan work"));
                goto out_wq;
        }

        if (!ret && sft.aborted)
                ret = -1;

out_wq:
        workqueue_destroy(&wq);
out_cond:
        pthread_cond_destroy(&sft.wakeup);
out_mutex:
        pthread_mutex_destroy(&sft.lock);
        return ret;
}
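
/*
 * Illustrative sketch, not part of the upstream file: one way a caller might
 * drive scan_fs_tree(). The callback signatures below are inferred from the
 * call sites in scan_fs_dir() above; the authoritative typedefs for
 * scan_fs_tree_dir_fn and scan_fs_tree_dirent_fn live in vfs.h. The names
 * example_count, count_dir, and count_dirent are hypothetical.
 */
struct example_count {
        uint64_t dirs;
        uint64_t dirents;
};

/* Count each directory visited; returning nonzero would abort the scan. */
static int
count_dir(
        struct scrub_ctx *ctx,
        const char *path,
        int dir_fd,
        void *arg)
{
        struct example_count *ec = arg;

        /* Workers run concurrently, so a real caller would need locking. */
        ec->dirs++;
        return 0;
}

/* Count every directory entry seen during the walk. */
static int
count_dirent(
        struct scrub_ctx *ctx,
        const char *path,
        int dir_fd,
        struct dirent *dirent,
        struct stat *sb,
        void *arg)
{
        struct example_count *ec = arg;

        ec->dirents++;
        return 0;
}

/*
 * The walk itself could then be kicked off with something like:
 *
 *        struct example_count ec = {0};
 *
 *        error = scan_fs_tree(ctx, count_dir, count_dirent, &ec);
 */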

#ifndef FITRIM
struct fstrim_range {
        __u64 start;
        __u64 len;
        __u64 minlen;
};
#define FITRIM _IOWR('X', 121, struct fstrim_range) /* Trim */
#endif

/* Call FITRIM to trim all the unused space in a filesystem. */
void
fstrim(
        struct scrub_ctx *ctx)
{
        struct fstrim_range range = {0};
        int error;

        range.len = ULLONG_MAX;
        error = ioctl(ctx->mnt.fd, FITRIM, &range);
        if (error && errno != EOPNOTSUPP && errno != ENOTTY)
                perror(_("fstrim"));
}
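
/*
 * Illustrative sketch, not part of the upstream file: a minimal standalone
 * program that issues the same FITRIM ioctl as the fstrim() helper above,
 * against a mount point named on the command line. It assumes a Linux
 * system where <linux/fs.h> supplies FITRIM and struct fstrim_range.
 */
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int
main(int argc, char *argv[])
{
        struct fstrim_range range = { .len = ULLONG_MAX };
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s mountpoint\n", argv[0]);
                return 1;
        }

        fd = open(argv[1], O_RDONLY | O_DIRECTORY);
        if (fd < 0) {
                perror(argv[1]);
                return 1;
        }

        /* Ask the kernel to discard all unused space in this filesystem. */
        if (ioctl(fd, FITRIM, &range) < 0 &&
            errno != EOPNOTSUPP && errno != ENOTTY)
                perror("FITRIM");

        close(fd);
        return 0;
}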