]> git.ipfire.org Git - thirdparty/xfsprogs-dev.git/blob - scrub/vfs.c
libfrog: fix workqueue error communication problems
[thirdparty/xfsprogs-dev.git] / scrub / vfs.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright (C) 2018 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
5 */
6 #include "xfs.h"
7 #include <stdint.h>
8 #include <dirent.h>
9 #include <sys/types.h>
10 #include <sys/statvfs.h>
11 #include "handle.h"
12 #include "libfrog/paths.h"
13 #include "libfrog/workqueue.h"
14 #include "xfs_scrub.h"
15 #include "common.h"
16 #include "vfs.h"
17
18 #ifndef AT_NO_AUTOMOUNT
19 # define AT_NO_AUTOMOUNT 0x800
20 #endif
21
22 /*
23 * Helper functions to assist in traversing a directory tree using regular
24 * VFS calls.
25 */
26
/*
 * Scan a filesystem tree.
 *
 * Shared state for one scan_fs_tree() invocation; a pointer to this is
 * carried by every queued scan_fs_tree_dir work item.
 */
struct scan_fs_tree {
	unsigned int		nr_dirs;	/* dirs queued but not yet fully scanned; protected by lock */
	pthread_mutex_t		lock;		/* protects nr_dirs */
	pthread_cond_t		wakeup;		/* signalled when nr_dirs drops to zero */
	struct stat		root_sb;	/* stat of scan root; st_dev used to skip other filesystems */
	bool			moveon;		/* cleared by workers on error to abort the scan */
	scan_fs_tree_dir_fn	dir_fn;		/* caller's per-directory callback */
	scan_fs_tree_dirent_fn	dirent_fn;	/* caller's per-dirent callback */
	void			*arg;		/* opaque argument passed to both callbacks */
};
38
/*
 * Per-work-item scan context.
 *
 * One of these is allocated per queued directory by queue_subdir() and
 * freed (along with path) by scan_fs_dir() when the item completes.
 */
struct scan_fs_tree_dir {
	char			*path;		/* directory to scan; owned by this item */
	struct scan_fs_tree	*sft;		/* shared scan state */
	bool			rootdir;	/* true only for the scan root (the mountpoint) */
};
45
46 static void scan_fs_dir(struct workqueue *wq, xfs_agnumber_t agno, void *arg);
47
48 /* Increment the number of directories that are queued for processing. */
49 static void
50 inc_nr_dirs(
51 struct scan_fs_tree *sft)
52 {
53 pthread_mutex_lock(&sft->lock);
54 sft->nr_dirs++;
55 pthread_mutex_unlock(&sft->lock);
56 }
57
58 /*
59 * Decrement the number of directories that are queued for processing and if
60 * we ran out of dirs to process, wake up anyone who was waiting for processing
61 * to finish.
62 */
63 static void
64 dec_nr_dirs(
65 struct scan_fs_tree *sft)
66 {
67 pthread_mutex_lock(&sft->lock);
68 sft->nr_dirs--;
69 if (sft->nr_dirs == 0)
70 pthread_cond_signal(&sft->wakeup);
71 pthread_mutex_unlock(&sft->lock);
72 }
73
74 /* Queue a directory for scanning. */
75 static bool
76 queue_subdir(
77 struct scrub_ctx *ctx,
78 struct scan_fs_tree *sft,
79 struct workqueue *wq,
80 const char *path,
81 bool is_rootdir)
82 {
83 struct scan_fs_tree_dir *new_sftd;
84 int error;
85
86 new_sftd = malloc(sizeof(struct scan_fs_tree_dir));
87 if (!new_sftd) {
88 str_errno(ctx, _("creating directory scan context"));
89 return false;
90 }
91
92 new_sftd->path = strdup(path);
93 if (!new_sftd->path) {
94 str_errno(ctx, _("creating directory scan path"));
95 goto out_sftd;
96 }
97
98 new_sftd->sft = sft;
99 new_sftd->rootdir = is_rootdir;
100
101 inc_nr_dirs(sft);
102 error = workqueue_add(wq, scan_fs_dir, 0, new_sftd);
103 if (error) {
104 dec_nr_dirs(sft);
105 str_liberror(ctx, error, _("queueing directory scan work"));
106 goto out_path;
107 }
108
109 return true;
110 out_path:
111 free(new_sftd->path);
112 out_sftd:
113 free(new_sftd);
114 return false;
115 }
116
117 /* Scan a directory sub tree. */
118 static void
119 scan_fs_dir(
120 struct workqueue *wq,
121 xfs_agnumber_t agno,
122 void *arg)
123 {
124 struct scrub_ctx *ctx = (struct scrub_ctx *)wq->wq_ctx;
125 struct scan_fs_tree_dir *sftd = arg;
126 struct scan_fs_tree *sft = sftd->sft;
127 DIR *dir;
128 struct dirent *dirent;
129 char newpath[PATH_MAX];
130 struct stat sb;
131 int dir_fd;
132 int error;
133
134 /* Open the directory. */
135 dir_fd = open(sftd->path, O_RDONLY | O_NOATIME | O_NOFOLLOW | O_NOCTTY);
136 if (dir_fd < 0) {
137 if (errno != ENOENT)
138 str_errno(ctx, sftd->path);
139 goto out;
140 }
141
142 /* Caller-specific directory checks. */
143 if (!sft->dir_fn(ctx, sftd->path, dir_fd, sft->arg)) {
144 sft->moveon = false;
145 error = close(dir_fd);
146 if (error)
147 str_errno(ctx, sftd->path);
148 goto out;
149 }
150
151 /* Iterate the directory entries. */
152 dir = fdopendir(dir_fd);
153 if (!dir) {
154 str_errno(ctx, sftd->path);
155 close(dir_fd);
156 goto out;
157 }
158 rewinddir(dir);
159 for (dirent = readdir(dir); dirent != NULL; dirent = readdir(dir)) {
160 snprintf(newpath, PATH_MAX, "%s/%s", sftd->path,
161 dirent->d_name);
162
163 /* Get the stat info for this directory entry. */
164 error = fstatat(dir_fd, dirent->d_name, &sb,
165 AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW);
166 if (error) {
167 str_errno(ctx, newpath);
168 continue;
169 }
170
171 /* Ignore files on other filesystems. */
172 if (sb.st_dev != sft->root_sb.st_dev)
173 continue;
174
175 /* Caller-specific directory entry function. */
176 if (!sft->dirent_fn(ctx, newpath, dir_fd, dirent, &sb,
177 sft->arg)) {
178 sft->moveon = false;
179 break;
180 }
181
182 if (xfs_scrub_excessive_errors(ctx)) {
183 sft->moveon = false;
184 break;
185 }
186
187 /* If directory, call ourselves recursively. */
188 if (S_ISDIR(sb.st_mode) && strcmp(".", dirent->d_name) &&
189 strcmp("..", dirent->d_name)) {
190 sft->moveon = queue_subdir(ctx, sft, wq, newpath,
191 false);
192 if (!sft->moveon)
193 break;
194 }
195 }
196
197 /* Close dir, go away. */
198 error = closedir(dir);
199 if (error)
200 str_errno(ctx, sftd->path);
201
202 out:
203 dec_nr_dirs(sft);
204 free(sftd->path);
205 free(sftd);
206 }
207
208 /* Scan the entire filesystem. */
209 bool
210 scan_fs_tree(
211 struct scrub_ctx *ctx,
212 scan_fs_tree_dir_fn dir_fn,
213 scan_fs_tree_dirent_fn dirent_fn,
214 void *arg)
215 {
216 struct workqueue wq;
217 struct scan_fs_tree sft;
218 int ret;
219
220 sft.moveon = true;
221 sft.nr_dirs = 0;
222 sft.root_sb = ctx->mnt_sb;
223 sft.dir_fn = dir_fn;
224 sft.dirent_fn = dirent_fn;
225 sft.arg = arg;
226 pthread_mutex_init(&sft.lock, NULL);
227 pthread_cond_init(&sft.wakeup, NULL);
228
229 ret = workqueue_create(&wq, (struct xfs_mount *)ctx,
230 scrub_nproc_workqueue(ctx));
231 if (ret) {
232 str_info(ctx, ctx->mntpoint, _("Could not create workqueue."));
233 return false;
234 }
235
236 sft.moveon = queue_subdir(ctx, &sft, &wq, ctx->mntpoint, true);
237 if (!sft.moveon)
238 goto out_wq;
239
240 /*
241 * Wait for the wakeup to trigger, which should only happen when the
242 * last worker thread decrements nr_dirs to zero. Once the worker
243 * triggers the wakeup and unlocks the sft lock, it's no longer safe
244 * for any worker thread to access sft, as we now own the lock and are
245 * about to tear everything down.
246 */
247 pthread_mutex_lock(&sft.lock);
248 if (sft.nr_dirs)
249 pthread_cond_wait(&sft.wakeup, &sft.lock);
250 assert(sft.nr_dirs == 0);
251 pthread_mutex_unlock(&sft.lock);
252
253 out_wq:
254 workqueue_destroy(&wq);
255 return sft.moveon;
256 }
257
#ifndef FITRIM
/*
 * Fallback definitions for building against kernel headers old enough to
 * lack the FITRIM ioctl; must match the kernel's struct fstrim_range layout.
 */
struct fstrim_range {
	__u64 start;
	__u64 len;
	__u64 minlen;
};
#define FITRIM		_IOWR('X', 121, struct fstrim_range)	/* Trim */
#endif
266
267 /* Call FITRIM to trim all the unused space in a filesystem. */
268 void
269 fstrim(
270 struct scrub_ctx *ctx)
271 {
272 struct fstrim_range range = {0};
273 int error;
274
275 range.len = ULLONG_MAX;
276 error = ioctl(ctx->mnt.fd, FITRIM, &range);
277 if (error && errno != EOPNOTSUPP && errno != ENOTTY)
278 perror(_("fstrim"));
279 }