#include "libxfs.h"
#include "threads.h"
#include "prefetch.h"
#include "avl.h"
#include "globals.h"
}
static void
-traverse_function(
+do_dir_inode(
struct workqueue *wq,
- xfs_agnumber_t agno,
+ xfs_agnumber_t agno,
void *arg)
{
- ino_tree_node_t *irec;
+ struct ino_tree_node *irec = arg;
int i;
+
+ for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
+ if (inode_isadir(irec, i))
+ process_dir_inode(wq->wq_ctx, agno, irec, i);
+ }
+}
+
+static void
+traverse_function(
+ struct workqueue *wq,
+ xfs_agnumber_t agno,
+ void *arg)
+{
+ struct ino_tree_node *irec;
prefetch_args_t *pf_args = arg;
+ struct workqueue lwq;
+ struct xfs_mount *mp = wq->wq_ctx;
wait_for_inode_prefetch(pf_args);
if (verbose)
do_log(_(" - agno = %d\n"), agno);
+ /*
+ * The more AGs we have in flight at once, the fewer processing threads
+ * per AG. This means we don't overwhelm the machine with hundreds of
+ * threads when we start acting on lots of AGs at once. We just want
+ * enough that we can keep multiple CPUs busy across multiple AGs.
+ */
+ workqueue_create_bound(&lwq, mp, ag_stride, 1000);
+
for (irec = findfirst_inode_rec(agno); irec; irec = next_ino_rec(irec)) {
if (irec->ino_isa_dir == 0)
continue;
if (pf_args) {
sem_post(&pf_args->ra_count);
#ifdef XR_PF_TRACE
+ {
+ int i;
sem_getvalue(&pf_args->ra_count, &i);
pftrace(
"processing inode chunk %p in AG %d (sem count = %d)",
irec, agno, i);
+ }
#endif
}
- for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
- if (inode_isadir(irec, i))
- process_dir_inode(wq->wq_ctx, agno, irec, i);
- }
+ queue_work(&lwq, do_dir_inode, agno, irec);
}
+ destroy_work_queue(&lwq);
cleanup_inode_prefetch(pf_args);
}
traverse_ags(
	struct xfs_mount	*mp)
{
	/*
	 * NOTE(review): passing ag_stride (instead of 0) presumably lets
	 * do_inode_prefetch() run multiple AGs concurrently — confirm
	 * against its definition.
	 */
	do_inode_prefetch(mp, ag_stride, traverse_function, false, true);
}
void