/* SPDX-License-Identifier: LGPL-2.1+ */

#include <alloca.h>
#include <errno.h>
#include <getopt.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "sd-bus.h"

#include "alloc-util.h"
#include "bus-error.h"
#include "bus-util.h"
#include "cgroup-show.h"
#include "cgroup-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "hashmap.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "procfs-util.h"
#include "stdio-util.h"
#include "strv.h"
#include "terminal-util.h"
#include "unit-name.h"
#include "util.h"
#include "virt.h"

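/* Accounting state for a single control group. Samples are kept across refresh
 * iterations so that rates (CPU fraction, IO bytes per second) can be computed
 * from the difference between two consecutive samples. */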
typedef struct Group {
        char *path;

        bool n_tasks_valid:1;
        bool cpu_valid:1;
        bool memory_valid:1;
        bool io_valid:1;

        uint64_t n_tasks;

        unsigned cpu_iteration;
        nsec_t cpu_usage;
        nsec_t cpu_timestamp;
        double cpu_fraction;

        uint64_t memory;

        unsigned io_iteration;
        uint64_t io_input, io_output;
        nsec_t io_timestamp;
        uint64_t io_input_bps, io_output_bps;
} Group;

static unsigned arg_depth = 3;
static unsigned arg_iterations = (unsigned) -1;
static bool arg_batch = false;
static bool arg_raw = false;
static usec_t arg_delay = 1*USEC_PER_SEC;
static char* arg_machine = NULL;
static char* arg_root = NULL;
static bool arg_recursive = true;
static bool arg_recursive_unset = false;

static enum {
        COUNT_PIDS,
        COUNT_USERSPACE_PROCESSES,
        COUNT_ALL_PROCESSES,
} arg_count = COUNT_PIDS;

static enum {
        ORDER_PATH,
        ORDER_TASKS,
        ORDER_CPU,
        ORDER_MEMORY,
        ORDER_IO,
} arg_order = ORDER_CPU;

static enum {
        CPU_PERCENT,
        CPU_TIME,
} arg_cpu_type = CPU_PERCENT;

static void group_free(Group *g) {
        assert(g);

        free(g->path);
        free(g);
}

static void group_hashmap_clear(Hashmap *h) {
        hashmap_clear_with_destructor(h, group_free);
}

static void group_hashmap_free(Hashmap *h) {
        group_hashmap_clear(h);
        hashmap_free(h);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Hashmap*, group_hashmap_free);

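/* Formats "t" human-readably into "buf", or as a plain integer when --raw was
 * given; returns "-" if the value is not valid. */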
static const char *maybe_format_bytes(char *buf, size_t l, bool is_valid, uint64_t t) {
        if (!is_valid)
                return "-";
        if (arg_raw) {
                snprintf(buf, l, "%" PRIu64, t);
                return buf;
        }
        return format_bytes(buf, l, t);
}

static bool is_root_cgroup(const char *path) {

        /* Returns true if the specified path belongs to the root cgroup. The root cgroup is special on cgroupsv2 as it
         * carries only very few attributes in order not to export multiple truths about system state as most
         * information is available elsewhere in /proc anyway. We need to be able to deal with that, and need to get
         * our data from different sources in that case.
         *
         * There's one extra complication in all of this, though 😣: if the path to the cgroup indicates we are in the
         * root cgroup this might actually not be the case, because cgroup namespacing might be in effect
         * (CLONE_NEWCGROUP). Since there's no nice way to distinguish a real cgroup root from a fake namespaced one we
         * do an explicit container check here, under the assumption that CLONE_NEWCGROUP is generally used when
         * container managers are used too.
         *
         * Note that checking for a container environment is kinda ugly, since in theory people could use cgtop from
         * inside a container where cgroup namespacing is turned off to watch the host system. However, that's mostly a
         * theoretical use case, and if people actually try, all they'll lose is accounting for the top-level cgroup.
         * Which isn't too bad. */

        if (detect_container() > 0)
                return false;

        return empty_or_root(path);
}

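/* Samples one controller's accounting data for the cgroup "path" and stores the
 * result in hashmap "a"; a Group already seen in the previous iteration is moved
 * over from "b" so that its old counters remain available for rate calculations. */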
static int process(
                const char *controller,
                const char *path,
                Hashmap *a,
                Hashmap *b,
                unsigned iteration,
                Group **ret) {

        Group *g;
        int r, all_unified;

        assert(controller);
        assert(path);
        assert(a);

        all_unified = cg_all_unified();
        if (all_unified < 0)
                return all_unified;

        g = hashmap_get(a, path);
        if (!g) {
                g = hashmap_get(b, path);
                if (!g) {
                        g = new0(Group, 1);
                        if (!g)
                                return -ENOMEM;

                        g->path = strdup(path);
                        if (!g->path) {
                                group_free(g);
                                return -ENOMEM;
                        }

                        r = hashmap_put(a, g->path, g);
                        if (r < 0) {
                                group_free(g);
                                return r;
                        }
                } else {
                        r = hashmap_move_one(a, b, path);
                        if (r < 0)
                                return r;

                        g->cpu_valid = g->memory_valid = g->io_valid = g->n_tasks_valid = false;
                }
        }

        if (streq(controller, SYSTEMD_CGROUP_CONTROLLER) &&
            IN_SET(arg_count, COUNT_ALL_PROCESSES, COUNT_USERSPACE_PROCESSES)) {
                _cleanup_fclose_ FILE *f = NULL;
                pid_t pid;

                r = cg_enumerate_processes(controller, path, &f);
                if (r == -ENOENT)
                        return 0;
                if (r < 0)
                        return r;

                g->n_tasks = 0;
                while (cg_read_pid(f, &pid) > 0) {

                        if (arg_count == COUNT_USERSPACE_PROCESSES && is_kernel_thread(pid) > 0)
                                continue;

                        g->n_tasks++;
                }

                if (g->n_tasks > 0)
                        g->n_tasks_valid = true;

        } else if (streq(controller, "pids") && arg_count == COUNT_PIDS) {

                if (is_root_cgroup(path)) {
                        r = procfs_tasks_get_current(&g->n_tasks);
                        if (r < 0)
                                return r;
                } else {
                        _cleanup_free_ char *p = NULL, *v = NULL;

                        r = cg_get_path(controller, path, "pids.current", &p);
                        if (r < 0)
                                return r;

                        r = read_one_line_file(p, &v);
                        if (r == -ENOENT)
                                return 0;
                        if (r < 0)
                                return r;

                        r = safe_atou64(v, &g->n_tasks);
                        if (r < 0)
                                return r;
                }

                if (g->n_tasks > 0)
                        g->n_tasks_valid = true;

        } else if (STR_IN_SET(controller, "cpu", "cpuacct")) {
                _cleanup_free_ char *p = NULL, *v = NULL;
                uint64_t new_usage;
                nsec_t timestamp;

                if (is_root_cgroup(path)) {
                        r = procfs_cpu_get_usage(&new_usage);
                        if (r < 0)
                                return r;
                } else if (all_unified) {
                        _cleanup_free_ char *val = NULL;

                        if (!streq(controller, "cpu"))
                                return 0;

                        r = cg_get_keyed_attribute("cpu", path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
                        if (IN_SET(r, -ENOENT, -ENXIO))
                                return 0;
                        if (r < 0)
                                return r;

                        r = safe_atou64(val, &new_usage);
                        if (r < 0)
                                return r;

                        new_usage *= NSEC_PER_USEC;
                } else {
                        if (!streq(controller, "cpuacct"))
                                return 0;

                        r = cg_get_path(controller, path, "cpuacct.usage", &p);
                        if (r < 0)
                                return r;

                        r = read_one_line_file(p, &v);
                        if (r == -ENOENT)
                                return 0;
                        if (r < 0)
                                return r;

                        r = safe_atou64(v, &new_usage);
                        if (r < 0)
                                return r;
                }

                timestamp = now_nsec(CLOCK_MONOTONIC);

                if (g->cpu_iteration == iteration - 1 &&
                    (nsec_t) new_usage > g->cpu_usage) {

                        nsec_t x, y;

                        x = timestamp - g->cpu_timestamp;
                        if (x < 1)
                                x = 1;

                        y = (nsec_t) new_usage - g->cpu_usage;
                        g->cpu_fraction = (double) y / (double) x;
                        g->cpu_valid = true;
                }

                g->cpu_usage = (nsec_t) new_usage;
                g->cpu_timestamp = timestamp;
                g->cpu_iteration = iteration;

        } else if (streq(controller, "memory")) {

                if (is_root_cgroup(path)) {
                        r = procfs_memory_get_current(&g->memory);
                        if (r < 0)
                                return r;
                } else {
                        _cleanup_free_ char *p = NULL, *v = NULL;

                        if (all_unified)
                                r = cg_get_path(controller, path, "memory.current", &p);
                        else
                                r = cg_get_path(controller, path, "memory.usage_in_bytes", &p);
                        if (r < 0)
                                return r;

                        r = read_one_line_file(p, &v);
                        if (r == -ENOENT)
                                return 0;
                        if (r < 0)
                                return r;

                        r = safe_atou64(v, &g->memory);
                        if (r < 0)
                                return r;
                }

                if (g->memory > 0)
                        g->memory_valid = true;

        } else if ((streq(controller, "io") && all_unified) ||
                   (streq(controller, "blkio") && !all_unified)) {
                _cleanup_fclose_ FILE *f = NULL;
                _cleanup_free_ char *p = NULL;
                uint64_t wr = 0, rd = 0;
                nsec_t timestamp;

                r = cg_get_path(controller, path, all_unified ? "io.stat" : "blkio.io_service_bytes", &p);
                if (r < 0)
                        return r;

                f = fopen(p, "re");
                if (!f) {
                        if (errno == ENOENT)
                                return 0;
                        return -errno;
                }

                for (;;) {
                        _cleanup_free_ char *line = NULL;
                        uint64_t k, *q;
                        char *l;

                        r = read_line(f, LONG_LINE_MAX, &line);
                        if (r < 0)
                                return r;
                        if (r == 0)
                                break;

                        /* Trim and skip the device */
                        l = strstrip(line);
                        l += strcspn(l, WHITESPACE);
                        l += strspn(l, WHITESPACE);

                        if (all_unified) {
                                while (!isempty(l)) {
                                        if (sscanf(l, "rbytes=%" SCNu64, &k))
                                                rd += k;
                                        else if (sscanf(l, "wbytes=%" SCNu64, &k))
                                                wr += k;

                                        l += strcspn(l, WHITESPACE);
                                        l += strspn(l, WHITESPACE);
                                }
                        } else {
                                if (first_word(l, "Read")) {
                                        l += 4;
                                        q = &rd;
                                } else if (first_word(l, "Write")) {
                                        l += 5;
                                        q = &wr;
                                } else
                                        continue;

                                l += strspn(l, WHITESPACE);
                                r = safe_atou64(l, &k);
                                if (r < 0)
                                        continue;

                                *q += k;
                        }
                }

                timestamp = now_nsec(CLOCK_MONOTONIC);

                if (g->io_iteration == iteration - 1) {
                        uint64_t x, yr, yw;

                        x = (uint64_t) (timestamp - g->io_timestamp);
                        if (x < 1)
                                x = 1;

                        if (rd > g->io_input)
                                yr = rd - g->io_input;
                        else
                                yr = 0;

                        if (wr > g->io_output)
                                yw = wr - g->io_output;
                        else
                                yw = 0;

                        if (yr > 0 || yw > 0) {
                                g->io_input_bps = (yr * 1000000000ULL) / x;
                                g->io_output_bps = (yw * 1000000000ULL) / x;
                                g->io_valid = true;
                        }
                }

                g->io_input = rd;
                g->io_output = wr;
                g->io_timestamp = timestamp;
                g->io_iteration = iteration;
        }

        if (ret)
                *ret = g;

        return 0;
}

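/* Recursively walks the cgroup tree for one controller up to --depth levels,
 * calling process() on each group; when recursive counting is enabled, child
 * task counts are summed into the parent. */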
static int refresh_one(
                const char *controller,
                const char *path,
                Hashmap *a,
                Hashmap *b,
                unsigned iteration,
                unsigned depth,
                Group **ret) {

        _cleanup_closedir_ DIR *d = NULL;
        Group *ours = NULL;
        int r;

        assert(controller);
        assert(path);
        assert(a);

        if (depth > arg_depth)
                return 0;

        r = process(controller, path, a, b, iteration, &ours);
        if (r < 0)
                return r;

        r = cg_enumerate_subgroups(controller, path, &d);
        if (r == -ENOENT)
                return 0;
        if (r < 0)
                return r;

        for (;;) {
                _cleanup_free_ char *fn = NULL, *p = NULL;
                Group *child = NULL;

                r = cg_read_subgroup(d, &fn);
                if (r < 0)
                        return r;
                if (r == 0)
                        break;

                p = strjoin(path, "/", fn);
                if (!p)
                        return -ENOMEM;

                path_simplify(p, false);

                r = refresh_one(controller, p, a, b, iteration, depth + 1, &child);
                if (r < 0)
                        return r;

                if (arg_recursive &&
                    IN_SET(arg_count, COUNT_ALL_PROCESSES, COUNT_USERSPACE_PROCESSES) &&
                    child &&
                    child->n_tasks_valid &&
                    streq(controller, SYSTEMD_CGROUP_CONTROLLER)) {

                        /* Recursively sum up processes */

                        if (ours->n_tasks_valid)
                                ours->n_tasks += child->n_tasks;
                        else {
                                ours->n_tasks = child->n_tasks;
                                ours->n_tasks_valid = true;
                        }
                }
        }

        if (ret)
                *ret = ours;

        return 1;
}

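/* Refreshes accounting data for every controller we know how to sample,
 * starting from the given root cgroup. */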
static int refresh(const char *root, Hashmap *a, Hashmap *b, unsigned iteration) {
        const char *c;
        int r;

        FOREACH_STRING(c, SYSTEMD_CGROUP_CONTROLLER, "cpu", "cpuacct", "memory", "io", "blkio", "pids") {
                r = refresh_one(c, root, a, b, iteration, 0, NULL);
                if (r < 0)
                        return r;
        }

        return 0;
}

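/* Sort comparator for the display: parents come before their children (except
 * when sorting by non-recursive task counts), then groups are ordered by the
 * key selected with --order=, with the path as the final tie-breaker. */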
static int group_compare(Group * const *a, Group * const *b) {
        const Group *x = *a, *y = *b;
        int r;

        if (arg_order != ORDER_TASKS || arg_recursive) {
                /* Let's make sure that the parent is always before
                 * the child. Except when ordering by tasks and
                 * recursive summing is off, since that is actually
                 * not accumulative for all children. */

                if (path_startswith(empty_to_root(y->path), empty_to_root(x->path)))
                        return -1;
                if (path_startswith(empty_to_root(x->path), empty_to_root(y->path)))
                        return 1;
        }

        switch (arg_order) {

        case ORDER_PATH:
                break;

        case ORDER_CPU:
                if (arg_cpu_type == CPU_PERCENT) {
                        if (x->cpu_valid && y->cpu_valid) {
                                r = CMP(y->cpu_fraction, x->cpu_fraction);
                                if (r != 0)
                                        return r;
                        } else if (x->cpu_valid)
                                return -1;
                        else if (y->cpu_valid)
                                return 1;
                } else {
                        r = CMP(y->cpu_usage, x->cpu_usage);
                        if (r != 0)
                                return r;
                }

                break;

        case ORDER_TASKS:
                if (x->n_tasks_valid && y->n_tasks_valid) {
                        r = CMP(y->n_tasks, x->n_tasks);
                        if (r != 0)
                                return r;
                } else if (x->n_tasks_valid)
                        return -1;
                else if (y->n_tasks_valid)
                        return 1;

                break;

        case ORDER_MEMORY:
                if (x->memory_valid && y->memory_valid) {
                        r = CMP(y->memory, x->memory);
                        if (r != 0)
                                return r;
                } else if (x->memory_valid)
                        return -1;
                else if (y->memory_valid)
                        return 1;

                break;

        case ORDER_IO:
                if (x->io_valid && y->io_valid) {
                        r = CMP(y->io_input_bps + y->io_output_bps, x->io_input_bps + x->io_output_bps);
                        if (r != 0)
                                return r;
                } else if (x->io_valid)
                        return -1;
                else if (y->io_valid)
                        return 1;
        }

        return path_compare(x->path, y->path);
}

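/* Renders one table of all collected groups to the terminal: groups are
 * filtered to those with at least one valid field, sorted with group_compare(),
 * and printed with column widths fitted to the current terminal size. */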
static void display(Hashmap *a) {
        Iterator i;
        Group *g;
        Group **array;
        signed path_columns;
        unsigned rows, n = 0, j, maxtcpu = 0, maxtpath = 3; /* 3 for ellipsize() to work properly */
        char buffer[MAX3(21, FORMAT_BYTES_MAX, FORMAT_TIMESPAN_MAX)];

        assert(a);

        if (!terminal_is_dumb())
                fputs(ANSI_HOME_CLEAR, stdout);

        array = newa(Group*, hashmap_size(a));

        HASHMAP_FOREACH(g, a, i)
                if (g->n_tasks_valid || g->cpu_valid || g->memory_valid || g->io_valid)
                        array[n++] = g;

        typesafe_qsort(array, n, group_compare);

        /* Find the longest names in one run */
        for (j = 0; j < n; j++) {
                unsigned cputlen, pathtlen;

                format_timespan(buffer, sizeof(buffer), (usec_t) (array[j]->cpu_usage / NSEC_PER_USEC), 0);
                cputlen = strlen(buffer);
                maxtcpu = MAX(maxtcpu, cputlen);

                pathtlen = strlen(array[j]->path);
                maxtpath = MAX(maxtpath, pathtlen);
        }

        if (arg_cpu_type == CPU_PERCENT)
                xsprintf(buffer, "%6s", "%CPU");
        else
                xsprintf(buffer, "%*s", maxtcpu, "CPU Time");

        rows = lines();
        if (rows <= 10)
                rows = 10;

        if (on_tty()) {
                const char *on, *off;

                path_columns = columns() - 36 - strlen(buffer);
                if (path_columns < 10)
                        path_columns = 10;

                on = ansi_highlight_underline();
                off = ansi_underline();

                printf("%s%s%-*s%s %s%7s%s %s%s%s %s%8s%s %s%8s%s %s%8s%s%s\n",
                       ansi_underline(),
                       arg_order == ORDER_PATH ? on : "", path_columns, "Control Group",
                       arg_order == ORDER_PATH ? off : "",
                       arg_order == ORDER_TASKS ? on : "", arg_count == COUNT_PIDS ? "Tasks" : arg_count == COUNT_USERSPACE_PROCESSES ? "Procs" : "Proc+",
                       arg_order == ORDER_TASKS ? off : "",
                       arg_order == ORDER_CPU ? on : "", buffer,
                       arg_order == ORDER_CPU ? off : "",
                       arg_order == ORDER_MEMORY ? on : "", "Memory",
                       arg_order == ORDER_MEMORY ? off : "",
                       arg_order == ORDER_IO ? on : "", "Input/s",
                       arg_order == ORDER_IO ? off : "",
                       arg_order == ORDER_IO ? on : "", "Output/s",
                       arg_order == ORDER_IO ? off : "",
                       ansi_normal());
        } else
                path_columns = maxtpath;

        for (j = 0; j < n; j++) {
                _cleanup_free_ char *ellipsized = NULL;
                const char *path;

                if (on_tty() && j + 6 > rows)
                        break;

                g = array[j];

                path = empty_to_root(g->path);
                ellipsized = ellipsize(path, path_columns, 33);
                printf("%-*s", path_columns, ellipsized ?: path);

                if (g->n_tasks_valid)
                        printf(" %7" PRIu64, g->n_tasks);
                else
                        fputs("       -", stdout);

                if (arg_cpu_type == CPU_PERCENT) {
                        if (g->cpu_valid)
                                printf(" %6.1f", g->cpu_fraction*100);
                        else
                                fputs("      -", stdout);
                } else
                        printf(" %*s", maxtcpu, format_timespan(buffer, sizeof(buffer), (usec_t) (g->cpu_usage / NSEC_PER_USEC), 0));

                printf(" %8s", maybe_format_bytes(buffer, sizeof(buffer), g->memory_valid, g->memory));
                printf(" %8s", maybe_format_bytes(buffer, sizeof(buffer), g->io_valid, g->io_input_bps));
                printf(" %8s", maybe_format_bytes(buffer, sizeof(buffer), g->io_valid, g->io_output_bps));

                putchar('\n');
        }
}

static int help(void) {
        _cleanup_free_ char *link = NULL;
        int r;

        r = terminal_urlify_man("systemd-cgtop", "1", &link);
        if (r < 0)
                return log_oom();

        printf("%s [OPTIONS...] [CGROUP]\n\n"
               "Show top control groups by their resource usage.\n\n"
               "  -h --help           Show this help\n"
               "     --version        Show package version\n"
               "  -p --order=path     Order by path\n"
               "  -t --order=tasks    Order by number of tasks/processes\n"
               "  -c --order=cpu      Order by CPU load (default)\n"
               "  -m --order=memory   Order by memory load\n"
               "  -i --order=io       Order by IO load\n"
               "  -r --raw            Provide raw (not human-readable) numbers\n"
               "     --cpu=percentage Show CPU usage as percentage (default)\n"
               "     --cpu=time       Show CPU usage as time\n"
               "  -P                  Count userspace processes instead of tasks (excl. kernel)\n"
               "  -k                  Count all processes instead of tasks (incl. kernel)\n"
               "     --recursive=BOOL Sum up process count recursively\n"
               "  -d --delay=DELAY    Delay between updates\n"
               "  -n --iterations=N   Run for N iterations before exiting\n"
               "  -1                  Shortcut for --iterations=1\n"
               "  -b --batch          Run in batch mode, accepting no input\n"
               "     --depth=DEPTH    Maximum traversal depth (default: %u)\n"
               "  -M --machine=       Show container\n"
               "\nSee the %s for details.\n"
               , program_invocation_short_name
               , arg_depth
               , link
        );

        return 0;
}

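/* Parses the command line. Returns a negative errno-style value on error, 0 if
 * the program should exit successfully right away (--help, --version), and a
 * positive value if it should continue. */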
static int parse_argv(int argc, char *argv[]) {

        enum {
                ARG_VERSION = 0x100,
                ARG_DEPTH,
                ARG_CPU_TYPE,
                ARG_ORDER,
                ARG_RECURSIVE,
        };

        static const struct option options[] = {
                { "help",       no_argument,       NULL, 'h'           },
                { "version",    no_argument,       NULL, ARG_VERSION   },
                { "delay",      required_argument, NULL, 'd'           },
                { "iterations", required_argument, NULL, 'n'           },
                { "batch",      no_argument,       NULL, 'b'           },
                { "raw",        no_argument,       NULL, 'r'           },
                { "depth",      required_argument, NULL, ARG_DEPTH     },
                { "cpu",        optional_argument, NULL, ARG_CPU_TYPE  },
                { "order",      required_argument, NULL, ARG_ORDER     },
                { "recursive",  required_argument, NULL, ARG_RECURSIVE },
                { "machine",    required_argument, NULL, 'M'           },
                {}
        };

        int c, r;

        assert(argc >= 1);
        assert(argv);

        while ((c = getopt_long(argc, argv, "hptcmin:brd:kPM:1", options, NULL)) >= 0)

                switch (c) {

                case 'h':
                        return help();

                case ARG_VERSION:
                        return version();

                case ARG_CPU_TYPE:
                        if (optarg) {
                                if (streq(optarg, "time"))
                                        arg_cpu_type = CPU_TIME;
                                else if (streq(optarg, "percentage"))
                                        arg_cpu_type = CPU_PERCENT;
                                else {
                                        log_error("Unknown argument to --cpu=: %s", optarg);
                                        return -EINVAL;
                                }
                        } else
                                arg_cpu_type = CPU_TIME;

                        break;

                case ARG_DEPTH:
                        r = safe_atou(optarg, &arg_depth);
                        if (r < 0)
                                return log_error_errno(r, "Failed to parse depth parameter '%s': %m", optarg);

                        break;

                case 'd':
                        r = parse_sec(optarg, &arg_delay);
                        if (r < 0)
                                return log_error_errno(r, "Failed to parse delay parameter '%s': %m", optarg);
                        if (arg_delay <= 0) {
                                log_error("Invalid delay parameter '%s'", optarg);
                                return -EINVAL;
                        }

                        break;

                case 'n':
                        r = safe_atou(optarg, &arg_iterations);
                        if (r < 0)
                                return log_error_errno(r, "Failed to parse iterations parameter '%s': %m", optarg);

                        break;

                case '1':
                        arg_iterations = 1;
                        break;

                case 'b':
                        arg_batch = true;
                        break;

                case 'r':
                        arg_raw = true;
                        break;

                case 'p':
                        arg_order = ORDER_PATH;
                        break;

                case 't':
                        arg_order = ORDER_TASKS;
                        break;

                case 'c':
                        arg_order = ORDER_CPU;
                        break;

                case 'm':
                        arg_order = ORDER_MEMORY;
                        break;

                case 'i':
                        arg_order = ORDER_IO;
                        break;

                case ARG_ORDER:
                        if (streq(optarg, "path"))
                                arg_order = ORDER_PATH;
                        else if (streq(optarg, "tasks"))
                                arg_order = ORDER_TASKS;
                        else if (streq(optarg, "cpu"))
                                arg_order = ORDER_CPU;
                        else if (streq(optarg, "memory"))
                                arg_order = ORDER_MEMORY;
                        else if (streq(optarg, "io"))
                                arg_order = ORDER_IO;
                        else {
                                log_error("Invalid argument to --order=: %s", optarg);
                                return -EINVAL;
                        }
                        break;

                case 'k':
                        arg_count = COUNT_ALL_PROCESSES;
                        break;

                case 'P':
                        arg_count = COUNT_USERSPACE_PROCESSES;
                        break;

                case ARG_RECURSIVE:
                        r = parse_boolean(optarg);
                        if (r < 0)
                                return log_error_errno(r, "Failed to parse --recursive= argument '%s': %m", optarg);

                        arg_recursive = r;
                        arg_recursive_unset = r == 0;
                        break;

                case 'M':
                        arg_machine = optarg;
                        break;

                case '?':
                        return -EINVAL;

                default:
                        assert_not_reached("Unhandled option");
                }

        if (optind == argc - 1)
                arg_root = argv[optind];
        else if (optind < argc) {
                log_error("Too many arguments.");
                return -EINVAL;
        }

        return 1;
}

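/* Describes the current counting mode, used by the interactive 'k'/'P' toggles
 * in main(). */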
static const char* counting_what(void) {
        if (arg_count == COUNT_PIDS)
                return "tasks";
        else if (arg_count == COUNT_ALL_PROCESSES)
                return "all processes (incl. kernel)";
        else
                return "userspace processes (excl. kernel)";
}

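/* Main loop: two hashmaps are kept, one with the current iteration's samples
 * and one with the previous iteration's. After each refresh the stale map is
 * cleared and the two are swapped, so display() always shows the freshest data
 * while rate calculations can still see the old counters. */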
int main(int argc, char *argv[]) {
        _cleanup_(group_hashmap_freep) Hashmap *a = NULL, *b = NULL;
        unsigned iteration = 0;
        usec_t last_refresh = 0;
        bool quit = false, immediate_refresh = false;
        _cleanup_free_ char *root = NULL;
        CGroupMask mask;
        int r;

        log_parse_environment();
        log_open();

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto finish;

        r = cg_mask_supported(&mask);
        if (r < 0) {
                log_error_errno(r, "Failed to determine supported controllers: %m");
                goto finish;
        }

        arg_count = (mask & CGROUP_MASK_PIDS) ? COUNT_PIDS : COUNT_USERSPACE_PROCESSES;

        if (arg_recursive_unset && arg_count == COUNT_PIDS) {
                log_error("Non-recursive counting is only supported when counting processes, not tasks. Use -P or -k.");
                return -EINVAL;
        }

        r = show_cgroup_get_path_and_warn(arg_machine, arg_root, &root);
        if (r < 0) {
                log_error_errno(r, "Failed to get root control group path: %m");
                goto finish;
        } else
                log_debug("Cgroup path: %s", root);

        a = hashmap_new(&path_hash_ops);
        b = hashmap_new(&path_hash_ops);
        if (!a || !b) {
                r = log_oom();
                goto finish;
        }

        signal(SIGWINCH, columns_lines_cache_reset);

        if (arg_iterations == (unsigned) -1)
                arg_iterations = on_tty() ? 0 : 1;

        while (!quit) {
                Hashmap *c;
                usec_t t;
                char key;
                char h[FORMAT_TIMESPAN_MAX];

                t = now(CLOCK_MONOTONIC);

                if (t >= last_refresh + arg_delay || immediate_refresh) {

                        r = refresh(root, a, b, iteration++);
                        if (r < 0) {
                                log_error_errno(r, "Failed to refresh: %m");
                                goto finish;
                        }

                        group_hashmap_clear(b);

                        c = a;
                        a = b;
                        b = c;

                        last_refresh = t;
                        immediate_refresh = false;
                }

                display(b);

                if (arg_iterations && iteration >= arg_iterations)
                        break;

                if (!on_tty()) /* non-TTY: Empty newline as delimiter between polls */
                        fputs("\n", stdout);
                fflush(stdout);

                if (arg_batch)
                        (void) usleep(last_refresh + arg_delay - t);
                else {
                        r = read_one_char(stdin, &key, last_refresh + arg_delay - t, NULL);
                        if (r == -ETIMEDOUT)
                                continue;
                        if (r < 0) {
                                log_error_errno(r, "Couldn't read key: %m");
                                goto finish;
                        }
                }

                if (on_tty()) { /* TTY: Clear any user keystroke */
                        fputs("\r \r", stdout);
                        fflush(stdout);
                }

                if (arg_batch)
                        continue;

                switch (key) {

                case ' ':
                        immediate_refresh = true;
                        break;

                case 'q':
                        quit = true;
                        break;

                case 'p':
                        arg_order = ORDER_PATH;
                        break;

                case 't':
                        arg_order = ORDER_TASKS;
                        break;

                case 'c':
                        arg_order = ORDER_CPU;
                        break;

                case 'm':
                        arg_order = ORDER_MEMORY;
                        break;

                case 'i':
                        arg_order = ORDER_IO;
                        break;

                case '%':
                        arg_cpu_type = arg_cpu_type == CPU_TIME ? CPU_PERCENT : CPU_TIME;
                        break;

                case 'k':
                        arg_count = arg_count != COUNT_ALL_PROCESSES ? COUNT_ALL_PROCESSES : COUNT_PIDS;
                        fprintf(stdout, "\nCounting: %s.", counting_what());
                        fflush(stdout);
                        sleep(1);
                        break;

                case 'P':
                        arg_count = arg_count != COUNT_USERSPACE_PROCESSES ? COUNT_USERSPACE_PROCESSES : COUNT_PIDS;
                        fprintf(stdout, "\nCounting: %s.", counting_what());
                        fflush(stdout);
                        sleep(1);
                        break;

                case 'r':
                        if (arg_count == COUNT_PIDS)
                                fprintf(stdout, "\n\aCannot toggle recursive counting, not available in task counting mode.");
                        else {
                                arg_recursive = !arg_recursive;
                                fprintf(stdout, "\nRecursive process counting: %s", yes_no(arg_recursive));
                        }
                        fflush(stdout);
                        sleep(1);
                        break;

                case '+':
                        if (arg_delay < USEC_PER_SEC)
                                arg_delay += USEC_PER_MSEC*250;
                        else
                                arg_delay += USEC_PER_SEC;

                        fprintf(stdout, "\nIncreased delay to %s.", format_timespan(h, sizeof(h), arg_delay, 0));
                        fflush(stdout);
                        sleep(1);
                        break;

                case '-':
                        if (arg_delay <= USEC_PER_MSEC*500)
                                arg_delay = USEC_PER_MSEC*250;
                        else if (arg_delay < USEC_PER_MSEC*1250)
                                arg_delay -= USEC_PER_MSEC*250;
                        else
                                arg_delay -= USEC_PER_SEC;

                        fprintf(stdout, "\nDecreased delay to %s.", format_timespan(h, sizeof(h), arg_delay, 0));
                        fflush(stdout);
                        sleep(1);
                        break;

                case '?':
                case 'h':

#define ON ANSI_HIGHLIGHT
#define OFF ANSI_NORMAL

                        fprintf(stdout,
                                "\t<" ON "p" OFF "> By path; <" ON "t" OFF "> By tasks/procs; <" ON "c" OFF "> By CPU; <" ON "m" OFF "> By memory; <" ON "i" OFF "> By I/O\n"
                                "\t<" ON "+" OFF "> Inc. delay; <" ON "-" OFF "> Dec. delay; <" ON "%%" OFF "> Toggle time; <" ON "SPACE" OFF "> Refresh\n"
                                "\t<" ON "P" OFF "> Toggle count userspace processes; <" ON "k" OFF "> Toggle count all processes\n"
                                "\t<" ON "r" OFF "> Count processes recursively; <" ON "q" OFF "> Quit");
                        fflush(stdout);
                        sleep(3);
                        break;

                default:
                        if (key < ' ')
                                fprintf(stdout, "\nUnknown key '\\x%x'. Ignoring.", key);
                        else
                                fprintf(stdout, "\nUnknown key '%c'. Ignoring.", key);
                        fflush(stdout);
                        sleep(1);
                        break;
                }
        }

        r = 0;

finish:

        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}