1 /* SPDX-License-Identifier: LGPL-2.1+ */
2 /***
3 Copyright 2012 Lennart Poettering
4 ***/
5
6 #include <alloca.h>
7 #include <errno.h>
8 #include <getopt.h>
9 #include <signal.h>
10 #include <stdint.h>
11 #include <stdlib.h>
12 #include <string.h>
13 #include <unistd.h>
14
15 #include "sd-bus.h"
16
17 #include "alloc-util.h"
18 #include "bus-error.h"
19 #include "bus-util.h"
20 #include "cgroup-show.h"
21 #include "cgroup-util.h"
22 #include "fd-util.h"
23 #include "fileio.h"
24 #include "hashmap.h"
25 #include "parse-util.h"
26 #include "path-util.h"
27 #include "process-util.h"
28 #include "procfs-util.h"
29 #include "stdio-util.h"
30 #include "strv.h"
31 #include "terminal-util.h"
32 #include "unit-name.h"
33 #include "util.h"
34 #include "virt.h"
35
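/* Per-cgroup accounting state. One Group is kept per cgroup path and carried across refresh
 * iterations, so that rates (CPU fraction, IO bytes per second) can be derived from the deltas
 * between the current and the previous sample. */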
36 typedef struct Group {
37 char *path;
38
39 bool n_tasks_valid:1;
40 bool cpu_valid:1;
41 bool memory_valid:1;
42 bool io_valid:1;
43
44 uint64_t n_tasks;
45
46 unsigned cpu_iteration;
47 nsec_t cpu_usage;
48 nsec_t cpu_timestamp;
49 double cpu_fraction;
50
51 uint64_t memory;
52
53 unsigned io_iteration;
54 uint64_t io_input, io_output;
55 nsec_t io_timestamp;
56 uint64_t io_input_bps, io_output_bps;
57 } Group;
58
59 static unsigned arg_depth = 3;
60 static unsigned arg_iterations = (unsigned) -1;
61 static bool arg_batch = false;
62 static bool arg_raw = false;
63 static usec_t arg_delay = 1*USEC_PER_SEC;
64 static char* arg_machine = NULL;
65 static char* arg_root = NULL;
66 static bool arg_recursive = true;
67 static bool arg_recursive_unset = false;
68
69 static enum {
70 COUNT_PIDS,
71 COUNT_USERSPACE_PROCESSES,
72 COUNT_ALL_PROCESSES,
73 } arg_count = COUNT_PIDS;
74
75 static enum {
76 ORDER_PATH,
77 ORDER_TASKS,
78 ORDER_CPU,
79 ORDER_MEMORY,
80 ORDER_IO,
81 } arg_order = ORDER_CPU;
82
83 static enum {
84 CPU_PERCENT,
85 CPU_TIME,
86 } arg_cpu_type = CPU_PERCENT;
87
88 static void group_free(Group *g) {
89 assert(g);
90
91 free(g->path);
92 free(g);
93 }
94
95 static void group_hashmap_clear(Hashmap *h) {
96 hashmap_clear_with_destructor(h, group_free);
97 }
98
99 static void group_hashmap_free(Hashmap *h) {
100 group_hashmap_clear(h);
101 hashmap_free(h);
102 }
103
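/* Format a byte count for display: "-" if the value is not valid, the raw integer when --raw was
 * passed, and a human-readable size via format_bytes() otherwise. */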
104 static const char *maybe_format_bytes(char *buf, size_t l, bool is_valid, uint64_t t) {
105 if (!is_valid)
106 return "-";
107 if (arg_raw) {
108 snprintf(buf, l, "%" PRIu64, t);
109 return buf;
110 }
111 return format_bytes(buf, l, t);
112 }
113
114 static bool is_root_cgroup(const char *path) {
115
116 /* Returns true if the specified path belongs to the root cgroup. The root cgroup is special on cgroup v2 as it
117 * carries only very few attributes, in order not to export multiple truths about the system state, as most
118 * information is available elsewhere in /proc anyway. We need to be able to deal with that, and need to get
119 * our data from different sources in that case.
120 *
121 * There's one extra complication in all of this, though 😣: if the path to the cgroup indicates we are in the
122 * root cgroup this might actually not be the case, because cgroup namespacing might be in effect
123 * (CLONE_NEWCGROUP). Since there's no nice way to distinguish a real cgroup root from a fake namespaced one, we
124 * do an explicit container check here, under the assumption that CLONE_NEWCGROUP is generally used when
125 * container managers are in use too.
126 *
127 * Note that checking for a container environment is kinda ugly, since in theory people could use cgtop from
128 * inside a container where cgroup namespacing is turned off to watch the host system. However, that's mostly a
129 * theoretical use case, and if people actually try it, all they'll lose is accounting for the top-level cgroup,
130 * which isn't too bad. */
131
132 if (detect_container() > 0)
133 return false;
134
135 return empty_or_root(path);
136 }
137
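/* Sample one controller's accounting data for a single cgroup path and store it in the matching
 * Group entry, either allocating a new entry in 'a' or recycling last iteration's entry from 'b'. */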
138 static int process(
139 const char *controller,
140 const char *path,
141 Hashmap *a,
142 Hashmap *b,
143 unsigned iteration,
144 Group **ret) {
145
146 Group *g;
147 int r, all_unified;
148
149 assert(controller);
150 assert(path);
151 assert(a);
152
153 all_unified = cg_all_unified();
154 if (all_unified < 0)
155 return all_unified;
156
157 g = hashmap_get(a, path);
158 if (!g) {
159 g = hashmap_get(b, path);
160 if (!g) {
161 g = new0(Group, 1);
162 if (!g)
163 return -ENOMEM;
164
165 g->path = strdup(path);
166 if (!g->path) {
167 group_free(g);
168 return -ENOMEM;
169 }
170
171 r = hashmap_put(a, g->path, g);
172 if (r < 0) {
173 group_free(g);
174 return r;
175 }
176 } else {
177 r = hashmap_move_one(a, b, path);
178 if (r < 0)
179 return r;
180
181 g->cpu_valid = g->memory_valid = g->io_valid = g->n_tasks_valid = false;
182 }
183 }
184
185 if (streq(controller, SYSTEMD_CGROUP_CONTROLLER) &&
186 IN_SET(arg_count, COUNT_ALL_PROCESSES, COUNT_USERSPACE_PROCESSES)) {
187 _cleanup_fclose_ FILE *f = NULL;
188 pid_t pid;
189
190 r = cg_enumerate_processes(controller, path, &f);
191 if (r == -ENOENT)
192 return 0;
193 if (r < 0)
194 return r;
195
196 g->n_tasks = 0;
197 while (cg_read_pid(f, &pid) > 0) {
198
199 if (arg_count == COUNT_USERSPACE_PROCESSES && is_kernel_thread(pid) > 0)
200 continue;
201
202 g->n_tasks++;
203 }
204
205 if (g->n_tasks > 0)
206 g->n_tasks_valid = true;
207
208 } else if (streq(controller, "pids") && arg_count == COUNT_PIDS) {
209
210 if (is_root_cgroup(path)) {
211 r = procfs_tasks_get_current(&g->n_tasks);
212 if (r < 0)
213 return r;
214 } else {
215 _cleanup_free_ char *p = NULL, *v = NULL;
216
217 r = cg_get_path(controller, path, "pids.current", &p);
218 if (r < 0)
219 return r;
220
221 r = read_one_line_file(p, &v);
222 if (r == -ENOENT)
223 return 0;
224 if (r < 0)
225 return r;
226
227 r = safe_atou64(v, &g->n_tasks);
228 if (r < 0)
229 return r;
230 }
231
232 if (g->n_tasks > 0)
233 g->n_tasks_valid = true;
234
235 } else if (STR_IN_SET(controller, "cpu", "cpuacct")) {
236 _cleanup_free_ char *p = NULL, *v = NULL;
237 uint64_t new_usage;
238 nsec_t timestamp;
239
240 if (is_root_cgroup(path)) {
241 r = procfs_cpu_get_usage(&new_usage);
242 if (r < 0)
243 return r;
244 } else if (all_unified) {
245 _cleanup_free_ char *val = NULL;
246
247 if (!streq(controller, "cpu"))
248 return 0;
249
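/* On the unified hierarchy cpu.stat carries keyed lines such as "usage_usec 1234" (value in
 * microseconds, example value illustrative); we read just usage_usec and convert it to
 * nanoseconds below. */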
250 r = cg_get_keyed_attribute("cpu", path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
251 if (IN_SET(r, -ENOENT, -ENXIO))
252 return 0;
253 if (r < 0)
254 return r;
255
256 r = safe_atou64(val, &new_usage);
257 if (r < 0)
258 return r;
259
260 new_usage *= NSEC_PER_USEC;
261 } else {
262 if (!streq(controller, "cpuacct"))
263 return 0;
264
265 r = cg_get_path(controller, path, "cpuacct.usage", &p);
266 if (r < 0)
267 return r;
268
269 r = read_one_line_file(p, &v);
270 if (r == -ENOENT)
271 return 0;
272 if (r < 0)
273 return r;
274
275 r = safe_atou64(v, &new_usage);
276 if (r < 0)
277 return r;
278 }
279
280 timestamp = now_nsec(CLOCK_MONOTONIC);
281
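/* Only derive a CPU fraction if this group was also sampled in the previous iteration: the
 * fraction is CPU nanoseconds consumed per nanosecond of wall-clock time elapsed, i.e. 1.0
 * corresponds to one fully busy CPU. */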
282 if (g->cpu_iteration == iteration - 1 &&
283 (nsec_t) new_usage > g->cpu_usage) {
284
285 nsec_t x, y;
286
287 x = timestamp - g->cpu_timestamp;
288 if (x < 1)
289 x = 1;
290
291 y = (nsec_t) new_usage - g->cpu_usage;
292 g->cpu_fraction = (double) y / (double) x;
293 g->cpu_valid = true;
294 }
295
296 g->cpu_usage = (nsec_t) new_usage;
297 g->cpu_timestamp = timestamp;
298 g->cpu_iteration = iteration;
299
300 } else if (streq(controller, "memory")) {
301
302 if (is_root_cgroup(path)) {
303 r = procfs_memory_get_current(&g->memory);
304 if (r < 0)
305 return r;
306 } else {
307 _cleanup_free_ char *p = NULL, *v = NULL;
308
309 if (all_unified)
310 r = cg_get_path(controller, path, "memory.current", &p);
311 else
312 r = cg_get_path(controller, path, "memory.usage_in_bytes", &p);
313 if (r < 0)
314 return r;
315
316 r = read_one_line_file(p, &v);
317 if (r == -ENOENT)
318 return 0;
319 if (r < 0)
320 return r;
321
322 r = safe_atou64(v, &g->memory);
323 if (r < 0)
324 return r;
325 }
326
327 if (g->memory > 0)
328 g->memory_valid = true;
329
330 } else if ((streq(controller, "io") && all_unified) ||
331 (streq(controller, "blkio") && !all_unified)) {
332 _cleanup_fclose_ FILE *f = NULL;
333 _cleanup_free_ char *p = NULL;
334 uint64_t wr = 0, rd = 0;
335 nsec_t timestamp;
336
337 r = cg_get_path(controller, path, all_unified ? "io.stat" : "blkio.io_service_bytes", &p);
338 if (r < 0)
339 return r;
340
341 f = fopen(p, "re");
342 if (!f) {
343 if (errno == ENOENT)
344 return 0;
345 return -errno;
346 }
347
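/* Each line starts with a device specifier (e.g. "8:0"). On the unified hierarchy io.stat then
 * carries key=value pairs ("rbytes=… wbytes=…"), while blkio.io_service_bytes on the legacy
 * hierarchy lists an operation name and a byte count ("Read 4096", "Write 8192"; values
 * illustrative). Only the read and write byte counts are summed up here. */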
348 for (;;) {
349 char line[LINE_MAX], *l;
350 uint64_t k, *q;
351
352 if (!fgets(line, sizeof(line), f))
353 break;
354
355 /* Trim and skip the device */
356 l = strstrip(line);
357 l += strcspn(l, WHITESPACE);
358 l += strspn(l, WHITESPACE);
359
360 if (all_unified) {
361 while (!isempty(l)) {
362 if (sscanf(l, "rbytes=%" SCNu64, &k) == 1)
363 rd += k;
364 else if (sscanf(l, "wbytes=%" SCNu64, &k) == 1)
365 wr += k;
366
367 l += strcspn(l, WHITESPACE);
368 l += strspn(l, WHITESPACE);
369 }
370 } else {
371 if (first_word(l, "Read")) {
372 l += 4;
373 q = &rd;
374 } else if (first_word(l, "Write")) {
375 l += 5;
376 q = &wr;
377 } else
378 continue;
379
380 l += strspn(l, WHITESPACE);
381 r = safe_atou64(l, &k);
382 if (r < 0)
383 continue;
384
385 *q += k;
386 }
387 }
388
389 timestamp = now_nsec(CLOCK_MONOTONIC);
390
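/* If we have a sample from the previous iteration, convert the byte deltas into bytes per
 * second. The timestamps are in nanoseconds, hence the factor of 10^9. */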
391 if (g->io_iteration == iteration - 1) {
392 uint64_t x, yr, yw;
393
394 x = (uint64_t) (timestamp - g->io_timestamp);
395 if (x < 1)
396 x = 1;
397
398 if (rd > g->io_input)
399 yr = rd - g->io_input;
400 else
401 yr = 0;
402
403 if (wr > g->io_output)
404 yw = wr - g->io_output;
405 else
406 yw = 0;
407
408 if (yr > 0 || yw > 0) {
409 g->io_input_bps = (yr * 1000000000ULL) / x;
410 g->io_output_bps = (yw * 1000000000ULL) / x;
411 g->io_valid = true;
412 }
413 }
414
415 g->io_input = rd;
416 g->io_output = wr;
417 g->io_timestamp = timestamp;
418 g->io_iteration = iteration;
419 }
420
421 if (ret)
422 *ret = g;
423
424 return 0;
425 }
426
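/* Recursively walk one controller's cgroup subtree below 'path', up to arg_depth levels deep,
 * processing each group and, when recursive counting is enabled, summing child process counts
 * into their parent. */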
427 static int refresh_one(
428 const char *controller,
429 const char *path,
430 Hashmap *a,
431 Hashmap *b,
432 unsigned iteration,
433 unsigned depth,
434 Group **ret) {
435
436 _cleanup_closedir_ DIR *d = NULL;
437 Group *ours = NULL;
438 int r;
439
440 assert(controller);
441 assert(path);
442 assert(a);
443
444 if (depth > arg_depth)
445 return 0;
446
447 r = process(controller, path, a, b, iteration, &ours);
448 if (r < 0)
449 return r;
450
451 r = cg_enumerate_subgroups(controller, path, &d);
452 if (r == -ENOENT)
453 return 0;
454 if (r < 0)
455 return r;
456
457 for (;;) {
458 _cleanup_free_ char *fn = NULL, *p = NULL;
459 Group *child = NULL;
460
461 r = cg_read_subgroup(d, &fn);
462 if (r < 0)
463 return r;
464 if (r == 0)
465 break;
466
467 p = strjoin(path, "/", fn);
468 if (!p)
469 return -ENOMEM;
470
471 path_simplify(p, false);
472
473 r = refresh_one(controller, p, a, b, iteration, depth + 1, &child);
474 if (r < 0)
475 return r;
476
477 if (arg_recursive &&
478 IN_SET(arg_count, COUNT_ALL_PROCESSES, COUNT_USERSPACE_PROCESSES) &&
479 child &&
480 child->n_tasks_valid &&
481 streq(controller, SYSTEMD_CGROUP_CONTROLLER)) {
482
483 /* Recursively sum up processes */
484
485 if (ours->n_tasks_valid)
486 ours->n_tasks += child->n_tasks;
487 else {
488 ours->n_tasks = child->n_tasks;
489 ours->n_tasks_valid = true;
490 }
491 }
492 }
493
494 if (ret)
495 *ret = ours;
496
497 return 1;
498 }
499
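/* Refresh data for every controller we know how to sample. Missing controllers or attribute
 * files (-ENOENT) are treated as "no data" rather than as errors. */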
500 static int refresh(const char *root, Hashmap *a, Hashmap *b, unsigned iteration) {
501 int r;
502
503 assert(a);
504
505 r = refresh_one(SYSTEMD_CGROUP_CONTROLLER, root, a, b, iteration, 0, NULL);
506 if (r < 0)
507 return r;
508 r = refresh_one("cpu", root, a, b, iteration, 0, NULL);
509 if (r < 0)
510 return r;
511 r = refresh_one("cpuacct", root, a, b, iteration, 0, NULL);
512 if (r < 0)
513 return r;
514 r = refresh_one("memory", root, a, b, iteration, 0, NULL);
515 if (r < 0)
516 return r;
517 r = refresh_one("io", root, a, b, iteration, 0, NULL);
518 if (r < 0)
519 return r;
520 r = refresh_one("blkio", root, a, b, iteration, 0, NULL);
521 if (r < 0)
522 return r;
523 r = refresh_one("pids", root, a, b, iteration, 0, NULL);
524 if (r < 0)
525 return r;
526
527 return 0;
528 }
529
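/* Comparison function for qsort_safe(): keep parents before their children (except when ordering
 * by non-recursive task counts), then order by the selected key, using the path as the final
 * tie-breaker. */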
530 static int group_compare(const void *a, const void *b) {
531 const Group *x = *(Group**)a, *y = *(Group**)b;
532
533 if (arg_order != ORDER_TASKS || arg_recursive) {
534 /* Let's make sure that the parent always comes before
535 * the child. Except when ordering by tasks with
536 * recursive summing turned off, since the task counts
537 * are then not cumulative across children. */
538
539 if (path_startswith(empty_to_root(y->path), empty_to_root(x->path)))
540 return -1;
541 if (path_startswith(empty_to_root(x->path), empty_to_root(y->path)))
542 return 1;
543 }
544
545 switch (arg_order) {
546
547 case ORDER_PATH:
548 break;
549
550 case ORDER_CPU:
551 if (arg_cpu_type == CPU_PERCENT) {
552 if (x->cpu_valid && y->cpu_valid) {
553 if (x->cpu_fraction > y->cpu_fraction)
554 return -1;
555 else if (x->cpu_fraction < y->cpu_fraction)
556 return 1;
557 } else if (x->cpu_valid)
558 return -1;
559 else if (y->cpu_valid)
560 return 1;
561 } else {
562 if (x->cpu_usage > y->cpu_usage)
563 return -1;
564 else if (x->cpu_usage < y->cpu_usage)
565 return 1;
566 }
567
568 break;
569
570 case ORDER_TASKS:
571 if (x->n_tasks_valid && y->n_tasks_valid) {
572 if (x->n_tasks > y->n_tasks)
573 return -1;
574 else if (x->n_tasks < y->n_tasks)
575 return 1;
576 } else if (x->n_tasks_valid)
577 return -1;
578 else if (y->n_tasks_valid)
579 return 1;
580
581 break;
582
583 case ORDER_MEMORY:
584 if (x->memory_valid && y->memory_valid) {
585 if (x->memory > y->memory)
586 return -1;
587 else if (x->memory < y->memory)
588 return 1;
589 } else if (x->memory_valid)
590 return -1;
591 else if (y->memory_valid)
592 return 1;
593
594 break;
595
596 case ORDER_IO:
597 if (x->io_valid && y->io_valid) {
598 if (x->io_input_bps + x->io_output_bps > y->io_input_bps + y->io_output_bps)
599 return -1;
600 else if (x->io_input_bps + x->io_output_bps < y->io_input_bps + y->io_output_bps)
601 return 1;
602 } else if (x->io_valid)
603 return -1;
604 else if (y->io_valid)
605 return 1;
606 }
607
608 return path_compare(x->path, y->path);
609 }
610
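/* Render one iteration: clear the screen on a capable terminal, sort the groups that have any
 * valid data, compute the column widths, then print the header and one row per control group. */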
611 static void display(Hashmap *a) {
612 Iterator i;
613 Group *g;
614 Group **array;
615 signed path_columns;
616 unsigned rows, n = 0, j, maxtcpu = 0, maxtpath = 3; /* 3 for ellipsize() to work properly */
617 char buffer[MAX3(21, FORMAT_BYTES_MAX, FORMAT_TIMESPAN_MAX)];
618
619 assert(a);
620
621 if (!terminal_is_dumb())
622 fputs(ANSI_HOME_CLEAR, stdout);
623
624 array = newa(Group*, hashmap_size(a));
625
626 HASHMAP_FOREACH(g, a, i)
627 if (g->n_tasks_valid || g->cpu_valid || g->memory_valid || g->io_valid)
628 array[n++] = g;
629
630 qsort_safe(array, n, sizeof(Group*), group_compare);
631
632 /* Find the longest names in one run */
633 for (j = 0; j < n; j++) {
634 unsigned cputlen, pathtlen;
635
636 format_timespan(buffer, sizeof(buffer), (usec_t) (array[j]->cpu_usage / NSEC_PER_USEC), 0);
637 cputlen = strlen(buffer);
638 maxtcpu = MAX(maxtcpu, cputlen);
639
640 pathtlen = strlen(array[j]->path);
641 maxtpath = MAX(maxtpath, pathtlen);
642 }
643
644 if (arg_cpu_type == CPU_PERCENT)
645 xsprintf(buffer, "%6s", "%CPU");
646 else
647 xsprintf(buffer, "%*s", maxtcpu, "CPU Time");
648
649 rows = lines();
650 if (rows <= 10)
651 rows = 10;
652
653 if (on_tty()) {
654 const char *on, *off;
655
656 path_columns = columns() - 36 - strlen(buffer);
657 if (path_columns < 10)
658 path_columns = 10;
659
660 on = ansi_highlight_underline();
661 off = ansi_underline();
662
663 printf("%s%s%-*s%s %s%7s%s %s%s%s %s%8s%s %s%8s%s %s%8s%s%s\n",
664 ansi_underline(),
665 arg_order == ORDER_PATH ? on : "", path_columns, "Control Group",
666 arg_order == ORDER_PATH ? off : "",
667 arg_order == ORDER_TASKS ? on : "", arg_count == COUNT_PIDS ? "Tasks" : arg_count == COUNT_USERSPACE_PROCESSES ? "Procs" : "Proc+",
668 arg_order == ORDER_TASKS ? off : "",
669 arg_order == ORDER_CPU ? on : "", buffer,
670 arg_order == ORDER_CPU ? off : "",
671 arg_order == ORDER_MEMORY ? on : "", "Memory",
672 arg_order == ORDER_MEMORY ? off : "",
673 arg_order == ORDER_IO ? on : "", "Input/s",
674 arg_order == ORDER_IO ? off : "",
675 arg_order == ORDER_IO ? on : "", "Output/s",
676 arg_order == ORDER_IO ? off : "",
677 ansi_normal());
678 } else
679 path_columns = maxtpath;
680
681 for (j = 0; j < n; j++) {
682 _cleanup_free_ char *ellipsized = NULL;
683 const char *path;
684
685 if (on_tty() && j + 6 > rows)
686 break;
687
688 g = array[j];
689
690 path = empty_to_root(g->path);
691 ellipsized = ellipsize(path, path_columns, 33);
692 printf("%-*s", path_columns, ellipsized ?: path);
693
694 if (g->n_tasks_valid)
695 printf(" %7" PRIu64, g->n_tasks);
696 else
697 fputs(" -", stdout);
698
699 if (arg_cpu_type == CPU_PERCENT) {
700 if (g->cpu_valid)
701 printf(" %6.1f", g->cpu_fraction*100);
702 else
703 fputs(" -", stdout);
704 } else
705 printf(" %*s", maxtcpu, format_timespan(buffer, sizeof(buffer), (usec_t) (g->cpu_usage / NSEC_PER_USEC), 0));
706
707 printf(" %8s", maybe_format_bytes(buffer, sizeof(buffer), g->memory_valid, g->memory));
708 printf(" %8s", maybe_format_bytes(buffer, sizeof(buffer), g->io_valid, g->io_input_bps));
709 printf(" %8s", maybe_format_bytes(buffer, sizeof(buffer), g->io_valid, g->io_output_bps));
710
711 putchar('\n');
712 }
713 }
714
715 static void help(void) {
716 printf("%s [OPTIONS...] [CGROUP]\n\n"
717 "Show top control groups by their resource usage.\n\n"
718 " -h --help Show this help\n"
719 " --version Show package version\n"
720 " -p --order=path Order by path\n"
721 " -t --order=tasks Order by number of tasks/processes\n"
722 " -c --order=cpu Order by CPU load (default)\n"
723 " -m --order=memory Order by memory load\n"
724 " -i --order=io Order by IO load\n"
725 " -r --raw Provide raw (not human-readable) numbers\n"
726 " --cpu=percentage Show CPU usage as percentage (default)\n"
727 " --cpu=time Show CPU usage as time\n"
728 " -P Count userspace processes instead of tasks (excl. kernel)\n"
729 " -k Count all processes instead of tasks (incl. kernel)\n"
730 " --recursive=BOOL Sum up process count recursively\n"
731 " -d --delay=DELAY Delay between updates\n"
732 " -n --iterations=N Run for N iterations before exiting\n"
733 " -1 Shortcut for --iterations=1\n"
734 " -b --batch Run in batch mode, accepting no input\n"
735 " --depth=DEPTH Maximum traversal depth (default: %u)\n"
736 " -M --machine= Show container\n"
737 , program_invocation_short_name, arg_depth);
738 }
739
740 static int parse_argv(int argc, char *argv[]) {
741
742 enum {
743 ARG_VERSION = 0x100,
744 ARG_DEPTH,
745 ARG_CPU_TYPE,
746 ARG_ORDER,
747 ARG_RECURSIVE,
748 };
749
750 static const struct option options[] = {
751 { "help", no_argument, NULL, 'h' },
752 { "version", no_argument, NULL, ARG_VERSION },
753 { "delay", required_argument, NULL, 'd' },
754 { "iterations", required_argument, NULL, 'n' },
755 { "batch", no_argument, NULL, 'b' },
756 { "raw", no_argument, NULL, 'r' },
757 { "depth", required_argument, NULL, ARG_DEPTH },
758 { "cpu", optional_argument, NULL, ARG_CPU_TYPE },
759 { "order", required_argument, NULL, ARG_ORDER },
760 { "recursive", required_argument, NULL, ARG_RECURSIVE },
761 { "machine", required_argument, NULL, 'M' },
762 {}
763 };
764
765 int c, r;
766
767 assert(argc >= 1);
768 assert(argv);
769
770 while ((c = getopt_long(argc, argv, "hptcmin:brd:kPM:1", options, NULL)) >= 0)
771
772 switch (c) {
773
774 case 'h':
775 help();
776 return 0;
777
778 case ARG_VERSION:
779 return version();
780
781 case ARG_CPU_TYPE:
782 if (optarg) {
783 if (streq(optarg, "time"))
784 arg_cpu_type = CPU_TIME;
785 else if (streq(optarg, "percentage"))
786 arg_cpu_type = CPU_PERCENT;
787 else {
788 log_error("Unknown argument to --cpu=: %s", optarg);
789 return -EINVAL;
790 }
791 } else
792 arg_cpu_type = CPU_TIME;
793
794 break;
795
796 case ARG_DEPTH:
797 r = safe_atou(optarg, &arg_depth);
798 if (r < 0)
799 return log_error_errno(r, "Failed to parse depth parameter: %s", optarg);
800
801 break;
802
803 case 'd':
804 r = parse_sec(optarg, &arg_delay);
805 if (r < 0 || arg_delay <= 0) {
806 log_error("Failed to parse delay parameter: %s", optarg);
807 return -EINVAL;
808 }
809
810 break;
811
812 case 'n':
813 r = safe_atou(optarg, &arg_iterations);
814 if (r < 0)
815 return log_error_errno(r, "Failed to parse iterations parameter: %s", optarg);
816
817 break;
818
819 case '1':
820 arg_iterations = 1;
821 break;
822
823 case 'b':
824 arg_batch = true;
825 break;
826
827 case 'r':
828 arg_raw = true;
829 break;
830
831 case 'p':
832 arg_order = ORDER_PATH;
833 break;
834
835 case 't':
836 arg_order = ORDER_TASKS;
837 break;
838
839 case 'c':
840 arg_order = ORDER_CPU;
841 break;
842
843 case 'm':
844 arg_order = ORDER_MEMORY;
845 break;
846
847 case 'i':
848 arg_order = ORDER_IO;
849 break;
850
851 case ARG_ORDER:
852 if (streq(optarg, "path"))
853 arg_order = ORDER_PATH;
854 else if (streq(optarg, "tasks"))
855 arg_order = ORDER_TASKS;
856 else if (streq(optarg, "cpu"))
857 arg_order = ORDER_CPU;
858 else if (streq(optarg, "memory"))
859 arg_order = ORDER_MEMORY;
860 else if (streq(optarg, "io"))
861 arg_order = ORDER_IO;
862 else {
863 log_error("Invalid argument to --order=: %s", optarg);
864 return -EINVAL;
865 }
866 break;
867
868 case 'k':
869 arg_count = COUNT_ALL_PROCESSES;
870 break;
871
872 case 'P':
873 arg_count = COUNT_USERSPACE_PROCESSES;
874 break;
875
876 case ARG_RECURSIVE:
877 r = parse_boolean(optarg);
878 if (r < 0)
879 return log_error_errno(r, "Failed to parse --recursive= argument: %s", optarg);
880
881 arg_recursive = r;
882 arg_recursive_unset = r == 0;
883 break;
884
885 case 'M':
886 arg_machine = optarg;
887 break;
888
889 case '?':
890 return -EINVAL;
891
892 default:
893 assert_not_reached("Unhandled option");
894 }
895
896 if (optind == argc - 1)
897 arg_root = argv[optind];
898 else if (optind < argc) {
899 log_error("Too many arguments.");
900 return -EINVAL;
901 }
902
903 return 1;
904 }
905
906 static const char* counting_what(void) {
907 if (arg_count == COUNT_PIDS)
908 return "tasks";
909 else if (arg_count == COUNT_ALL_PROCESSES)
910 return "all processes (incl. kernel)";
911 else
912 return "userspace processes (excl. kernel)";
913 }
914
915 int main(int argc, char *argv[]) {
916 int r;
917 Hashmap *a = NULL, *b = NULL;
918 unsigned iteration = 0;
919 usec_t last_refresh = 0;
920 bool quit = false, immediate_refresh = false;
921 _cleanup_free_ char *root = NULL;
922 CGroupMask mask;
923
924 log_parse_environment();
925 log_open();
926
927 r = parse_argv(argc, argv);
928 if (r <= 0)
929 goto finish;
930
931 r = cg_mask_supported(&mask);
932 if (r < 0) {
933 log_error_errno(r, "Failed to determine supported controllers: %m");
934 goto finish;
935 }
936
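/* Pick the counting mode from the available controllers: count tasks via the pids controller if
 * it is usable, otherwise count userspace processes. Note that this assignment happens after
 * option parsing, so it also overrides any -P/-k given on the command line. */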
937 arg_count = (mask & CGROUP_MASK_PIDS) ? COUNT_PIDS : COUNT_USERSPACE_PROCESSES;
938
939 if (arg_recursive_unset && arg_count == COUNT_PIDS) {
940 log_error("Non-recursive counting is only supported when counting processes, not tasks. Use -P or -k.");
941 return -EINVAL;
942 }
943
944 r = show_cgroup_get_path_and_warn(arg_machine, arg_root, &root);
945 if (r < 0) {
946 log_error_errno(r, "Failed to get root control group path: %m");
947 goto finish;
948 } else
949 log_debug("Cgroup path: %s", root);
950
951 a = hashmap_new(&path_hash_ops);
952 b = hashmap_new(&path_hash_ops);
953 if (!a || !b) {
954 r = log_oom();
955 goto finish;
956 }
957
958 signal(SIGWINCH, columns_lines_cache_reset);
959
960 if (arg_iterations == (unsigned) -1)
961 arg_iterations = on_tty() ? 0 : 1;
962
963 while (!quit) {
964 Hashmap *c;
965 usec_t t;
966 char key;
967 char h[FORMAT_TIMESPAN_MAX];
968
969 t = now(CLOCK_MONOTONIC);
970
971 if (t >= last_refresh + arg_delay || immediate_refresh) {
972
973 r = refresh(root, a, b, iteration++);
974 if (r < 0) {
975 log_error_errno(r, "Failed to refresh: %m");
976 goto finish;
977 }
978
979 group_hashmap_clear(b);
980
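/* Swap the two maps: the freshly refreshed data ends up in 'b', which display() renders below,
 * while the now-cleared 'a' will collect the next iteration's samples. */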
981 c = a;
982 a = b;
983 b = c;
984
985 last_refresh = t;
986 immediate_refresh = false;
987 }
988
989 display(b);
990
991 if (arg_iterations && iteration >= arg_iterations)
992 break;
993
994 if (!on_tty()) /* non-TTY: print an empty line as delimiter between polls */
995 fputs("\n", stdout);
996 fflush(stdout);
997
998 if (arg_batch)
999 (void) usleep(last_refresh + arg_delay - t);
1000 else {
1001 r = read_one_char(stdin, &key, last_refresh + arg_delay - t, NULL);
1002 if (r == -ETIMEDOUT)
1003 continue;
1004 if (r < 0) {
1005 log_error_errno(r, "Couldn't read key: %m");
1006 goto finish;
1007 }
1008 }
1009
1010 if (on_tty()) { /* TTY: Clear any user keystroke */
1011 fputs("\r \r", stdout);
1012 fflush(stdout);
1013 }
1014
1015 if (arg_batch)
1016 continue;
1017
1018 switch (key) {
1019
1020 case ' ':
1021 immediate_refresh = true;
1022 break;
1023
1024 case 'q':
1025 quit = true;
1026 break;
1027
1028 case 'p':
1029 arg_order = ORDER_PATH;
1030 break;
1031
1032 case 't':
1033 arg_order = ORDER_TASKS;
1034 break;
1035
1036 case 'c':
1037 arg_order = ORDER_CPU;
1038 break;
1039
1040 case 'm':
1041 arg_order = ORDER_MEMORY;
1042 break;
1043
1044 case 'i':
1045 arg_order = ORDER_IO;
1046 break;
1047
1048 case '%':
1049 arg_cpu_type = arg_cpu_type == CPU_TIME ? CPU_PERCENT : CPU_TIME;
1050 break;
1051
1052 case 'k':
1053 arg_count = arg_count != COUNT_ALL_PROCESSES ? COUNT_ALL_PROCESSES : COUNT_PIDS;
1054 fprintf(stdout, "\nCounting: %s.", counting_what());
1055 fflush(stdout);
1056 sleep(1);
1057 break;
1058
1059 case 'P':
1060 arg_count = arg_count != COUNT_USERSPACE_PROCESSES ? COUNT_USERSPACE_PROCESSES : COUNT_PIDS;
1061 fprintf(stdout, "\nCounting: %s.", counting_what());
1062 fflush(stdout);
1063 sleep(1);
1064 break;
1065
1066 case 'r':
1067 if (arg_count == COUNT_PIDS)
1068 fprintf(stdout, "\n\aCannot toggle recursive counting, not available in task counting mode.");
1069 else {
1070 arg_recursive = !arg_recursive;
1071 fprintf(stdout, "\nRecursive process counting: %s", yes_no(arg_recursive));
1072 }
1073 fflush(stdout);
1074 sleep(1);
1075 break;
1076
1077 case '+':
1078 if (arg_delay < USEC_PER_SEC)
1079 arg_delay += USEC_PER_MSEC*250;
1080 else
1081 arg_delay += USEC_PER_SEC;
1082
1083 fprintf(stdout, "\nIncreased delay to %s.", format_timespan(h, sizeof(h), arg_delay, 0));
1084 fflush(stdout);
1085 sleep(1);
1086 break;
1087
1088 case '-':
1089 if (arg_delay <= USEC_PER_MSEC*500)
1090 arg_delay = USEC_PER_MSEC*250;
1091 else if (arg_delay < USEC_PER_MSEC*1250)
1092 arg_delay -= USEC_PER_MSEC*250;
1093 else
1094 arg_delay -= USEC_PER_SEC;
1095
1096 fprintf(stdout, "\nDecreased delay to %s.", format_timespan(h, sizeof(h), arg_delay, 0));
1097 fflush(stdout);
1098 sleep(1);
1099 break;
1100
1101 case '?':
1102 case 'h':
1103
1104 #define ON ANSI_HIGHLIGHT
1105 #define OFF ANSI_NORMAL
1106
1107 fprintf(stdout,
1108 "\t<" ON "p" OFF "> By path; <" ON "t" OFF "> By tasks/procs; <" ON "c" OFF "> By CPU; <" ON "m" OFF "> By memory; <" ON "i" OFF "> By I/O\n"
1109 "\t<" ON "+" OFF "> Inc. delay; <" ON "-" OFF "> Dec. delay; <" ON "%%" OFF "> Toggle time; <" ON "SPACE" OFF "> Refresh\n"
1110 "\t<" ON "P" OFF "> Toggle count userspace processes; <" ON "k" OFF "> Toggle count all processes\n"
1111 "\t<" ON "r" OFF "> Count processes recursively; <" ON "q" OFF "> Quit");
1112 fflush(stdout);
1113 sleep(3);
1114 break;
1115
1116 default:
1117 if (key < ' ')
1118 fprintf(stdout, "\nUnknown key '\\x%x'. Ignoring.", key);
1119 else
1120 fprintf(stdout, "\nUnknown key '%c'. Ignoring.", key);
1121 fflush(stdout);
1122 sleep(1);
1123 break;
1124 }
1125 }
1126
1127 r = 0;
1128
1129 finish:
1130 group_hashmap_free(a);
1131 group_hashmap_free(b);
1132
1133 return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
1134 }