src/cgtop/cgtop.c
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <errno.h>
4 #include <getopt.h>
5 #include <signal.h>
6 #include <stdint.h>
7 #include <stdlib.h>
8 #include <unistd.h>
9
10 #include "sd-bus.h"
11
12 #include "alloc-util.h"
13 #include "build.h"
14 #include "bus-error.h"
15 #include "bus-util.h"
16 #include "cgroup-show.h"
17 #include "cgroup-util.h"
18 #include "fd-util.h"
19 #include "fileio.h"
20 #include "hashmap.h"
21 #include "main-func.h"
22 #include "missing_sched.h"
23 #include "parse-argument.h"
24 #include "parse-util.h"
25 #include "path-util.h"
26 #include "pretty-print.h"
27 #include "process-util.h"
28 #include "procfs-util.h"
29 #include "sort-util.h"
30 #include "stdio-util.h"
31 #include "strv.h"
32 #include "terminal-util.h"
33 #include "unit-name.h"
34 #include "virt.h"
35
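/* Per-cgroup sample: validity flags plus the last raw counters and timestamps, kept across iterations so rates can be derived from the deltas. */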
36 typedef struct Group {
37 char *path;
38
39 bool n_tasks_valid;
40 bool cpu_valid;
41 bool memory_valid;
42 bool io_valid;
43
44 uint64_t n_tasks;
45
46 unsigned cpu_iteration;
47 nsec_t cpu_usage;
48 nsec_t cpu_timestamp;
49 double cpu_fraction;
50
51 uint64_t memory;
52
53 unsigned io_iteration;
54 uint64_t io_input, io_output;
55 nsec_t io_timestamp;
56 uint64_t io_input_bps, io_output_bps;
57 } Group;
58
59 /* Counted objects, enum order matters */
60 typedef enum PidsCount {
61 COUNT_USERSPACE_PROCESSES, /* least */
62 COUNT_ALL_PROCESSES,
63 COUNT_PIDS, /* most, requires pids controller */
64 } PidsCount;
65
66 static unsigned arg_depth = 3;
67 static unsigned arg_iterations = UINT_MAX;
68 static bool arg_batch = false;
69 static bool arg_raw = false;
70 static usec_t arg_delay = 1*USEC_PER_SEC;
71 static char* arg_machine = NULL;
72 static char* arg_root = NULL;
73 static bool arg_recursive = true;
74 static bool arg_recursive_unset = false;
75
76 static PidsCount arg_count = COUNT_PIDS;
77
78 static enum {
79 ORDER_PATH,
80 ORDER_TASKS,
81 ORDER_CPU,
82 ORDER_MEMORY,
83 ORDER_IO,
84 } arg_order = ORDER_CPU;
85
86 static enum {
87 CPU_PERCENT,
88 CPU_TIME,
89 } arg_cpu_type = CPU_PERCENT;
90
91 static Group *group_free(Group *g) {
92 if (!g)
93 return NULL;
94
95 free(g->path);
96 return mfree(g);
97 }
98
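/* Hash map keyed by cgroup path; the value destructor frees the Group (and its path) whenever an entry is dropped. */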
99 DEFINE_PRIVATE_HASH_OPS_WITH_VALUE_DESTRUCTOR(group_hash_ops, char, path_hash_func, path_compare, Group, group_free);
100
101 static const char *maybe_format_timespan(char *buf, size_t l, usec_t t, usec_t accuracy) {
102 if (arg_raw) {
103 (void) snprintf(buf, l, USEC_FMT, t);
104 return buf;
105 }
106 return format_timespan(buf, l, t, accuracy);
107 }
108
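/* The macro below formats into a compound-literal scratch buffer on the caller's stack, sized for whichever representation (human-readable timespan or raw decimal) is larger. */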
109 #define BUFSIZE1 CONST_MAX(FORMAT_TIMESPAN_MAX, DECIMAL_STR_MAX(usec_t))
110 #define MAYBE_FORMAT_TIMESPAN(t, accuracy) \
111 maybe_format_timespan((char[BUFSIZE1]){}, BUFSIZE1, t, accuracy)
112
113 static const char *maybe_format_bytes(char *buf, size_t l, bool is_valid, uint64_t t) {
114 if (!is_valid)
115 return "-";
116 if (arg_raw) {
117 (void) snprintf(buf, l, "%" PRIu64, t);
118 return buf;
119 }
120 return format_bytes(buf, l, t);
121 }
122
123 #define BUFSIZE2 CONST_MAX(FORMAT_BYTES_MAX, DECIMAL_STR_MAX(uint64_t))
124 #define MAYBE_FORMAT_BYTES(is_valid, t) \
125 maybe_format_bytes((char[BUFSIZE2]){}, BUFSIZE2, is_valid, t)
126
127 static bool is_root_cgroup(const char *path) {
128
129 /* Returns true if the specified path belongs to the root cgroup. The root cgroup is special on cgroup v2 as it
130 * carries only very few attributes in order not to export multiple truths about system state as most
131 * information is available elsewhere in /proc anyway. We need to be able to deal with that, and need to get
132 * our data from different sources in that case.
133 *
134 * There's one extra complication in all of this, though 😣: if the path to the cgroup indicates we are in the
135 * root cgroup this might actually not be the case, because cgroup namespacing might be in effect
136 * (CLONE_NEWCGROUP). Since there's no nice way to distinguish a real cgroup root from a fake namespaced one we
137 * do an explicit container check here, under the assumption that CLONE_NEWCGROUP is generally used when
138 * container managers are used too.
139 *
140 * Note that checking for a container environment is kinda ugly, since in theory people could use cgtop from
141 * inside a container where cgroup namespacing is turned off to watch the host system. However, that's mostly a
142 * theoretical use case, and if people actually try, all they'll lose is accounting for the top-level cgroup. Which
143 * isn't too bad. */
144
145 if (detect_container() > 0)
146 return false;
147
148 return empty_or_root(path);
149 }
150
151 static int process(
152 const char *controller,
153 const char *path,
154 Hashmap *a,
155 Hashmap *b,
156 unsigned iteration,
157 Group **ret) {
158
159 Group *g;
160 int r, all_unified;
161
162 assert(controller);
163 assert(path);
164 assert(a);
165
166 all_unified = cg_all_unified();
167 if (all_unified < 0)
168 return all_unified;
169
170 g = hashmap_get(a, path);
171 if (!g) {
172 g = hashmap_get(b, path);
173 if (!g) {
174 g = new0(Group, 1);
175 if (!g)
176 return -ENOMEM;
177
178 g->path = strdup(path);
179 if (!g->path) {
180 group_free(g);
181 return -ENOMEM;
182 }
183
184 r = hashmap_put(a, g->path, g);
185 if (r < 0) {
186 group_free(g);
187 return r;
188 }
189 } else {
190 r = hashmap_move_one(a, b, path);
191 if (r < 0)
192 return r;
193
194 g->cpu_valid = g->memory_valid = g->io_valid = g->n_tasks_valid = false;
195 }
196 }
197
198 if (streq(controller, SYSTEMD_CGROUP_CONTROLLER) &&
199 IN_SET(arg_count, COUNT_ALL_PROCESSES, COUNT_USERSPACE_PROCESSES)) {
200 _cleanup_fclose_ FILE *f = NULL;
201 pid_t pid;
202
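                /* Count the PIDs listed for this cgroup, skipping kernel threads when only userspace processes were requested (-P). */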
203 r = cg_enumerate_processes(controller, path, &f);
204 if (r == -ENOENT)
205 return 0;
206 if (r < 0)
207 return r;
208
209 g->n_tasks = 0;
210 while (cg_read_pid(f, &pid) > 0) {
211
212 if (arg_count == COUNT_USERSPACE_PROCESSES && is_kernel_thread(pid) > 0)
213 continue;
214
215 g->n_tasks++;
216 }
217
218 if (g->n_tasks > 0)
219 g->n_tasks_valid = true;
220
221 } else if (streq(controller, "pids") && arg_count == COUNT_PIDS) {
222
223 if (is_root_cgroup(path)) {
224 r = procfs_tasks_get_current(&g->n_tasks);
225 if (r < 0)
226 return r;
227 } else {
228 _cleanup_free_ char *p = NULL, *v = NULL;
229
230 r = cg_get_path(controller, path, "pids.current", &p);
231 if (r < 0)
232 return r;
233
234 r = read_one_line_file(p, &v);
235 if (r == -ENOENT)
236 return 0;
237 if (r < 0)
238 return r;
239
240 r = safe_atou64(v, &g->n_tasks);
241 if (r < 0)
242 return r;
243 }
244
245 if (g->n_tasks > 0)
246 g->n_tasks_valid = true;
247
248 } else if (streq(controller, "memory")) {
249
250 if (is_root_cgroup(path)) {
251 r = procfs_memory_get_used(&g->memory);
252 if (r < 0)
253 return r;
254 } else {
255 _cleanup_free_ char *p = NULL, *v = NULL;
256
257 if (all_unified)
258 r = cg_get_path(controller, path, "memory.current", &p);
259 else
260 r = cg_get_path(controller, path, "memory.usage_in_bytes", &p);
261 if (r < 0)
262 return r;
263
264 r = read_one_line_file(p, &v);
265 if (r == -ENOENT)
266 return 0;
267 if (r < 0)
268 return r;
269
270 r = safe_atou64(v, &g->memory);
271 if (r < 0)
272 return r;
273 }
274
275 if (g->memory > 0)
276 g->memory_valid = true;
277
278 } else if ((streq(controller, "io") && all_unified) ||
279 (streq(controller, "blkio") && !all_unified)) {
280 _cleanup_fclose_ FILE *f = NULL;
281 _cleanup_free_ char *p = NULL;
282 uint64_t wr = 0, rd = 0;
283 nsec_t timestamp;
284
285 r = cg_get_path(controller, path, all_unified ? "io.stat" : "blkio.io_service_bytes", &p);
286 if (r < 0)
287 return r;
288
289 f = fopen(p, "re");
290 if (!f) {
291 if (errno == ENOENT)
292 return 0;
293 return -errno;
294 }
295
296 for (;;) {
297 _cleanup_free_ char *line = NULL;
298 uint64_t k, *q;
299 char *l;
300
301 r = read_line(f, LONG_LINE_MAX, &line);
302 if (r < 0)
303 return r;
304 if (r == 0)
305 break;
306
307 /* Trim and skip the device */
308 l = strstrip(line);
309 l += strcspn(l, WHITESPACE);
310 l += strspn(l, WHITESPACE);
311
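                        /* cgroup v2 io.stat lines carry key=value pairs (rbytes=, wbytes=); legacy blkio.io_service_bytes lists "Read"/"Write" rows which we sum up across devices. */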
312 if (all_unified) {
313 while (!isempty(l)) {
314 if (sscanf(l, "rbytes=%" SCNu64, &k))
315 rd += k;
316 else if (sscanf(l, "wbytes=%" SCNu64, &k))
317 wr += k;
318
319 l += strcspn(l, WHITESPACE);
320 l += strspn(l, WHITESPACE);
321 }
322 } else {
323 if (first_word(l, "Read")) {
324 l += 4;
325 q = &rd;
326 } else if (first_word(l, "Write")) {
327 l += 5;
328 q = &wr;
329 } else
330 continue;
331
332 l += strspn(l, WHITESPACE);
333 r = safe_atou64(l, &k);
334 if (r < 0)
335 continue;
336
337 *q += k;
338 }
339 }
340
341 timestamp = now_nsec(CLOCK_MONOTONIC);
342
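                /* Only derive rates if the previous sample is exactly one iteration old; timestamps are in ns, so the 10^9 factor converts the byte delta into bytes per second. */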
343 if (g->io_iteration == iteration - 1) {
344 uint64_t x, yr, yw;
345
346 x = (uint64_t) (timestamp - g->io_timestamp);
347 if (x < 1)
348 x = 1;
349
350 if (rd > g->io_input)
351 yr = rd - g->io_input;
352 else
353 yr = 0;
354
355 if (wr > g->io_output)
356 yw = wr - g->io_output;
357 else
358 yw = 0;
359
360 if (yr > 0 || yw > 0) {
361 g->io_input_bps = (yr * 1000000000ULL) / x;
362 g->io_output_bps = (yw * 1000000000ULL) / x;
363 g->io_valid = true;
364 }
365 }
366
367 g->io_input = rd;
368 g->io_output = wr;
369 g->io_timestamp = timestamp;
370 g->io_iteration = iteration;
371 } else if (STR_IN_SET(controller, "cpu", "cpuacct") || cpu_accounting_is_cheap()) {
372 _cleanup_free_ char *p = NULL, *v = NULL;
373 uint64_t new_usage;
374 nsec_t timestamp;
375
376 if (is_root_cgroup(path)) {
377 r = procfs_cpu_get_usage(&new_usage);
378 if (r < 0)
379 return r;
380 } else if (all_unified) {
381 _cleanup_free_ char *val = NULL;
382
383 if (!streq(controller, "cpu"))
384 return 0;
385
386 r = cg_get_keyed_attribute("cpu", path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
387 if (IN_SET(r, -ENOENT, -ENXIO))
388 return 0;
389 if (r < 0)
390 return r;
391
392 r = safe_atou64(val, &new_usage);
393 if (r < 0)
394 return r;
395
396 new_usage *= NSEC_PER_USEC;
397 } else {
398 if (!streq(controller, "cpuacct"))
399 return 0;
400
401 r = cg_get_path(controller, path, "cpuacct.usage", &p);
402 if (r < 0)
403 return r;
404
405 r = read_one_line_file(p, &v);
406 if (r == -ENOENT)
407 return 0;
408 if (r < 0)
409 return r;
410
411 r = safe_atou64(v, &new_usage);
412 if (r < 0)
413 return r;
414 }
415
416 timestamp = now_nsec(CLOCK_MONOTONIC);
417
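                /* cpu_fraction is CPU nanoseconds consumed divided by wall-clock nanoseconds elapsed since the previous iteration, i.e. 1.0 corresponds to one fully busy CPU. */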
418 if (g->cpu_iteration == iteration - 1 &&
419 (nsec_t) new_usage > g->cpu_usage) {
420
421 nsec_t x, y;
422
423 x = timestamp - g->cpu_timestamp;
424 if (x < 1)
425 x = 1;
426
427 y = (nsec_t) new_usage - g->cpu_usage;
428 g->cpu_fraction = (double) y / (double) x;
429 g->cpu_valid = true;
430 }
431
432 g->cpu_usage = (nsec_t) new_usage;
433 g->cpu_timestamp = timestamp;
434 g->cpu_iteration = iteration;
435
436 }
437
438 if (ret)
439 *ret = g;
440
441 return 0;
442 }
443
444 static int refresh_one(
445 const char *controller,
446 const char *path,
447 Hashmap *a,
448 Hashmap *b,
449 unsigned iteration,
450 unsigned depth,
451 Group **ret) {
452
453 _cleanup_closedir_ DIR *d = NULL;
454 Group *ours = NULL;
455 int r;
456
457 assert(controller);
458 assert(path);
459 assert(a);
460
461 if (depth > arg_depth)
462 return 0;
463
464 r = process(controller, path, a, b, iteration, &ours);
465 if (r < 0)
466 return r;
467
468 r = cg_enumerate_subgroups(controller, path, &d);
469 if (r == -ENOENT)
470 return 0;
471 if (r < 0)
472 return r;
473
474 for (;;) {
475 _cleanup_free_ char *fn = NULL, *p = NULL;
476 Group *child = NULL;
477
478 r = cg_read_subgroup(d, &fn);
479 if (r < 0)
480 return r;
481 if (r == 0)
482 break;
483
484 p = path_join(path, fn);
485 if (!p)
486 return -ENOMEM;
487
488 path_simplify(p);
489
490 r = refresh_one(controller, p, a, b, iteration, depth + 1, &child);
491 if (r < 0)
492 return r;
493
494 if (arg_recursive &&
495 IN_SET(arg_count, COUNT_ALL_PROCESSES, COUNT_USERSPACE_PROCESSES) &&
496 child &&
497 child->n_tasks_valid &&
498 streq(controller, SYSTEMD_CGROUP_CONTROLLER)) {
499
500 /* Recursively sum up processes */
501
502 if (ours->n_tasks_valid)
503 ours->n_tasks += child->n_tasks;
504 else {
505 ours->n_tasks = child->n_tasks;
506 ours->n_tasks_valid = true;
507 }
508 }
509 }
510
511 if (ret)
512 *ret = ours;
513
514 return 1;
515 }
516
517 static int refresh(const char *root, Hashmap *a, Hashmap *b, unsigned iteration) {
518 int r;
519
520 FOREACH_STRING(c, SYSTEMD_CGROUP_CONTROLLER, "cpu", "cpuacct", "memory", "io", "blkio", "pids") {
521 r = refresh_one(c, root, a, b, iteration, 0, NULL);
522 if (r < 0)
523 return r;
524 }
525
526 return 0;
527 }
528
529 static int group_compare(Group * const *a, Group * const *b) {
530 const Group *x = *a, *y = *b;
531 int r;
532
533 if (arg_order != ORDER_TASKS || arg_recursive) {
534 /* Let's make sure that the parent is always before
535 * the child. Except when ordering by tasks and
536 * recursive summing is off, since that is actually
537 * not cumulative over the children. */
538
539 if (path_startswith(empty_to_root(y->path), empty_to_root(x->path)))
540 return -1;
541 if (path_startswith(empty_to_root(x->path), empty_to_root(y->path)))
542 return 1;
543 }
544
545 switch (arg_order) {
546
547 case ORDER_PATH:
548 break;
549
550 case ORDER_CPU:
551 if (arg_cpu_type == CPU_PERCENT) {
552 if (x->cpu_valid && y->cpu_valid) {
553 r = CMP(y->cpu_fraction, x->cpu_fraction);
554 if (r != 0)
555 return r;
556 } else if (x->cpu_valid)
557 return -1;
558 else if (y->cpu_valid)
559 return 1;
560 } else {
561 r = CMP(y->cpu_usage, x->cpu_usage);
562 if (r != 0)
563 return r;
564 }
565
566 break;
567
568 case ORDER_TASKS:
569 if (x->n_tasks_valid && y->n_tasks_valid) {
570 r = CMP(y->n_tasks, x->n_tasks);
571 if (r != 0)
572 return r;
573 } else if (x->n_tasks_valid)
574 return -1;
575 else if (y->n_tasks_valid)
576 return 1;
577
578 break;
579
580 case ORDER_MEMORY:
581 if (x->memory_valid && y->memory_valid) {
582 r = CMP(y->memory, x->memory);
583 if (r != 0)
584 return r;
585 } else if (x->memory_valid)
586 return -1;
587 else if (y->memory_valid)
588 return 1;
589
590 break;
591
592 case ORDER_IO:
593 if (x->io_valid && y->io_valid) {
594 r = CMP(y->io_input_bps + y->io_output_bps, x->io_input_bps + x->io_output_bps);
595 if (r != 0)
596 return r;
597 } else if (x->io_valid)
598 return -1;
599 else if (y->io_valid)
600 return 1;
601 }
602
603 return path_compare(x->path, y->path);
604 }
605
606 static void display(Hashmap *a) {
607 Group *g;
608 Group **array;
609 signed path_columns;
610 unsigned rows, n = 0, maxtcpu = 0, maxtpath = 3; /* 3 for ellipsize() to work properly */
611
612 assert(a);
613
614 if (!terminal_is_dumb())
615 fputs(ANSI_HOME_CLEAR, stdout);
616
617 array = newa(Group*, hashmap_size(a));
618
619 HASHMAP_FOREACH(g, a)
620 if (g->n_tasks_valid || g->cpu_valid || g->memory_valid || g->io_valid)
621 array[n++] = g;
622
623 typesafe_qsort(array, n, group_compare);
624
625 /* Find the longest names in one run */
626 for (unsigned j = 0; j < n; j++) {
627 maxtcpu = MAX(maxtcpu,
628 strlen(MAYBE_FORMAT_TIMESPAN((usec_t) (array[j]->cpu_usage / NSEC_PER_USEC), 0)));
629 maxtpath = MAX(maxtpath,
630 strlen(array[j]->path));
631 }
632
633 rows = lines();
634 if (rows <= 10)
635 rows = 10;
636
637 if (on_tty()) {
638 const char *on, *off;
639 int cpu_len = arg_cpu_type == CPU_PERCENT ? 6 : maxtcpu;
640
641 path_columns = columns() - 36 - cpu_len;
642 if (path_columns < 10)
643 path_columns = 10;
644
645 on = ansi_highlight_underline();
646 off = ansi_underline();
647
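                /* The whole header row is underlined; the column the output is currently sorted by additionally gets the highlight attribute. */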
648 printf("%s%s%-*s%s %s%7s%s %s%*s%s %s%8s%s %s%8s%s %s%8s%s%s\n",
649 ansi_underline(),
650 arg_order == ORDER_PATH ? on : "", path_columns, "CGroup",
651 arg_order == ORDER_PATH ? off : "",
652 arg_order == ORDER_TASKS ? on : "",
653 arg_count == COUNT_PIDS ? "Tasks" : arg_count == COUNT_USERSPACE_PROCESSES ? "Procs" : "Proc+",
654 arg_order == ORDER_TASKS ? off : "",
655 arg_order == ORDER_CPU ? on : "",
656 cpu_len,
657 arg_cpu_type == CPU_PERCENT ? "%CPU" : "CPU Time",
658 arg_order == ORDER_CPU ? off : "",
659 arg_order == ORDER_MEMORY ? on : "", "Memory",
660 arg_order == ORDER_MEMORY ? off : "",
661 arg_order == ORDER_IO ? on : "", "Input/s",
662 arg_order == ORDER_IO ? off : "",
663 arg_order == ORDER_IO ? on : "", "Output/s",
664 arg_order == ORDER_IO ? off : "",
665 ansi_normal());
666 } else
667 path_columns = maxtpath;
668
669 for (unsigned j = 0; j < n; j++) {
670 _cleanup_free_ char *ellipsized = NULL;
671 const char *path;
672
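                /* On a TTY, stop before the table scrolls off the screen; a handful of rows stay reserved for the header and status output. */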
673 if (on_tty() && j + 6 > rows)
674 break;
675
676 g = array[j];
677
678 path = empty_to_root(g->path);
679 ellipsized = ellipsize(path, path_columns, 33);
680 printf("%-*s", path_columns, ellipsized ?: path);
681
682 if (g->n_tasks_valid)
683 printf(" %7" PRIu64, g->n_tasks);
684 else
685 fputs(" -", stdout);
686
687 if (arg_cpu_type == CPU_PERCENT) {
688 if (g->cpu_valid)
689 printf(" %6.1f", g->cpu_fraction*100);
690 else
691 fputs(" -", stdout);
692 } else
693 printf(" %*s",
694 (int) maxtcpu,
695 MAYBE_FORMAT_TIMESPAN((usec_t) (g->cpu_usage / NSEC_PER_USEC), 0));
696
697 printf(" %8s", MAYBE_FORMAT_BYTES(g->memory_valid, g->memory));
698 printf(" %8s", MAYBE_FORMAT_BYTES(g->io_valid, g->io_input_bps));
699 printf(" %8s", MAYBE_FORMAT_BYTES(g->io_valid, g->io_output_bps));
700
701 putchar('\n');
702 }
703 }
704
705 static int help(void) {
706 _cleanup_free_ char *link = NULL;
707 int r;
708
709 r = terminal_urlify_man("systemd-cgtop", "1", &link);
710 if (r < 0)
711 return log_oom();
712
713 printf("%s [OPTIONS...] [CGROUP]\n\n"
714 "Show top control groups by their resource usage.\n\n"
715 " -h --help Show this help\n"
716 " --version Show package version\n"
717 " -p --order=path Order by path\n"
718 " -t --order=tasks Order by number of tasks/processes\n"
719 " -c --order=cpu Order by CPU load (default)\n"
720 " -m --order=memory Order by memory load\n"
721 " -i --order=io Order by IO load\n"
722 " -r --raw Provide raw (not human-readable) numbers\n"
723 " --cpu=percentage Show CPU usage as percentage (default)\n"
724 " --cpu=time Show CPU usage as time\n"
725 " -P Count userspace processes instead of tasks (excl. kernel)\n"
726 " -k Count all processes instead of tasks (incl. kernel)\n"
727 " --recursive=BOOL Sum up process count recursively\n"
728 " -d --delay=DELAY Delay between updates\n"
729 " -n --iterations=N Run for N iterations before exiting\n"
730 " -1 Shortcut for --iterations=1\n"
731 " -b --batch Run in batch mode, accepting no input\n"
732 " --depth=DEPTH Maximum traversal depth (default: %u)\n"
733 " -M --machine= Show container\n"
734 "\nSee the %s for details.\n",
735 program_invocation_short_name,
736 arg_depth,
737 link);
738
739 return 0;
740 }
741
742 static int parse_argv(int argc, char *argv[]) {
743 enum {
744 ARG_VERSION = 0x100,
745 ARG_DEPTH,
746 ARG_CPU_TYPE,
747 ARG_ORDER,
748 ARG_RECURSIVE,
749 };
750
751 static const struct option options[] = {
752 { "help", no_argument, NULL, 'h' },
753 { "version", no_argument, NULL, ARG_VERSION },
754 { "delay", required_argument, NULL, 'd' },
755 { "iterations", required_argument, NULL, 'n' },
756 { "batch", no_argument, NULL, 'b' },
757 { "raw", no_argument, NULL, 'r' },
758 { "depth", required_argument, NULL, ARG_DEPTH },
759 { "cpu", optional_argument, NULL, ARG_CPU_TYPE },
760 { "order", required_argument, NULL, ARG_ORDER },
761 { "recursive", required_argument, NULL, ARG_RECURSIVE },
762 { "machine", required_argument, NULL, 'M' },
763 {}
764 };
765
766 int c, r;
767
768 assert(argc >= 1);
769 assert(argv);
770
771 while ((c = getopt_long(argc, argv, "hptcmin:brd:kPM:1", options, NULL)) >= 0)
772
773 switch (c) {
774
775 case 'h':
776 return help();
777
778 case ARG_VERSION:
779 return version();
780
781 case ARG_CPU_TYPE:
782 if (optarg) {
783 if (streq(optarg, "time"))
784 arg_cpu_type = CPU_TIME;
785 else if (streq(optarg, "percentage"))
786 arg_cpu_type = CPU_PERCENT;
787 else
788 return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
789 "Unknown argument to --cpu=: %s",
790 optarg);
791 } else
792 arg_cpu_type = CPU_TIME;
793
794 break;
795
796 case ARG_DEPTH:
797 r = safe_atou(optarg, &arg_depth);
798 if (r < 0)
799 return log_error_errno(r, "Failed to parse depth parameter '%s': %m", optarg);
800
801 break;
802
803 case 'd':
804 r = parse_sec(optarg, &arg_delay);
805 if (r < 0)
806 return log_error_errno(r, "Failed to parse delay parameter '%s': %m", optarg);
807 if (arg_delay <= 0)
808 return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
809 "Invalid delay parameter '%s'",
810 optarg);
811
812 break;
813
814 case 'n':
815 r = safe_atou(optarg, &arg_iterations);
816 if (r < 0)
817 return log_error_errno(r, "Failed to parse iterations parameter '%s': %m", optarg);
818
819 break;
820
821 case '1':
822 arg_iterations = 1;
823 break;
824
825 case 'b':
826 arg_batch = true;
827 break;
828
829 case 'r':
830 arg_raw = true;
831 break;
832
833 case 'p':
834 arg_order = ORDER_PATH;
835 break;
836
837 case 't':
838 arg_order = ORDER_TASKS;
839 break;
840
841 case 'c':
842 arg_order = ORDER_CPU;
843 break;
844
845 case 'm':
846 arg_order = ORDER_MEMORY;
847 break;
848
849 case 'i':
850 arg_order = ORDER_IO;
851 break;
852
853 case ARG_ORDER:
854 if (streq(optarg, "path"))
855 arg_order = ORDER_PATH;
856 else if (streq(optarg, "tasks"))
857 arg_order = ORDER_TASKS;
858 else if (streq(optarg, "cpu"))
859 arg_order = ORDER_CPU;
860 else if (streq(optarg, "memory"))
861 arg_order = ORDER_MEMORY;
862 else if (streq(optarg, "io"))
863 arg_order = ORDER_IO;
864 else
865 return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
866 "Invalid argument to --order=: %s",
867 optarg);
868 break;
869
870 case 'k':
871 arg_count = COUNT_ALL_PROCESSES;
872 break;
873
874 case 'P':
875 arg_count = COUNT_USERSPACE_PROCESSES;
876 break;
877
878 case ARG_RECURSIVE:
879 r = parse_boolean_argument("--recursive=", optarg, &arg_recursive);
880 if (r < 0)
881 return r;
882
883 arg_recursive_unset = !r;
884 break;
885
886 case 'M':
887 arg_machine = optarg;
888 break;
889
890 case '?':
891 return -EINVAL;
892
893 default:
894 assert_not_reached();
895 }
896
897 if (optind == argc - 1)
898 arg_root = argv[optind];
899 else if (optind < argc)
900 return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
901 "Too many arguments.");
902
903 return 1;
904 }
905
906 static const char* counting_what(void) {
907 if (arg_count == COUNT_PIDS)
908 return "tasks";
909 else if (arg_count == COUNT_ALL_PROCESSES)
910 return "all processes (incl. kernel)";
911 else
912 return "userspace processes (excl. kernel)";
913 }
914
915 static int loop(const char *root) {
916 _cleanup_hashmap_free_ Hashmap *a = NULL, *b = NULL;
917 unsigned iteration = 0;
918 usec_t last_refresh = 0;
919 bool immediate_refresh = false;
920 int r;
921
922 a = hashmap_new(&group_hash_ops);
923 b = hashmap_new(&group_hash_ops);
924 if (!a || !b)
925 return log_oom();
926
927 for (;;) {
928 usec_t t;
929 char key;
930
931 t = now(CLOCK_MONOTONIC);
932
933 if (t >= usec_add(last_refresh, arg_delay) || immediate_refresh) {
934
935 r = refresh(root, a, b, iteration++);
936 if (r < 0)
937 return log_error_errno(r, "Failed to refresh: %m");
938
939 hashmap_clear(b);
940 SWAP_TWO(a, b);
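                        /* After the swap, 'b' holds the fresh samples that get displayed below, while 'a' is the emptied map the next refresh() fills (reusing 'b' for the previous values). */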
941
942 last_refresh = t;
943 immediate_refresh = false;
944 }
945
946 display(b);
947
948 if (arg_iterations && iteration >= arg_iterations)
949 return 0;
950
951 if (!on_tty()) /* non-TTY: print a blank line as delimiter between polls */
952 fputs("\n", stdout);
953 fflush(stdout);
954
955 if (arg_batch)
956 (void) usleep_safe(usec_add(usec_sub_unsigned(last_refresh, t), arg_delay));
957 else {
958 r = read_one_char(stdin, &key, usec_add(usec_sub_unsigned(last_refresh, t), arg_delay), NULL);
959 if (r == -ETIMEDOUT)
960 continue;
961 if (r < 0)
962 return log_error_errno(r, "Couldn't read key: %m");
963 }
964
965 if (on_tty()) { /* TTY: Clear any user keystroke */
966 fputs("\r \r", stdout);
967 fflush(stdout);
968 }
969
970 if (arg_batch)
971 continue;
972
973 switch (key) {
974
975 case ' ':
976 immediate_refresh = true;
977 break;
978
979 case 'q':
980 return 0;
981
982 case 'p':
983 arg_order = ORDER_PATH;
984 break;
985
986 case 't':
987 arg_order = ORDER_TASKS;
988 break;
989
990 case 'c':
991 arg_order = ORDER_CPU;
992 break;
993
994 case 'm':
995 arg_order = ORDER_MEMORY;
996 break;
997
998 case 'i':
999 arg_order = ORDER_IO;
1000 break;
1001
1002 case '%':
1003 arg_cpu_type = arg_cpu_type == CPU_TIME ? CPU_PERCENT : CPU_TIME;
1004 break;
1005
1006 case 'k':
1007 arg_count = arg_count != COUNT_ALL_PROCESSES ? COUNT_ALL_PROCESSES : COUNT_PIDS;
1008 fprintf(stdout, "\nCounting: %s.", counting_what());
1009 fflush(stdout);
1010 sleep(1);
1011 break;
1012
1013 case 'P':
1014 arg_count = arg_count != COUNT_USERSPACE_PROCESSES ? COUNT_USERSPACE_PROCESSES : COUNT_PIDS;
1015 fprintf(stdout, "\nCounting: %s.", counting_what());
1016 fflush(stdout);
1017 sleep(1);
1018 break;
1019
1020 case 'r':
1021 if (arg_count == COUNT_PIDS)
1022 fprintf(stdout, "\n\aCannot toggle recursive counting, not available in task counting mode.");
1023 else {
1024 arg_recursive = !arg_recursive;
1025 fprintf(stdout, "\nRecursive process counting: %s", yes_no(arg_recursive));
1026 }
1027 fflush(stdout);
1028 sleep(1);
1029 break;
1030
1031 case '+':
1032 arg_delay = usec_add(arg_delay, arg_delay < USEC_PER_SEC ? USEC_PER_MSEC * 250 : USEC_PER_SEC);
1033
1034 fprintf(stdout, "\nIncreased delay to %s.", FORMAT_TIMESPAN(arg_delay, 0));
1035 fflush(stdout);
1036 sleep(1);
1037 break;
1038
1039 case '-':
1040 if (arg_delay <= USEC_PER_MSEC*500)
1041 arg_delay = USEC_PER_MSEC*250;
1042 else
1043 arg_delay = usec_sub_unsigned(arg_delay, arg_delay < USEC_PER_MSEC * 1250 ? USEC_PER_MSEC * 250 : USEC_PER_SEC);
1044
1045 fprintf(stdout, "\nDecreased delay to %s.", FORMAT_TIMESPAN(arg_delay, 0));
1046 fflush(stdout);
1047 sleep(1);
1048 break;
1049
1050 case '?':
1051 case 'h':
1052
1053 fprintf(stdout,
1054 "\t<%1$sp%2$s> By path; <%1$st%2$s> By tasks/procs; <%1$sc%2$s> By CPU; <%1$sm%2$s> By memory; <%1$si%2$s> By I/O\n"
1055 "\t<%1$s+%2$s> Inc. delay; <%1$s-%2$s> Dec. delay; <%1$s%%%2$s> Toggle time; <%1$sSPACE%2$s> Refresh\n"
1056 "\t<%1$sP%2$s> Toggle count userspace processes; <%1$sk%2$s> Toggle count all processes\n"
1057 "\t<%1$sr%2$s> Count processes recursively; <%1$sq%2$s> Quit",
1058 ansi_highlight(), ansi_normal());
1059 fflush(stdout);
1060 sleep(3);
1061 break;
1062
1063 default:
1064 if (key < ' ')
1065 fprintf(stdout, "\nUnknown key '\\x%x'. Ignoring.", (unsigned) key);
1066 else
1067 fprintf(stdout, "\nUnknown key '%c'. Ignoring.", key);
1068 fflush(stdout);
1069 sleep(1);
1070 break;
1071 }
1072 }
1073 }
1074
1075 static int run(int argc, char *argv[]) {
1076 _cleanup_free_ char *root = NULL;
1077 CGroupMask mask;
1078 int r;
1079
1080 log_setup();
1081
1082 r = parse_argv(argc, argv);
1083 if (r <= 0)
1084 return r;
1085
1086 r = cg_mask_supported(&mask);
1087 if (r < 0)
1088 return log_error_errno(r, "Failed to determine supported controllers: %m");
1089
1090 /* honor user selection unless pids controller is unavailable */
1091 PidsCount possible_count = (mask & CGROUP_MASK_PIDS) ? COUNT_PIDS : COUNT_ALL_PROCESSES;
1092 arg_count = MIN(possible_count, arg_count);
1093
1094 if (arg_recursive_unset && arg_count == COUNT_PIDS)
1095 return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
1096 "Non-recursive counting is only supported when counting processes, not tasks. Use -P or -k.");
1097
1098 r = show_cgroup_get_path_and_warn(arg_machine, arg_root, &root);
1099 if (r < 0)
1100 return log_error_errno(r, "Failed to get root control group path: %m");
1101 log_debug("CGroup path: %s", root);
1102
1103 signal(SIGWINCH, columns_lines_cache_reset);
1104
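        /* Unless overridden with -n/-1: on a TTY keep running until quit (0 = unlimited), otherwise emit a single snapshot. */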
1105 if (arg_iterations == UINT_MAX)
1106 arg_iterations = on_tty() ? 0 : 1;
1107
1108 return loop(root);
1109 }
1110
1111 DEFINE_MAIN_FUNCTION(run);