1 /* SPDX-License-Identifier: LGPL-2.1+ */
  This file is part of systemd.

  Copyright 2012 Lennart Poettering
*/
19 #include "alloc-util.h"
20 #include "bus-error.h"
22 #include "cgroup-show.h"
23 #include "cgroup-util.h"
27 #include "parse-util.h"
28 #include "path-util.h"
29 #include "process-util.h"
30 #include "procfs-util.h"
31 #include "stdio-util.h"
33 #include "terminal-util.h"
34 #include "unit-name.h"
38 typedef struct Group
{
48 unsigned cpu_iteration
;
55 unsigned io_iteration
;
56 uint64_t io_input
, io_output
;
58 uint64_t io_input_bps
, io_output_bps
;
61 static unsigned arg_depth
= 3;
62 static unsigned arg_iterations
= (unsigned) -1;
63 static bool arg_batch
= false;
64 static bool arg_raw
= false;
65 static usec_t arg_delay
= 1*USEC_PER_SEC
;
66 static char* arg_machine
= NULL
;
67 static char* arg_root
= NULL
;
68 static bool arg_recursive
= true;
69 static bool arg_recursive_unset
= false;
73 COUNT_USERSPACE_PROCESSES
,
75 } arg_count
= COUNT_PIDS
;
83 } arg_order
= ORDER_CPU
;
88 } arg_cpu_type
= CPU_PERCENT
;
90 static void group_free(Group
*g
) {
97 static void group_hashmap_clear(Hashmap
*h
) {
98 hashmap_clear_with_destructor(h
, group_free
);
101 static void group_hashmap_free(Hashmap
*h
) {
102 group_hashmap_clear(h
);
106 static const char *maybe_format_bytes(char *buf
, size_t l
, bool is_valid
, uint64_t t
) {
110 snprintf(buf
, l
, "%" PRIu64
, t
);
113 return format_bytes(buf
, l
, t
);
116 static bool is_root_cgroup(const char *path
) {
118 /* Returns true if the specified path belongs to the root cgroup. The root cgroup is special on cgroupsv2 as it
119 * carries only very few attributes in order not to export multiple truth about system state as most
120 * information is available elsewhere in /proc anyway. We need to be able to deal with that, and need to get
121 * our data from different sources in that case.
123 * There's one extra complication in all of this, though 😣: if the path to the cgroup indicates we are in the
124 * root cgroup this might actually not be the case, because cgroup namespacing might be in effect
125 * (CLONE_NEWCGROUP). Since there's no nice way to distuingish a real cgroup root from a fake namespaced one we
126 * do an explicit container check here, under the assumption that CLONE_NEWCGROUP is generally used when
127 * container managers are used too.
129 * Note that checking for a container environment is kinda ugly, since in theory people could use cgtop from
130 * inside a container where cgroup namespacing is turned off to watch the host system. However, that's mostly a
131 * theoretic usecase, and if people actually try all they'll lose is accounting for the top-level cgroup. Which
134 if (detect_container() > 0)
137 return empty_or_root(path
);
141 const char *controller
,
155 all_unified
= cg_all_unified();
159 g
= hashmap_get(a
, path
);
161 g
= hashmap_get(b
, path
);
167 g
->path
= strdup(path
);
173 r
= hashmap_put(a
, g
->path
, g
);
179 r
= hashmap_move_one(a
, b
, path
);
183 g
->cpu_valid
= g
->memory_valid
= g
->io_valid
= g
->n_tasks_valid
= false;
187 if (streq(controller
, SYSTEMD_CGROUP_CONTROLLER
) &&
188 IN_SET(arg_count
, COUNT_ALL_PROCESSES
, COUNT_USERSPACE_PROCESSES
)) {
189 _cleanup_fclose_
FILE *f
= NULL
;
192 r
= cg_enumerate_processes(controller
, path
, &f
);
199 while (cg_read_pid(f
, &pid
) > 0) {
201 if (arg_count
== COUNT_USERSPACE_PROCESSES
&& is_kernel_thread(pid
) > 0)
208 g
->n_tasks_valid
= true;
210 } else if (streq(controller
, "pids") && arg_count
== COUNT_PIDS
) {
212 if (is_root_cgroup(path
)) {
213 r
= procfs_tasks_get_current(&g
->n_tasks
);
217 _cleanup_free_
char *p
= NULL
, *v
= NULL
;
219 r
= cg_get_path(controller
, path
, "pids.current", &p
);
223 r
= read_one_line_file(p
, &v
);
229 r
= safe_atou64(v
, &g
->n_tasks
);
235 g
->n_tasks_valid
= true;
237 } else if (STR_IN_SET(controller
, "cpu", "cpuacct")) {
238 _cleanup_free_
char *p
= NULL
, *v
= NULL
;
242 if (is_root_cgroup(path
)) {
243 r
= procfs_cpu_get_usage(&new_usage
);
246 } else if (all_unified
) {
247 _cleanup_free_
char *val
= NULL
;
249 if (!streq(controller
, "cpu"))
252 r
= cg_get_keyed_attribute("cpu", path
, "cpu.stat", STRV_MAKE("usage_usec"), &val
);
253 if (IN_SET(r
, -ENOENT
, -ENXIO
))
258 r
= safe_atou64(val
, &new_usage
);
262 new_usage
*= NSEC_PER_USEC
;
264 if (!streq(controller
, "cpuacct"))
267 r
= cg_get_path(controller
, path
, "cpuacct.usage", &p
);
271 r
= read_one_line_file(p
, &v
);
277 r
= safe_atou64(v
, &new_usage
);
282 timestamp
= now_nsec(CLOCK_MONOTONIC
);
284 if (g
->cpu_iteration
== iteration
- 1 &&
285 (nsec_t
) new_usage
> g
->cpu_usage
) {
289 x
= timestamp
- g
->cpu_timestamp
;
293 y
= (nsec_t
) new_usage
- g
->cpu_usage
;
294 g
->cpu_fraction
= (double) y
/ (double) x
;
298 g
->cpu_usage
= (nsec_t
) new_usage
;
299 g
->cpu_timestamp
= timestamp
;
300 g
->cpu_iteration
= iteration
;
302 } else if (streq(controller
, "memory")) {
304 if (is_root_cgroup(path
)) {
305 r
= procfs_memory_get_current(&g
->memory
);
309 _cleanup_free_
char *p
= NULL
, *v
= NULL
;
312 r
= cg_get_path(controller
, path
, "memory.current", &p
);
314 r
= cg_get_path(controller
, path
, "memory.usage_in_bytes", &p
);
318 r
= read_one_line_file(p
, &v
);
324 r
= safe_atou64(v
, &g
->memory
);
330 g
->memory_valid
= true;
332 } else if ((streq(controller
, "io") && all_unified
) ||
333 (streq(controller
, "blkio") && !all_unified
)) {
334 _cleanup_fclose_
FILE *f
= NULL
;
335 _cleanup_free_
char *p
= NULL
;
336 uint64_t wr
= 0, rd
= 0;
339 r
= cg_get_path(controller
, path
, all_unified
? "io.stat" : "blkio.io_service_bytes", &p
);
351 char line
[LINE_MAX
], *l
;
354 if (!fgets(line
, sizeof(line
), f
))
357 /* Trim and skip the device */
359 l
+= strcspn(l
, WHITESPACE
);
360 l
+= strspn(l
, WHITESPACE
);
363 while (!isempty(l
)) {
364 if (sscanf(l
, "rbytes=%" SCNu64
, &k
))
366 else if (sscanf(l
, "wbytes=%" SCNu64
, &k
))
369 l
+= strcspn(l
, WHITESPACE
);
370 l
+= strspn(l
, WHITESPACE
);
373 if (first_word(l
, "Read")) {
376 } else if (first_word(l
, "Write")) {
382 l
+= strspn(l
, WHITESPACE
);
383 r
= safe_atou64(l
, &k
);
391 timestamp
= now_nsec(CLOCK_MONOTONIC
);
393 if (g
->io_iteration
== iteration
- 1) {
396 x
= (uint64_t) (timestamp
- g
->io_timestamp
);
400 if (rd
> g
->io_input
)
401 yr
= rd
- g
->io_input
;
405 if (wr
> g
->io_output
)
406 yw
= wr
- g
->io_output
;
410 if (yr
> 0 || yw
> 0) {
411 g
->io_input_bps
= (yr
* 1000000000ULL) / x
;
412 g
->io_output_bps
= (yw
* 1000000000ULL) / x
;
419 g
->io_timestamp
= timestamp
;
420 g
->io_iteration
= iteration
;
429 static int refresh_one(
430 const char *controller
,
438 _cleanup_closedir_
DIR *d
= NULL
;
446 if (depth
> arg_depth
)
449 r
= process(controller
, path
, a
, b
, iteration
, &ours
);
453 r
= cg_enumerate_subgroups(controller
, path
, &d
);
460 _cleanup_free_
char *fn
= NULL
, *p
= NULL
;
463 r
= cg_read_subgroup(d
, &fn
);
469 p
= strjoin(path
, "/", fn
);
473 path_simplify(p
, false);
475 r
= refresh_one(controller
, p
, a
, b
, iteration
, depth
+ 1, &child
);
480 IN_SET(arg_count
, COUNT_ALL_PROCESSES
, COUNT_USERSPACE_PROCESSES
) &&
482 child
->n_tasks_valid
&&
483 streq(controller
, SYSTEMD_CGROUP_CONTROLLER
)) {
485 /* Recursively sum up processes */
487 if (ours
->n_tasks_valid
)
488 ours
->n_tasks
+= child
->n_tasks
;
490 ours
->n_tasks
= child
->n_tasks
;
491 ours
->n_tasks_valid
= true;
502 static int refresh(const char *root
, Hashmap
*a
, Hashmap
*b
, unsigned iteration
) {
507 r
= refresh_one(SYSTEMD_CGROUP_CONTROLLER
, root
, a
, b
, iteration
, 0, NULL
);
510 r
= refresh_one("cpu", root
, a
, b
, iteration
, 0, NULL
);
513 r
= refresh_one("cpuacct", root
, a
, b
, iteration
, 0, NULL
);
516 r
= refresh_one("memory", root
, a
, b
, iteration
, 0, NULL
);
519 r
= refresh_one("io", root
, a
, b
, iteration
, 0, NULL
);
522 r
= refresh_one("blkio", root
, a
, b
, iteration
, 0, NULL
);
525 r
= refresh_one("pids", root
, a
, b
, iteration
, 0, NULL
);
532 static int group_compare(const void*a
, const void *b
) {
533 const Group
*x
= *(Group
**)a
, *y
= *(Group
**)b
;
535 if (arg_order
!= ORDER_TASKS
|| arg_recursive
) {
536 /* Let's make sure that the parent is always before
537 * the child. Except when ordering by tasks and
538 * recursive summing is off, since that is actually
539 * not accumulative for all children. */
541 if (path_startswith(empty_to_root(y
->path
), empty_to_root(x
->path
)))
543 if (path_startswith(empty_to_root(x
->path
), empty_to_root(y
->path
)))
553 if (arg_cpu_type
== CPU_PERCENT
) {
554 if (x
->cpu_valid
&& y
->cpu_valid
) {
555 if (x
->cpu_fraction
> y
->cpu_fraction
)
557 else if (x
->cpu_fraction
< y
->cpu_fraction
)
559 } else if (x
->cpu_valid
)
561 else if (y
->cpu_valid
)
564 if (x
->cpu_usage
> y
->cpu_usage
)
566 else if (x
->cpu_usage
< y
->cpu_usage
)
573 if (x
->n_tasks_valid
&& y
->n_tasks_valid
) {
574 if (x
->n_tasks
> y
->n_tasks
)
576 else if (x
->n_tasks
< y
->n_tasks
)
578 } else if (x
->n_tasks_valid
)
580 else if (y
->n_tasks_valid
)
586 if (x
->memory_valid
&& y
->memory_valid
) {
587 if (x
->memory
> y
->memory
)
589 else if (x
->memory
< y
->memory
)
591 } else if (x
->memory_valid
)
593 else if (y
->memory_valid
)
599 if (x
->io_valid
&& y
->io_valid
) {
600 if (x
->io_input_bps
+ x
->io_output_bps
> y
->io_input_bps
+ y
->io_output_bps
)
602 else if (x
->io_input_bps
+ x
->io_output_bps
< y
->io_input_bps
+ y
->io_output_bps
)
604 } else if (x
->io_valid
)
606 else if (y
->io_valid
)
610 return path_compare(x
->path
, y
->path
);
613 static void display(Hashmap
*a
) {
618 unsigned rows
, n
= 0, j
, maxtcpu
= 0, maxtpath
= 3; /* 3 for ellipsize() to work properly */
619 char buffer
[MAX3(21, FORMAT_BYTES_MAX
, FORMAT_TIMESPAN_MAX
)];
623 if (!terminal_is_dumb())
624 fputs(ANSI_HOME_CLEAR
, stdout
);
626 array
= newa(Group
*, hashmap_size(a
));
628 HASHMAP_FOREACH(g
, a
, i
)
629 if (g
->n_tasks_valid
|| g
->cpu_valid
|| g
->memory_valid
|| g
->io_valid
)
632 qsort_safe(array
, n
, sizeof(Group
*), group_compare
);
634 /* Find the longest names in one run */
635 for (j
= 0; j
< n
; j
++) {
636 unsigned cputlen
, pathtlen
;
638 format_timespan(buffer
, sizeof(buffer
), (usec_t
) (array
[j
]->cpu_usage
/ NSEC_PER_USEC
), 0);
639 cputlen
= strlen(buffer
);
640 maxtcpu
= MAX(maxtcpu
, cputlen
);
642 pathtlen
= strlen(array
[j
]->path
);
643 maxtpath
= MAX(maxtpath
, pathtlen
);
646 if (arg_cpu_type
== CPU_PERCENT
)
647 xsprintf(buffer
, "%6s", "%CPU");
649 xsprintf(buffer
, "%*s", maxtcpu
, "CPU Time");
656 const char *on
, *off
;
658 path_columns
= columns() - 36 - strlen(buffer
);
659 if (path_columns
< 10)
662 on
= ansi_highlight_underline();
663 off
= ansi_underline();
665 printf("%s%s%-*s%s %s%7s%s %s%s%s %s%8s%s %s%8s%s %s%8s%s%s\n",
667 arg_order
== ORDER_PATH
? on
: "", path_columns
, "Control Group",
668 arg_order
== ORDER_PATH
? off
: "",
669 arg_order
== ORDER_TASKS
? on
: "", arg_count
== COUNT_PIDS
? "Tasks" : arg_count
== COUNT_USERSPACE_PROCESSES
? "Procs" : "Proc+",
670 arg_order
== ORDER_TASKS
? off
: "",
671 arg_order
== ORDER_CPU
? on
: "", buffer
,
672 arg_order
== ORDER_CPU
? off
: "",
673 arg_order
== ORDER_MEMORY
? on
: "", "Memory",
674 arg_order
== ORDER_MEMORY
? off
: "",
675 arg_order
== ORDER_IO
? on
: "", "Input/s",
676 arg_order
== ORDER_IO
? off
: "",
677 arg_order
== ORDER_IO
? on
: "", "Output/s",
678 arg_order
== ORDER_IO
? off
: "",
681 path_columns
= maxtpath
;
683 for (j
= 0; j
< n
; j
++) {
684 _cleanup_free_
char *ellipsized
= NULL
;
687 if (on_tty() && j
+ 6 > rows
)
692 path
= empty_to_root(g
->path
);
693 ellipsized
= ellipsize(path
, path_columns
, 33);
694 printf("%-*s", path_columns
, ellipsized
?: path
);
696 if (g
->n_tasks_valid
)
697 printf(" %7" PRIu64
, g
->n_tasks
);
701 if (arg_cpu_type
== CPU_PERCENT
) {
703 printf(" %6.1f", g
->cpu_fraction
*100);
707 printf(" %*s", maxtcpu
, format_timespan(buffer
, sizeof(buffer
), (usec_t
) (g
->cpu_usage
/ NSEC_PER_USEC
), 0));
709 printf(" %8s", maybe_format_bytes(buffer
, sizeof(buffer
), g
->memory_valid
, g
->memory
));
710 printf(" %8s", maybe_format_bytes(buffer
, sizeof(buffer
), g
->io_valid
, g
->io_input_bps
));
711 printf(" %8s", maybe_format_bytes(buffer
, sizeof(buffer
), g
->io_valid
, g
->io_output_bps
));
717 static void help(void) {
718 printf("%s [OPTIONS...] [CGROUP]\n\n"
719 "Show top control groups by their resource usage.\n\n"
720 " -h --help Show this help\n"
721 " --version Show package version\n"
722 " -p --order=path Order by path\n"
723 " -t --order=tasks Order by number of tasks/processes\n"
724 " -c --order=cpu Order by CPU load (default)\n"
725 " -m --order=memory Order by memory load\n"
726 " -i --order=io Order by IO load\n"
727 " -r --raw Provide raw (not human-readable) numbers\n"
728 " --cpu=percentage Show CPU usage as percentage (default)\n"
729 " --cpu=time Show CPU usage as time\n"
730 " -P Count userspace processes instead of tasks (excl. kernel)\n"
731 " -k Count all processes instead of tasks (incl. kernel)\n"
732 " --recursive=BOOL Sum up process count recursively\n"
733 " -d --delay=DELAY Delay between updates\n"
734 " -n --iterations=N Run for N iterations before exiting\n"
735 " -1 Shortcut for --iterations=1\n"
736 " -b --batch Run in batch mode, accepting no input\n"
737 " --depth=DEPTH Maximum traversal depth (default: %u)\n"
738 " -M --machine= Show container\n"
739 , program_invocation_short_name
, arg_depth
);
742 static int parse_argv(int argc
, char *argv
[]) {
752 static const struct option options
[] = {
753 { "help", no_argument
, NULL
, 'h' },
754 { "version", no_argument
, NULL
, ARG_VERSION
},
755 { "delay", required_argument
, NULL
, 'd' },
756 { "iterations", required_argument
, NULL
, 'n' },
757 { "batch", no_argument
, NULL
, 'b' },
758 { "raw", no_argument
, NULL
, 'r' },
759 { "depth", required_argument
, NULL
, ARG_DEPTH
},
760 { "cpu", optional_argument
, NULL
, ARG_CPU_TYPE
},
761 { "order", required_argument
, NULL
, ARG_ORDER
},
762 { "recursive", required_argument
, NULL
, ARG_RECURSIVE
},
763 { "machine", required_argument
, NULL
, 'M' },
772 while ((c
= getopt_long(argc
, argv
, "hptcmin:brd:kPM:1", options
, NULL
)) >= 0)
785 if (streq(optarg
, "time"))
786 arg_cpu_type
= CPU_TIME
;
787 else if (streq(optarg
, "percentage"))
788 arg_cpu_type
= CPU_PERCENT
;
790 log_error("Unknown argument to --cpu=: %s", optarg
);
794 arg_cpu_type
= CPU_TIME
;
799 r
= safe_atou(optarg
, &arg_depth
);
801 return log_error_errno(r
, "Failed to parse depth parameter: %s", optarg
);
806 r
= parse_sec(optarg
, &arg_delay
);
807 if (r
< 0 || arg_delay
<= 0) {
808 log_error("Failed to parse delay parameter: %s", optarg
);
815 r
= safe_atou(optarg
, &arg_iterations
);
817 return log_error_errno(r
, "Failed to parse iterations parameter: %s", optarg
);
834 arg_order
= ORDER_PATH
;
838 arg_order
= ORDER_TASKS
;
842 arg_order
= ORDER_CPU
;
846 arg_order
= ORDER_MEMORY
;
850 arg_order
= ORDER_IO
;
854 if (streq(optarg
, "path"))
855 arg_order
= ORDER_PATH
;
856 else if (streq(optarg
, "tasks"))
857 arg_order
= ORDER_TASKS
;
858 else if (streq(optarg
, "cpu"))
859 arg_order
= ORDER_CPU
;
860 else if (streq(optarg
, "memory"))
861 arg_order
= ORDER_MEMORY
;
862 else if (streq(optarg
, "io"))
863 arg_order
= ORDER_IO
;
865 log_error("Invalid argument to --order=: %s", optarg
);
871 arg_count
= COUNT_ALL_PROCESSES
;
875 arg_count
= COUNT_USERSPACE_PROCESSES
;
879 r
= parse_boolean(optarg
);
881 return log_error_errno(r
, "Failed to parse --recursive= argument: %s", optarg
);
884 arg_recursive_unset
= r
== 0;
888 arg_machine
= optarg
;
895 assert_not_reached("Unhandled option");
898 if (optind
== argc
- 1)
899 arg_root
= argv
[optind
];
900 else if (optind
< argc
) {
901 log_error("Too many arguments.");
908 static const char* counting_what(void) {
909 if (arg_count
== COUNT_PIDS
)
911 else if (arg_count
== COUNT_ALL_PROCESSES
)
912 return "all processes (incl. kernel)";
914 return "userspace processes (excl. kernel)";
917 int main(int argc
, char *argv
[]) {
919 Hashmap
*a
= NULL
, *b
= NULL
;
920 unsigned iteration
= 0;
921 usec_t last_refresh
= 0;
922 bool quit
= false, immediate_refresh
= false;
923 _cleanup_free_
char *root
= NULL
;
926 log_parse_environment();
929 r
= parse_argv(argc
, argv
);
933 r
= cg_mask_supported(&mask
);
935 log_error_errno(r
, "Failed to determine supported controllers: %m");
939 arg_count
= (mask
& CGROUP_MASK_PIDS
) ? COUNT_PIDS
: COUNT_USERSPACE_PROCESSES
;
941 if (arg_recursive_unset
&& arg_count
== COUNT_PIDS
) {
942 log_error("Non-recursive counting is only supported when counting processes, not tasks. Use -P or -k.");
946 r
= show_cgroup_get_path_and_warn(arg_machine
, arg_root
, &root
);
948 log_error_errno(r
, "Failed to get root control group path: %m");
951 log_debug("Cgroup path: %s", root
);
953 a
= hashmap_new(&path_hash_ops
);
954 b
= hashmap_new(&path_hash_ops
);
960 signal(SIGWINCH
, columns_lines_cache_reset
);
962 if (arg_iterations
== (unsigned) -1)
963 arg_iterations
= on_tty() ? 0 : 1;
969 char h
[FORMAT_TIMESPAN_MAX
];
971 t
= now(CLOCK_MONOTONIC
);
973 if (t
>= last_refresh
+ arg_delay
|| immediate_refresh
) {
975 r
= refresh(root
, a
, b
, iteration
++);
977 log_error_errno(r
, "Failed to refresh: %m");
981 group_hashmap_clear(b
);
988 immediate_refresh
= false;
993 if (arg_iterations
&& iteration
>= arg_iterations
)
996 if (!on_tty()) /* non-TTY: Empty newline as delimiter between polls */
1001 (void) usleep(last_refresh
+ arg_delay
- t
);
1003 r
= read_one_char(stdin
, &key
, last_refresh
+ arg_delay
- t
, NULL
);
1004 if (r
== -ETIMEDOUT
)
1007 log_error_errno(r
, "Couldn't read key: %m");
1012 if (on_tty()) { /* TTY: Clear any user keystroke */
1013 fputs("\r \r", stdout
);
1023 immediate_refresh
= true;
1031 arg_order
= ORDER_PATH
;
1035 arg_order
= ORDER_TASKS
;
1039 arg_order
= ORDER_CPU
;
1043 arg_order
= ORDER_MEMORY
;
1047 arg_order
= ORDER_IO
;
1051 arg_cpu_type
= arg_cpu_type
== CPU_TIME
? CPU_PERCENT
: CPU_TIME
;
1055 arg_count
= arg_count
!= COUNT_ALL_PROCESSES
? COUNT_ALL_PROCESSES
: COUNT_PIDS
;
1056 fprintf(stdout
, "\nCounting: %s.", counting_what());
1062 arg_count
= arg_count
!= COUNT_USERSPACE_PROCESSES
? COUNT_USERSPACE_PROCESSES
: COUNT_PIDS
;
1063 fprintf(stdout
, "\nCounting: %s.", counting_what());
1069 if (arg_count
== COUNT_PIDS
)
1070 fprintf(stdout
, "\n\aCannot toggle recursive counting, not available in task counting mode.");
1072 arg_recursive
= !arg_recursive
;
1073 fprintf(stdout
, "\nRecursive process counting: %s", yes_no(arg_recursive
));
1080 if (arg_delay
< USEC_PER_SEC
)
1081 arg_delay
+= USEC_PER_MSEC
*250;
1083 arg_delay
+= USEC_PER_SEC
;
1085 fprintf(stdout
, "\nIncreased delay to %s.", format_timespan(h
, sizeof(h
), arg_delay
, 0));
1091 if (arg_delay
<= USEC_PER_MSEC
*500)
1092 arg_delay
= USEC_PER_MSEC
*250;
1093 else if (arg_delay
< USEC_PER_MSEC
*1250)
1094 arg_delay
-= USEC_PER_MSEC
*250;
1096 arg_delay
-= USEC_PER_SEC
;
1098 fprintf(stdout
, "\nDecreased delay to %s.", format_timespan(h
, sizeof(h
), arg_delay
, 0));
1106 #define ON ANSI_HIGHLIGHT
1107 #define OFF ANSI_NORMAL
1110 "\t<" ON
"p" OFF
"> By path; <" ON
"t" OFF
"> By tasks/procs; <" ON
"c" OFF
"> By CPU; <" ON
"m" OFF
"> By memory; <" ON
"i" OFF
"> By I/O\n"
1111 "\t<" ON
"+" OFF
"> Inc. delay; <" ON
"-" OFF
"> Dec. delay; <" ON
"%%" OFF
"> Toggle time; <" ON
"SPACE" OFF
"> Refresh\n"
1112 "\t<" ON
"P" OFF
"> Toggle count userspace processes; <" ON
"k" OFF
"> Toggle count all processes\n"
1113 "\t<" ON
"r" OFF
"> Count processes recursively; <" ON
"q" OFF
"> Quit");
1120 fprintf(stdout
, "\nUnknown key '\\x%x'. Ignoring.", key
);
1122 fprintf(stdout
, "\nUnknown key '%c'. Ignoring.", key
);
1132 group_hashmap_free(a
);
1133 group_hashmap_free(b
);
1135 return r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
;