1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
12 #include "alloc-util.h"
13 #include "bus-error.h"
15 #include "cgroup-show.h"
16 #include "cgroup-util.h"
20 #include "main-func.h"
21 #include "missing_sched.h"
22 #include "parse-argument.h"
23 #include "parse-util.h"
24 #include "path-util.h"
25 #include "pretty-print.h"
26 #include "process-util.h"
27 #include "procfs-util.h"
28 #include "sort-util.h"
29 #include "stdio-util.h"
31 #include "terminal-util.h"
32 #include "unit-name.h"
35 typedef struct Group
{
45 unsigned cpu_iteration
;
52 unsigned io_iteration
;
53 uint64_t io_input
, io_output
;
55 uint64_t io_input_bps
, io_output_bps
;
58 static unsigned arg_depth
= 3;
59 static unsigned arg_iterations
= UINT_MAX
;
60 static bool arg_batch
= false;
61 static bool arg_raw
= false;
62 static usec_t arg_delay
= 1*USEC_PER_SEC
;
63 static char* arg_machine
= NULL
;
64 static char* arg_root
= NULL
;
65 static bool arg_recursive
= true;
66 static bool arg_recursive_unset
= false;
70 COUNT_USERSPACE_PROCESSES
,
72 } arg_count
= COUNT_PIDS
;
80 } arg_order
= ORDER_CPU
;
85 } arg_cpu_type
= CPU_PERCENT
;
87 static Group
*group_free(Group
*g
) {
96 static const char *maybe_format_timespan(char *buf
, size_t l
, usec_t t
, usec_t accuracy
) {
98 snprintf(buf
, l
, USEC_FMT
, t
);
101 return format_timespan(buf
, l
, t
, accuracy
);
104 static const char *maybe_format_bytes(char *buf
, size_t l
, bool is_valid
, uint64_t t
) {
108 snprintf(buf
, l
, "%" PRIu64
, t
);
111 return format_bytes(buf
, l
, t
);
static bool is_root_cgroup(const char *path) {

        /* Returns true if the specified path belongs to the root cgroup. The root cgroup is special on cgroup v2 as it
         * carries only very few attributes in order not to export multiple truth about system state as most
         * information is available elsewhere in /proc anyway. We need to be able to deal with that, and need to get
         * our data from different sources in that case.
         *
         * There's one extra complication in all of this, though 😣: if the path to the cgroup indicates we are in the
         * root cgroup this might actually not be the case, because cgroup namespacing might be in effect
         * (CLONE_NEWCGROUP). Since there's no nice way to distinguish a real cgroup root from a fake namespaced one we
         * do an explicit container check here, under the assumption that CLONE_NEWCGROUP is generally used when
         * container managers are used too.
         *
         * Note that checking for a container environment is kinda ugly, since in theory people could use cgtop from
         * inside a container where cgroup namespacing is turned off to watch the host system. However, that's mostly a
         * theoretic usecase, and if people actually try all they'll lose is accounting for the top-level cgroup. Which
         * isn't too bad. */

        if (detect_container() > 0)
                return false;

        return empty_or_root(path);
}
139 const char *controller
,
153 all_unified
= cg_all_unified();
157 g
= hashmap_get(a
, path
);
159 g
= hashmap_get(b
, path
);
165 g
->path
= strdup(path
);
171 r
= hashmap_put(a
, g
->path
, g
);
177 r
= hashmap_move_one(a
, b
, path
);
181 g
->cpu_valid
= g
->memory_valid
= g
->io_valid
= g
->n_tasks_valid
= false;
185 if (streq(controller
, SYSTEMD_CGROUP_CONTROLLER
) &&
186 IN_SET(arg_count
, COUNT_ALL_PROCESSES
, COUNT_USERSPACE_PROCESSES
)) {
187 _cleanup_fclose_
FILE *f
= NULL
;
190 r
= cg_enumerate_processes(controller
, path
, &f
);
197 while (cg_read_pid(f
, &pid
) > 0) {
199 if (arg_count
== COUNT_USERSPACE_PROCESSES
&& is_kernel_thread(pid
) > 0)
206 g
->n_tasks_valid
= true;
208 } else if (streq(controller
, "pids") && arg_count
== COUNT_PIDS
) {
210 if (is_root_cgroup(path
)) {
211 r
= procfs_tasks_get_current(&g
->n_tasks
);
215 _cleanup_free_
char *p
= NULL
, *v
= NULL
;
217 r
= cg_get_path(controller
, path
, "pids.current", &p
);
221 r
= read_one_line_file(p
, &v
);
227 r
= safe_atou64(v
, &g
->n_tasks
);
233 g
->n_tasks_valid
= true;
235 } else if (streq(controller
, "memory")) {
237 if (is_root_cgroup(path
)) {
238 r
= procfs_memory_get_used(&g
->memory
);
242 _cleanup_free_
char *p
= NULL
, *v
= NULL
;
245 r
= cg_get_path(controller
, path
, "memory.current", &p
);
247 r
= cg_get_path(controller
, path
, "memory.usage_in_bytes", &p
);
251 r
= read_one_line_file(p
, &v
);
257 r
= safe_atou64(v
, &g
->memory
);
263 g
->memory_valid
= true;
265 } else if ((streq(controller
, "io") && all_unified
) ||
266 (streq(controller
, "blkio") && !all_unified
)) {
267 _cleanup_fclose_
FILE *f
= NULL
;
268 _cleanup_free_
char *p
= NULL
;
269 uint64_t wr
= 0, rd
= 0;
272 r
= cg_get_path(controller
, path
, all_unified
? "io.stat" : "blkio.io_service_bytes", &p
);
284 _cleanup_free_
char *line
= NULL
;
288 r
= read_line(f
, LONG_LINE_MAX
, &line
);
294 /* Trim and skip the device */
296 l
+= strcspn(l
, WHITESPACE
);
297 l
+= strspn(l
, WHITESPACE
);
300 while (!isempty(l
)) {
301 if (sscanf(l
, "rbytes=%" SCNu64
, &k
))
303 else if (sscanf(l
, "wbytes=%" SCNu64
, &k
))
306 l
+= strcspn(l
, WHITESPACE
);
307 l
+= strspn(l
, WHITESPACE
);
310 if (first_word(l
, "Read")) {
313 } else if (first_word(l
, "Write")) {
319 l
+= strspn(l
, WHITESPACE
);
320 r
= safe_atou64(l
, &k
);
328 timestamp
= now_nsec(CLOCK_MONOTONIC
);
330 if (g
->io_iteration
== iteration
- 1) {
333 x
= (uint64_t) (timestamp
- g
->io_timestamp
);
337 if (rd
> g
->io_input
)
338 yr
= rd
- g
->io_input
;
342 if (wr
> g
->io_output
)
343 yw
= wr
- g
->io_output
;
347 if (yr
> 0 || yw
> 0) {
348 g
->io_input_bps
= (yr
* 1000000000ULL) / x
;
349 g
->io_output_bps
= (yw
* 1000000000ULL) / x
;
356 g
->io_timestamp
= timestamp
;
357 g
->io_iteration
= iteration
;
358 } else if (STR_IN_SET(controller
, "cpu", "cpuacct") || cpu_accounting_is_cheap()) {
359 _cleanup_free_
char *p
= NULL
, *v
= NULL
;
363 if (is_root_cgroup(path
)) {
364 r
= procfs_cpu_get_usage(&new_usage
);
367 } else if (all_unified
) {
368 _cleanup_free_
char *val
= NULL
;
370 if (!streq(controller
, "cpu"))
373 r
= cg_get_keyed_attribute("cpu", path
, "cpu.stat", STRV_MAKE("usage_usec"), &val
);
374 if (IN_SET(r
, -ENOENT
, -ENXIO
))
379 r
= safe_atou64(val
, &new_usage
);
383 new_usage
*= NSEC_PER_USEC
;
385 if (!streq(controller
, "cpuacct"))
388 r
= cg_get_path(controller
, path
, "cpuacct.usage", &p
);
392 r
= read_one_line_file(p
, &v
);
398 r
= safe_atou64(v
, &new_usage
);
403 timestamp
= now_nsec(CLOCK_MONOTONIC
);
405 if (g
->cpu_iteration
== iteration
- 1 &&
406 (nsec_t
) new_usage
> g
->cpu_usage
) {
410 x
= timestamp
- g
->cpu_timestamp
;
414 y
= (nsec_t
) new_usage
- g
->cpu_usage
;
415 g
->cpu_fraction
= (double) y
/ (double) x
;
419 g
->cpu_usage
= (nsec_t
) new_usage
;
420 g
->cpu_timestamp
= timestamp
;
421 g
->cpu_iteration
= iteration
;
431 static int refresh_one(
432 const char *controller
,
440 _cleanup_closedir_
DIR *d
= NULL
;
448 if (depth
> arg_depth
)
451 r
= process(controller
, path
, a
, b
, iteration
, &ours
);
455 r
= cg_enumerate_subgroups(controller
, path
, &d
);
462 _cleanup_free_
char *fn
= NULL
, *p
= NULL
;
465 r
= cg_read_subgroup(d
, &fn
);
471 p
= path_join(path
, fn
);
477 r
= refresh_one(controller
, p
, a
, b
, iteration
, depth
+ 1, &child
);
482 IN_SET(arg_count
, COUNT_ALL_PROCESSES
, COUNT_USERSPACE_PROCESSES
) &&
484 child
->n_tasks_valid
&&
485 streq(controller
, SYSTEMD_CGROUP_CONTROLLER
)) {
487 /* Recursively sum up processes */
489 if (ours
->n_tasks_valid
)
490 ours
->n_tasks
+= child
->n_tasks
;
492 ours
->n_tasks
= child
->n_tasks
;
493 ours
->n_tasks_valid
= true;
504 static int refresh(const char *root
, Hashmap
*a
, Hashmap
*b
, unsigned iteration
) {
508 FOREACH_STRING(c
, SYSTEMD_CGROUP_CONTROLLER
, "cpu", "cpuacct", "memory", "io", "blkio", "pids") {
509 r
= refresh_one(c
, root
, a
, b
, iteration
, 0, NULL
);
517 static int group_compare(Group
* const *a
, Group
* const *b
) {
518 const Group
*x
= *a
, *y
= *b
;
521 if (arg_order
!= ORDER_TASKS
|| arg_recursive
) {
522 /* Let's make sure that the parent is always before
523 * the child. Except when ordering by tasks and
524 * recursive summing is off, since that is actually
525 * not accumulative for all children. */
527 if (path_startswith(empty_to_root(y
->path
), empty_to_root(x
->path
)))
529 if (path_startswith(empty_to_root(x
->path
), empty_to_root(y
->path
)))
539 if (arg_cpu_type
== CPU_PERCENT
) {
540 if (x
->cpu_valid
&& y
->cpu_valid
) {
541 r
= CMP(y
->cpu_fraction
, x
->cpu_fraction
);
544 } else if (x
->cpu_valid
)
546 else if (y
->cpu_valid
)
549 r
= CMP(y
->cpu_usage
, x
->cpu_usage
);
557 if (x
->n_tasks_valid
&& y
->n_tasks_valid
) {
558 r
= CMP(y
->n_tasks
, x
->n_tasks
);
561 } else if (x
->n_tasks_valid
)
563 else if (y
->n_tasks_valid
)
569 if (x
->memory_valid
&& y
->memory_valid
) {
570 r
= CMP(y
->memory
, x
->memory
);
573 } else if (x
->memory_valid
)
575 else if (y
->memory_valid
)
581 if (x
->io_valid
&& y
->io_valid
) {
582 r
= CMP(y
->io_input_bps
+ y
->io_output_bps
, x
->io_input_bps
+ x
->io_output_bps
);
585 } else if (x
->io_valid
)
587 else if (y
->io_valid
)
591 return path_compare(x
->path
, y
->path
);
594 static void display(Hashmap
*a
) {
598 unsigned rows
, n
= 0, j
, maxtcpu
= 0, maxtpath
= 3; /* 3 for ellipsize() to work properly */
599 char buffer
[MAX4(21U, FORMAT_BYTES_MAX
, FORMAT_TIMESPAN_MAX
, DECIMAL_STR_MAX(usec_t
))];
603 if (!terminal_is_dumb())
604 fputs(ANSI_HOME_CLEAR
, stdout
);
606 array
= newa(Group
*, hashmap_size(a
));
608 HASHMAP_FOREACH(g
, a
)
609 if (g
->n_tasks_valid
|| g
->cpu_valid
|| g
->memory_valid
|| g
->io_valid
)
612 typesafe_qsort(array
, n
, group_compare
);
614 /* Find the longest names in one run */
615 for (j
= 0; j
< n
; j
++) {
616 unsigned cputlen
, pathtlen
;
618 maybe_format_timespan(buffer
, sizeof(buffer
), (usec_t
) (array
[j
]->cpu_usage
/ NSEC_PER_USEC
), 0);
619 cputlen
= strlen(buffer
);
620 maxtcpu
= MAX(maxtcpu
, cputlen
);
622 pathtlen
= strlen(array
[j
]->path
);
623 maxtpath
= MAX(maxtpath
, pathtlen
);
626 if (arg_cpu_type
== CPU_PERCENT
)
627 xsprintf(buffer
, "%6s", "%CPU");
629 xsprintf(buffer
, "%*s", maxtcpu
, "CPU Time");
636 const char *on
, *off
;
638 path_columns
= columns() - 36 - strlen(buffer
);
639 if (path_columns
< 10)
642 on
= ansi_highlight_underline();
643 off
= ansi_underline();
645 printf("%s%s%-*s%s %s%7s%s %s%s%s %s%8s%s %s%8s%s %s%8s%s%s\n",
647 arg_order
== ORDER_PATH
? on
: "", path_columns
, "Control Group",
648 arg_order
== ORDER_PATH
? off
: "",
649 arg_order
== ORDER_TASKS
? on
: "", arg_count
== COUNT_PIDS
? "Tasks" : arg_count
== COUNT_USERSPACE_PROCESSES
? "Procs" : "Proc+",
650 arg_order
== ORDER_TASKS
? off
: "",
651 arg_order
== ORDER_CPU
? on
: "", buffer
,
652 arg_order
== ORDER_CPU
? off
: "",
653 arg_order
== ORDER_MEMORY
? on
: "", "Memory",
654 arg_order
== ORDER_MEMORY
? off
: "",
655 arg_order
== ORDER_IO
? on
: "", "Input/s",
656 arg_order
== ORDER_IO
? off
: "",
657 arg_order
== ORDER_IO
? on
: "", "Output/s",
658 arg_order
== ORDER_IO
? off
: "",
661 path_columns
= maxtpath
;
663 for (j
= 0; j
< n
; j
++) {
664 _cleanup_free_
char *ellipsized
= NULL
;
667 if (on_tty() && j
+ 6 > rows
)
672 path
= empty_to_root(g
->path
);
673 ellipsized
= ellipsize(path
, path_columns
, 33);
674 printf("%-*s", path_columns
, ellipsized
?: path
);
676 if (g
->n_tasks_valid
)
677 printf(" %7" PRIu64
, g
->n_tasks
);
681 if (arg_cpu_type
== CPU_PERCENT
) {
683 printf(" %6.1f", g
->cpu_fraction
*100);
687 printf(" %*s", maxtcpu
, maybe_format_timespan(buffer
, sizeof(buffer
), (usec_t
) (g
->cpu_usage
/ NSEC_PER_USEC
), 0));
689 printf(" %8s", maybe_format_bytes(buffer
, sizeof(buffer
), g
->memory_valid
, g
->memory
));
690 printf(" %8s", maybe_format_bytes(buffer
, sizeof(buffer
), g
->io_valid
, g
->io_input_bps
));
691 printf(" %8s", maybe_format_bytes(buffer
, sizeof(buffer
), g
->io_valid
, g
->io_output_bps
));
697 static int help(void) {
698 _cleanup_free_
char *link
= NULL
;
701 r
= terminal_urlify_man("systemd-cgtop", "1", &link
);
705 printf("%s [OPTIONS...] [CGROUP]\n\n"
706 "Show top control groups by their resource usage.\n\n"
707 " -h --help Show this help\n"
708 " --version Show package version\n"
709 " -p --order=path Order by path\n"
710 " -t --order=tasks Order by number of tasks/processes\n"
711 " -c --order=cpu Order by CPU load (default)\n"
712 " -m --order=memory Order by memory load\n"
713 " -i --order=io Order by IO load\n"
714 " -r --raw Provide raw (not human-readable) numbers\n"
715 " --cpu=percentage Show CPU usage as percentage (default)\n"
716 " --cpu=time Show CPU usage as time\n"
717 " -P Count userspace processes instead of tasks (excl. kernel)\n"
718 " -k Count all processes instead of tasks (incl. kernel)\n"
719 " --recursive=BOOL Sum up process count recursively\n"
720 " -d --delay=DELAY Delay between updates\n"
721 " -n --iterations=N Run for N iterations before exiting\n"
722 " -1 Shortcut for --iterations=1\n"
723 " -b --batch Run in batch mode, accepting no input\n"
724 " --depth=DEPTH Maximum traversal depth (default: %u)\n"
725 " -M --machine= Show container\n"
726 "\nSee the %s for details.\n",
727 program_invocation_short_name
,
734 static int parse_argv(int argc
, char *argv
[]) {
743 static const struct option options
[] = {
744 { "help", no_argument
, NULL
, 'h' },
745 { "version", no_argument
, NULL
, ARG_VERSION
},
746 { "delay", required_argument
, NULL
, 'd' },
747 { "iterations", required_argument
, NULL
, 'n' },
748 { "batch", no_argument
, NULL
, 'b' },
749 { "raw", no_argument
, NULL
, 'r' },
750 { "depth", required_argument
, NULL
, ARG_DEPTH
},
751 { "cpu", optional_argument
, NULL
, ARG_CPU_TYPE
},
752 { "order", required_argument
, NULL
, ARG_ORDER
},
753 { "recursive", required_argument
, NULL
, ARG_RECURSIVE
},
754 { "machine", required_argument
, NULL
, 'M' },
763 while ((c
= getopt_long(argc
, argv
, "hptcmin:brd:kPM:1", options
, NULL
)) >= 0)
775 if (streq(optarg
, "time"))
776 arg_cpu_type
= CPU_TIME
;
777 else if (streq(optarg
, "percentage"))
778 arg_cpu_type
= CPU_PERCENT
;
780 return log_error_errno(SYNTHETIC_ERRNO(EINVAL
),
781 "Unknown argument to --cpu=: %s",
784 arg_cpu_type
= CPU_TIME
;
789 r
= safe_atou(optarg
, &arg_depth
);
791 return log_error_errno(r
, "Failed to parse depth parameter '%s': %m", optarg
);
796 r
= parse_sec(optarg
, &arg_delay
);
798 return log_error_errno(r
, "Failed to parse delay parameter '%s': %m", optarg
);
800 return log_error_errno(SYNTHETIC_ERRNO(EINVAL
),
801 "Invalid delay parameter '%s'",
807 r
= safe_atou(optarg
, &arg_iterations
);
809 return log_error_errno(r
, "Failed to parse iterations parameter '%s': %m", optarg
);
826 arg_order
= ORDER_PATH
;
830 arg_order
= ORDER_TASKS
;
834 arg_order
= ORDER_CPU
;
838 arg_order
= ORDER_MEMORY
;
842 arg_order
= ORDER_IO
;
846 if (streq(optarg
, "path"))
847 arg_order
= ORDER_PATH
;
848 else if (streq(optarg
, "tasks"))
849 arg_order
= ORDER_TASKS
;
850 else if (streq(optarg
, "cpu"))
851 arg_order
= ORDER_CPU
;
852 else if (streq(optarg
, "memory"))
853 arg_order
= ORDER_MEMORY
;
854 else if (streq(optarg
, "io"))
855 arg_order
= ORDER_IO
;
857 return log_error_errno(SYNTHETIC_ERRNO(EINVAL
),
858 "Invalid argument to --order=: %s",
863 arg_count
= COUNT_ALL_PROCESSES
;
867 arg_count
= COUNT_USERSPACE_PROCESSES
;
871 r
= parse_boolean_argument("--recursive=", optarg
, &arg_recursive
);
875 arg_recursive_unset
= !r
;
879 arg_machine
= optarg
;
886 assert_not_reached("Unhandled option");
889 if (optind
== argc
- 1)
890 arg_root
= argv
[optind
];
891 else if (optind
< argc
)
892 return log_error_errno(SYNTHETIC_ERRNO(EINVAL
),
893 "Too many arguments.");
898 static const char* counting_what(void) {
899 if (arg_count
== COUNT_PIDS
)
901 else if (arg_count
== COUNT_ALL_PROCESSES
)
902 return "all processes (incl. kernel)";
904 return "userspace processes (excl. kernel)";
907 DEFINE_PRIVATE_HASH_OPS_WITH_VALUE_DESTRUCTOR(group_hash_ops
, char, path_hash_func
, path_compare
, Group
, group_free
);
909 static int run(int argc
, char *argv
[]) {
910 _cleanup_hashmap_free_ Hashmap
*a
= NULL
, *b
= NULL
;
911 unsigned iteration
= 0;
912 usec_t last_refresh
= 0;
913 bool quit
= false, immediate_refresh
= false;
914 _cleanup_free_
char *root
= NULL
;
920 r
= parse_argv(argc
, argv
);
924 r
= cg_mask_supported(&mask
);
926 return log_error_errno(r
, "Failed to determine supported controllers: %m");
928 arg_count
= (mask
& CGROUP_MASK_PIDS
) ? COUNT_PIDS
: COUNT_USERSPACE_PROCESSES
;
930 if (arg_recursive_unset
&& arg_count
== COUNT_PIDS
)
931 return log_error_errno(SYNTHETIC_ERRNO(EINVAL
),
932 "Non-recursive counting is only supported when counting processes, not tasks. Use -P or -k.");
934 r
= show_cgroup_get_path_and_warn(arg_machine
, arg_root
, &root
);
936 return log_error_errno(r
, "Failed to get root control group path: %m");
937 log_debug("CGroup path: %s", root
);
939 a
= hashmap_new(&group_hash_ops
);
940 b
= hashmap_new(&group_hash_ops
);
944 signal(SIGWINCH
, columns_lines_cache_reset
);
946 if (arg_iterations
== UINT_MAX
)
947 arg_iterations
= on_tty() ? 0 : 1;
952 char h
[FORMAT_TIMESPAN_MAX
];
954 t
= now(CLOCK_MONOTONIC
);
956 if (t
>= usec_add(last_refresh
, arg_delay
) || immediate_refresh
) {
958 r
= refresh(root
, a
, b
, iteration
++);
960 return log_error_errno(r
, "Failed to refresh: %m");
966 immediate_refresh
= false;
971 if (arg_iterations
&& iteration
>= arg_iterations
)
974 if (!on_tty()) /* non-TTY: Empty newline as delimiter between polls */
979 (void) usleep(usec_add(usec_sub_unsigned(last_refresh
, t
), arg_delay
));
981 r
= read_one_char(stdin
, &key
, usec_add(usec_sub_unsigned(last_refresh
, t
), arg_delay
), NULL
);
985 return log_error_errno(r
, "Couldn't read key: %m");
988 if (on_tty()) { /* TTY: Clear any user keystroke */
989 fputs("\r \r", stdout
);
999 immediate_refresh
= true;
1007 arg_order
= ORDER_PATH
;
1011 arg_order
= ORDER_TASKS
;
1015 arg_order
= ORDER_CPU
;
1019 arg_order
= ORDER_MEMORY
;
1023 arg_order
= ORDER_IO
;
1027 arg_cpu_type
= arg_cpu_type
== CPU_TIME
? CPU_PERCENT
: CPU_TIME
;
1031 arg_count
= arg_count
!= COUNT_ALL_PROCESSES
? COUNT_ALL_PROCESSES
: COUNT_PIDS
;
1032 fprintf(stdout
, "\nCounting: %s.", counting_what());
1038 arg_count
= arg_count
!= COUNT_USERSPACE_PROCESSES
? COUNT_USERSPACE_PROCESSES
: COUNT_PIDS
;
1039 fprintf(stdout
, "\nCounting: %s.", counting_what());
1045 if (arg_count
== COUNT_PIDS
)
1046 fprintf(stdout
, "\n\aCannot toggle recursive counting, not available in task counting mode.");
1048 arg_recursive
= !arg_recursive
;
1049 fprintf(stdout
, "\nRecursive process counting: %s", yes_no(arg_recursive
));
1056 arg_delay
= usec_add(arg_delay
, arg_delay
< USEC_PER_SEC
? USEC_PER_MSEC
* 250 : USEC_PER_SEC
);
1058 fprintf(stdout
, "\nIncreased delay to %s.", format_timespan(h
, sizeof(h
), arg_delay
, 0));
1064 if (arg_delay
<= USEC_PER_MSEC
*500)
1065 arg_delay
= USEC_PER_MSEC
*250;
1067 arg_delay
= usec_sub_unsigned(arg_delay
, arg_delay
< USEC_PER_MSEC
* 1250 ? USEC_PER_MSEC
* 250 : USEC_PER_SEC
);
1069 fprintf(stdout
, "\nDecreased delay to %s.", format_timespan(h
, sizeof(h
), arg_delay
, 0));
1078 "\t<%1$sp%2$s> By path; <%1$st%2$s> By tasks/procs; <%1$sc%2$s> By CPU; <%1$sm%2$s> By memory; <%1$si%2$s> By I/O\n"
1079 "\t<%1$s+%2$s> Inc. delay; <%1$s-%2$s> Dec. delay; <%1$s%%%2$s> Toggle time; <%1$sSPACE%2$s> Refresh\n"
1080 "\t<%1$sP%2$s> Toggle count userspace processes; <%1$sk%2$s> Toggle count all processes\n"
1081 "\t<%1$sr%2$s> Count processes recursively; <%1$sq%2$s> Quit",
1082 ansi_highlight(), ansi_normal());
1089 fprintf(stdout
, "\nUnknown key '\\x%x'. Ignoring.", key
);
1091 fprintf(stdout
, "\nUnknown key '%c'. Ignoring.", key
);
/* Generates main(), delegating to run() with standard systemd exit handling. */
DEFINE_MAIN_FUNCTION(run);