// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "cancel.h"
#include "rsrc.h"

#ifdef CONFIG_PROC_FS
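/*
 * Dump one registered personality (credential set) in a format similar
 * to /proc/<pid>/status; ids are translated into the namespace of the
 * reader via from_kuid_munged()/from_kgid_munged().
 */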
static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
		const struct cred *cred)
{
	struct user_namespace *uns = seq_user_ns(m);
	struct group_info *gi;
	kernel_cap_t cap;
	int g;

	seq_printf(m, "%5d\n", id);
	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
	seq_puts(m, "\n\tGroups:\t");
	gi = cred->group_info;
	for (g = 0; g < gi->ngroups; g++) {
		seq_put_decimal_ull(m, g ? " " : "",
				    from_kgid_munged(uns, gi->gid[g]));
	}
	seq_puts(m, "\n\tCapEff:\t");
	cap = cred->cap_effective;
	seq_put_hex_ll(m, NULL, cap.val, 16);
	seq_putc(m, '\n');
	return 0;
}

/*
 * Caller holds a reference to the file already, we don't need to do
 * anything else to get an extra reference.
 */
__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct io_ring_ctx *ctx = f->private_data;
	struct io_overflow_cqe *ocqe;
	struct io_rings *r = ctx->rings;
	struct rusage sq_usage;
	unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
	unsigned int sq_head = READ_ONCE(r->sq.head);
	unsigned int sq_tail = READ_ONCE(r->sq.tail);
	unsigned int cq_head = READ_ONCE(r->cq.head);
	unsigned int cq_tail = READ_ONCE(r->cq.tail);
	unsigned int cq_shift = 0;
	unsigned int sq_shift = 0;
	unsigned int sq_entries, cq_entries;
	int sq_pid = -1, sq_cpu = -1;
	u64 sq_total_time = 0, sq_work_time = 0;
	bool has_lock;
	unsigned int i;

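	/*
	 * IORING_SETUP_CQE32 and IORING_SETUP_SQE128 double the CQE and SQE
	 * sizes respectively, so a shift of 1 doubles the array stride when
	 * indexing the rings below.
	 */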
	if (ctx->flags & IORING_SETUP_CQE32)
		cq_shift = 1;
	if (ctx->flags & IORING_SETUP_SQE128)
		sq_shift = 1;

	/*
	 * We may get imprecise SQE and CQE info if the ring is actively
	 * running, since we read cached_sq_head and cached_cq_tail without
	 * holding uring_lock, and sq_tail and cq_head are changed by
	 * userspace. That's fine, as this info is typically inspected only
	 * when the ring is stuck.
	 */
	seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
	seq_printf(m, "SqHead:\t%u\n", sq_head);
	seq_printf(m, "SqTail:\t%u\n", sq_tail);
	seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
	seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
	seq_printf(m, "CqHead:\t%u\n", cq_head);
	seq_printf(m, "CqTail:\t%u\n", cq_tail);
	seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
	seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head);
	sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
	for (i = 0; i < sq_entries; i++) {
		unsigned int entry = i + sq_head;
		struct io_uring_sqe *sqe;
		unsigned int sq_idx;

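		/*
		 * With IORING_SETUP_NO_SQARRAY there is no SQ indirection
		 * array to resolve entries through, so skip the SQE dump.
		 */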
		if (ctx->flags & IORING_SETUP_NO_SQARRAY)
			break;
		sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
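		/* sq_array is user-writable shared memory; skip bogus indices */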
		if (sq_idx > sq_mask)
			continue;
		sqe = &ctx->sq_sqes[sq_idx << sq_shift];
		seq_printf(m, "%5u: opcode:%s, fd:%d, flags:%x, off:%llu, "
			      "addr:0x%llx, rw_flags:0x%x, buf_index:%d "
			      "user_data:%llu",
			   sq_idx, io_uring_get_opcode(sqe->opcode), sqe->fd,
			   sqe->flags, (unsigned long long) sqe->off,
			   (unsigned long long) sqe->addr, sqe->rw_flags,
			   sqe->buf_index, sqe->user_data);
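		/* for 128-byte SQEs, also dump the second half as raw u64 words */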
		if (sq_shift) {
			u64 *sqeb = (void *) (sqe + 1);
			int size = sizeof(struct io_uring_sqe) / sizeof(u64);
			int j;

			for (j = 0; j < size; j++) {
				seq_printf(m, ", e%d:0x%llx", j,
						(unsigned long long) *sqeb);
				sqeb++;
			}
		}
		seq_printf(m, "\n");
	}
	seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
	cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
	for (i = 0; i < cq_entries; i++) {
		unsigned int entry = i + cq_head;
		struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];

		seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x",
			   entry & cq_mask, cqe->user_data, cqe->res,
			   cqe->flags);
		if (cq_shift)
			seq_printf(m, ", extra1:%llu, extra2:%llu",
				   cqe->big_cqe[0], cqe->big_cqe[1]);
		seq_printf(m, "\n");
	}

	/*
	 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
	 * since the fdinfo case grabs it in the opposite direction of normal
	 * use cases. If we fail to get the lock, we just don't iterate any
	 * structures that could be going away outside the io_uring mutex.
	 */
	has_lock = mutex_trylock(&ctx->uring_lock);

	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
		struct io_sq_data *sq = ctx->sq_data;

		/*
		 * sq->thread might be NULL if we raced with the sqpoll
		 * thread termination.
		 */
		if (sq->thread) {
			sq_pid = sq->task_pid;
			sq_cpu = sq->sq_cpu;
			getrusage(sq->thread, RUSAGE_SELF, &sq_usage);
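			/* ru_stime is the thread's system CPU time; report it in microseconds */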
			sq_total_time = (sq_usage.ru_stime.tv_sec * 1000000
					 + sq_usage.ru_stime.tv_usec);
			sq_work_time = sq->work_time;
		}
	}

	seq_printf(m, "SqThread:\t%d\n", sq_pid);
	seq_printf(m, "SqThreadCpu:\t%d\n", sq_cpu);
	seq_printf(m, "SqTotalTime:\t%llu\n", sq_total_time);
	seq_printf(m, "SqWorkTime:\t%llu\n", sq_work_time);
	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
	for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
		struct file *f = io_file_from_index(&ctx->file_table, i);

		if (f)
			seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}
	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
	for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *buf = ctx->user_bufs[i];
		unsigned int len = buf->ubuf_end - buf->ubuf;

		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
	}
	if (has_lock && !xa_empty(&ctx->personalities)) {
		unsigned long index;
		const struct cred *cred;

		seq_printf(m, "Personalities:\n");
		xa_for_each(&ctx->personalities, index, cred)
			io_uring_show_cred(m, index, cred);
	}

	seq_puts(m, "PollList:\n");
	for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
		struct io_hash_bucket *hbl = &ctx->cancel_table_locked.hbs[i];
		struct io_kiocb *req;

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node)
			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
					task_work_pending(req->task));
		spin_unlock(&hb->lock);

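		/* cancel_table_locked is protected by uring_lock, not a bucket lock */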
		if (!has_lock)
			continue;
		hlist_for_each_entry(req, &hbl->list, hash_node)
			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
					task_work_pending(req->task));
	}

	if (has_lock)
		mutex_unlock(&ctx->uring_lock);

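	/* CQEs that didn't fit in the CQ ring are parked on the overflow list */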
	seq_puts(m, "CqOverflowList:\n");
	spin_lock(&ctx->completion_lock);
	list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
		struct io_uring_cqe *cqe = &ocqe->cqe;

		seq_printf(m, "  user_data=%llu, res=%d, flags=%x\n",
			   cqe->user_data, cqe->res, cqe->flags);
	}
	spin_unlock(&ctx->completion_lock);
}
#endif