// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express target device driver tracepoints
 * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
 */

#include <asm/unaligned.h>
#include "trace.h"

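/*
 * Decode helpers for the command-specific dwords of captured commands.
 * Each helper appends a human-readable summary to the trace_seq buffer,
 * NUL-terminates it, and returns a pointer to the start of that string,
 * which is then printed by the nvmet trace events declared in trace.h.
 */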
static const char *nvmet_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 cns = cdw10[0];
	u16 ctrlid = get_unaligned_le16(cdw10 + 2);

	trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_admin_get_features(struct trace_seq *p,
						  u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 fid = cdw10[0];
	u8 sel = cdw10[1] & 0x7;
	u32 cdw11 = get_unaligned_le32(cdw10 + 4);

	trace_seq_printf(p, "fid=0x%x, sel=0x%x, cdw11=0x%x", fid, sel, cdw11);
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_get_lba_status(struct trace_seq *p,
					      u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u64 slba = get_unaligned_le64(cdw10);
	u32 mndw = get_unaligned_le32(cdw10 + 8);
	u16 rl = get_unaligned_le16(cdw10 + 12);
	u8 atype = cdw10[15];

	trace_seq_printf(p, "slba=0x%llx, mndw=0x%x, rl=0x%x, atype=%u",
			 slba, mndw, rl, atype);
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_admin_set_features(struct trace_seq *p,
						  u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 fid = cdw10[0];
	u8 sv = cdw10[3] & 0x8;
	u32 cdw11 = get_unaligned_le32(cdw10 + 4);

	trace_seq_printf(p, "fid=0x%x, sv=0x%x, cdw11=0x%x", fid, sv, cdw11);
	trace_seq_putc(p, 0);

	return ret;
}

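/*
 * Shared by Read, Write and Write Zeroes (see nvmet_trace_parse_nvm_cmd()
 * below).  As an illustrative example, a Read of eight blocks starting at
 * LBA 0 is rendered as "slba=0, len=7, ctrl=0x0, dsmgmt=0, reftag=0";
 * len is the zero-based NLB field taken straight from the command.
 */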
static const char *nvmet_trace_read_write(struct trace_seq *p, u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u64 slba = get_unaligned_le64(cdw10);
	u16 length = get_unaligned_le16(cdw10 + 8);
	u16 control = get_unaligned_le16(cdw10 + 10);
	u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
	u32 reftag = get_unaligned_le32(cdw10 + 16);

	trace_seq_printf(p,
			 "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
			 slba, length, control, dsmgmt, reftag);
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_dsm(struct trace_seq *p, u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_printf(p, "nr=%u, attributes=%u",
			 get_unaligned_le32(cdw10),
			 get_unaligned_le32(cdw10 + 4));
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_common(struct trace_seq *p, u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
	trace_seq_putc(p, 0);

	return ret;
}

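/*
 * Opcode-based dispatchers.  Commands without a dedicated decoder fall
 * back to nvmet_trace_common(), which dumps the raw command dwords
 * (cdw10..cdw15, 24 bytes) as hex.
 */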
const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
		u8 opcode, u8 *cdw10)
{
	switch (opcode) {
	case nvme_admin_identify:
		return nvmet_trace_admin_identify(p, cdw10);
	case nvme_admin_set_features:
		return nvmet_trace_admin_set_features(p, cdw10);
	case nvme_admin_get_features:
		return nvmet_trace_admin_get_features(p, cdw10);
	case nvme_admin_get_lba_status:
		return nvmet_trace_get_lba_status(p, cdw10);
	default:
		return nvmet_trace_common(p, cdw10);
	}
}

const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
		u8 opcode, u8 *cdw10)
{
	switch (opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
	case nvme_cmd_write_zeroes:
		return nvmet_trace_read_write(p, cdw10);
	case nvme_cmd_dsm:
		return nvmet_trace_dsm(p, cdw10);
	default:
		return nvmet_trace_common(p, cdw10);
	}
}

static const char *nvmet_trace_fabrics_property_set(struct trace_seq *p,
		u8 *spc)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 attrib = spc[0];
	u32 ofst = get_unaligned_le32(spc + 4);
	u64 value = get_unaligned_le64(spc + 8);

	trace_seq_printf(p, "attrib=%u, ofst=0x%x, value=0x%llx",
			 attrib, ofst, value);
	trace_seq_putc(p, 0);
	return ret;
}

static const char *nvmet_trace_fabrics_connect(struct trace_seq *p,
		u8 *spc)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u16 recfmt = get_unaligned_le16(spc);
	u16 qid = get_unaligned_le16(spc + 2);
	u16 sqsize = get_unaligned_le16(spc + 4);
	u8 cattr = spc[6];
	u32 kato = get_unaligned_le32(spc + 8);

	trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u",
			 recfmt, qid, sqsize, cattr, kato);
	trace_seq_putc(p, 0);
	return ret;
}

static const char *nvmet_trace_fabrics_property_get(struct trace_seq *p,
		u8 *spc)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 attrib = spc[0];
	u32 ofst = get_unaligned_le32(spc + 4);

	trace_seq_printf(p, "attrib=%u, ofst=0x%x", attrib, ofst);
	trace_seq_putc(p, 0);
	return ret;
}

static const char *nvmet_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_printf(p, "specific=%*ph", 24, spc);
	trace_seq_putc(p, 0);
	return ret;
}

const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p,
		u8 fctype, u8 *spc)
{
	switch (fctype) {
	case nvme_fabrics_type_property_set:
		return nvmet_trace_fabrics_property_set(p, spc);
	case nvme_fabrics_type_connect:
		return nvmet_trace_fabrics_connect(p, spc);
	case nvme_fabrics_type_property_get:
		return nvmet_trace_fabrics_property_get(p, spc);
	default:
		return nvmet_trace_fabrics_common(p, spc);
	}
}

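/*
 * Helpers for the fixed part of the trace output: the "disk=..." prefix is
 * emitted only when a non-empty name was recorded for the request, and the
 * controller ID degrades to "_" while it is not known yet (see the comment
 * in nvmet_trace_ctrl_id() below).
 */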
const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
{
	const char *ret = trace_seq_buffer_ptr(p);

	if (*name)
		trace_seq_printf(p, "disk=%s, ", name);
	trace_seq_putc(p, 0);

	return ret;
}

const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id)
{
	const char *ret = trace_seq_buffer_ptr(p);

	/*
	 * XXX: The controller instance is not yet known while the connect
	 * command for the admin queue is executing, because the cntlid is
	 * only allocated by that command.  For I/O queues, the controller
	 * instance is mapped through the connect command's extra data.
	 * If that extra data becomes available at this stage, this print
	 * can be updated to show the real controller ID.
	 */
	if (ctrl_id)
		trace_seq_printf(p, "%d", ctrl_id);
	else
		trace_seq_printf(p, "_");
	trace_seq_putc(p, 0);

	return ret;
}