drivers/gpu/drm/drm_dp_mst_topology.c (thirdparty/linux.git, as of drm-next-2020-06-02)
1 /*
2 * Copyright © 2014 Red Hat
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22
23 #include <linux/delay.h>
24 #include <linux/errno.h>
25 #include <linux/i2c.h>
26 #include <linux/init.h>
27 #include <linux/kernel.h>
28 #include <linux/sched.h>
29 #include <linux/seq_file.h>
30 #include <linux/iopoll.h>
31
32 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
33 #include <linux/stacktrace.h>
34 #include <linux/sort.h>
35 #include <linux/timekeeping.h>
36 #include <linux/math64.h>
37 #endif
38
39 #include <drm/drm_atomic.h>
40 #include <drm/drm_atomic_helper.h>
41 #include <drm/drm_dp_mst_helper.h>
42 #include <drm/drm_drv.h>
43 #include <drm/drm_print.h>
44 #include <drm/drm_probe_helper.h>
45
46 #include "drm_crtc_helper_internal.h"
47 #include "drm_dp_mst_topology_internal.h"
48
49 /**
50 * DOC: dp mst helper
51 *
52 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
53 * protocol. The helpers contain a topology manager and bandwidth manager.
54 * The helpers encapsulate the sending and receiving of sideband msgs.
55 */
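/*
 * Illustrative sketch, not part of the upstream helpers: a single sideband
 * transaction as framed by the code below is a short header (LCT/LCR, RAD,
 * message length, SOMT/EOMT/seqno, 4-bit header CRC) followed by the request
 * body and an 8-bit body CRC. For a one-chunk message to the first branch
 * device, where msg_len covers the body plus the trailing CRC byte, this
 * looks roughly like:
 *
 *	struct drm_dp_sideband_msg_hdr hdr = {
 *		.lct = 1, .lcr = 0,
 *		.somt = 1, .eomt = 1,
 *		.msg_len = body_len + 1,
 *	};
 *	u8 chunk[48];
 *	int hdr_len;
 *
 *	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &hdr_len);
 *	memcpy(&chunk[hdr_len], body, body_len);
 *	drm_dp_crc_sideband_chunk_req(&chunk[hdr_len], body_len);
 *
 * and is then written to DP_SIDEBAND_MSG_DOWN_REQ_BASE over the AUX channel.
 */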
56 struct drm_dp_pending_up_req {
57 struct drm_dp_sideband_msg_hdr hdr;
58 struct drm_dp_sideband_msg_req_body msg;
59 struct list_head next;
60 };
61
62 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
63 char *buf);
64
65 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
66
67 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
68 int id,
69 struct drm_dp_payload *payload);
70
71 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
72 struct drm_dp_mst_port *port,
73 int offset, int size, u8 *bytes);
74 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
75 struct drm_dp_mst_port *port,
76 int offset, int size, u8 *bytes);
77
78 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
79 struct drm_dp_mst_branch *mstb);
80
81 static void
82 drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
83 struct drm_dp_mst_branch *mstb);
84
85 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
86 struct drm_dp_mst_branch *mstb,
87 struct drm_dp_mst_port *port);
88 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
89 u8 *guid);
90
91 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
92 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
93 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
94
95 #define DBG_PREFIX "[dp_mst]"
96
97 #define DP_STR(x) [DP_ ## x] = #x
98
99 static const char *drm_dp_mst_req_type_str(u8 req_type)
100 {
101 static const char * const req_type_str[] = {
102 DP_STR(GET_MSG_TRANSACTION_VERSION),
103 DP_STR(LINK_ADDRESS),
104 DP_STR(CONNECTION_STATUS_NOTIFY),
105 DP_STR(ENUM_PATH_RESOURCES),
106 DP_STR(ALLOCATE_PAYLOAD),
107 DP_STR(QUERY_PAYLOAD),
108 DP_STR(RESOURCE_STATUS_NOTIFY),
109 DP_STR(CLEAR_PAYLOAD_ID_TABLE),
110 DP_STR(REMOTE_DPCD_READ),
111 DP_STR(REMOTE_DPCD_WRITE),
112 DP_STR(REMOTE_I2C_READ),
113 DP_STR(REMOTE_I2C_WRITE),
114 DP_STR(POWER_UP_PHY),
115 DP_STR(POWER_DOWN_PHY),
116 DP_STR(SINK_EVENT_NOTIFY),
117 DP_STR(QUERY_STREAM_ENC_STATUS),
118 };
119
120 if (req_type >= ARRAY_SIZE(req_type_str) ||
121 !req_type_str[req_type])
122 return "unknown";
123
124 return req_type_str[req_type];
125 }
126
127 #undef DP_STR
128 #define DP_STR(x) [DP_NAK_ ## x] = #x
129
130 static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
131 {
132 static const char * const nak_reason_str[] = {
133 DP_STR(WRITE_FAILURE),
134 DP_STR(INVALID_READ),
135 DP_STR(CRC_FAILURE),
136 DP_STR(BAD_PARAM),
137 DP_STR(DEFER),
138 DP_STR(LINK_FAILURE),
139 DP_STR(NO_RESOURCES),
140 DP_STR(DPCD_FAIL),
141 DP_STR(I2C_NAK),
142 DP_STR(ALLOCATE_FAIL),
143 };
144
145 if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
146 !nak_reason_str[nak_reason])
147 return "unknown";
148
149 return nak_reason_str[nak_reason];
150 }
151
152 #undef DP_STR
153 #define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x
154
155 static const char *drm_dp_mst_sideband_tx_state_str(int state)
156 {
157 static const char * const sideband_reason_str[] = {
158 DP_STR(QUEUED),
159 DP_STR(START_SEND),
160 DP_STR(SENT),
161 DP_STR(RX),
162 DP_STR(TIMEOUT),
163 };
164
165 if (state >= ARRAY_SIZE(sideband_reason_str) ||
166 !sideband_reason_str[state])
167 return "unknown";
168
169 return sideband_reason_str[state];
170 }
171
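/*
 * Debug helper below: the RAD (Relative ADdress) stores one 4-bit port number
 * per hop, packed two hops to a byte, so unpack it into one byte per hop to
 * let the %*phC format print it.
 */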
172 static int
173 drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
174 {
175 int i;
176 u8 unpacked_rad[16];
177
178 for (i = 0; i < lct; i++) {
179 if (i % 2)
180 unpacked_rad[i] = rad[i / 2] >> 4;
181 else
182 unpacked_rad[i] = rad[i / 2] & 0xf;
183 }
184
185 /* TODO: Eventually add something to printk so we can format the rad
186 * like this: 1.2.3
187 */
188 return snprintf(out, len, "%*phC", lct, unpacked_rad);
189 }
190
191 /* sideband msg handling */
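/*
 * Both CRC helpers below are plain bitwise long division: a 4-bit CRC over
 * the header nibbles (XOR constant 0x13, i.e. generator x^4 + x + 1) and an
 * 8-bit CRC over the message body (XOR constant 0xd5 plus the implicit top
 * bit, i.e. generator x^8 + x^7 + x^6 + x^4 + x^2 + 1).
 */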
192 static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
193 {
194 u8 bitmask = 0x80;
195 u8 bitshift = 7;
196 u8 array_index = 0;
197 int number_of_bits = num_nibbles * 4;
198 u8 remainder = 0;
199
200 while (number_of_bits != 0) {
201 number_of_bits--;
202 remainder <<= 1;
203 remainder |= (data[array_index] & bitmask) >> bitshift;
204 bitmask >>= 1;
205 bitshift--;
206 if (bitmask == 0) {
207 bitmask = 0x80;
208 bitshift = 7;
209 array_index++;
210 }
211 if ((remainder & 0x10) == 0x10)
212 remainder ^= 0x13;
213 }
214
215 number_of_bits = 4;
216 while (number_of_bits != 0) {
217 number_of_bits--;
218 remainder <<= 1;
219 if ((remainder & 0x10) != 0)
220 remainder ^= 0x13;
221 }
222
223 return remainder;
224 }
225
226 static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
227 {
228 u8 bitmask = 0x80;
229 u8 bitshift = 7;
230 u8 array_index = 0;
231 int number_of_bits = number_of_bytes * 8;
232 u16 remainder = 0;
233
234 while (number_of_bits != 0) {
235 number_of_bits--;
236 remainder <<= 1;
237 remainder |= (data[array_index] & bitmask) >> bitshift;
238 bitmask >>= 1;
239 bitshift--;
240 if (bitmask == 0) {
241 bitmask = 0x80;
242 bitshift = 7;
243 array_index++;
244 }
245 if ((remainder & 0x100) == 0x100)
246 remainder ^= 0xd5;
247 }
248
249 number_of_bits = 8;
250 while (number_of_bits != 0) {
251 number_of_bits--;
252 remainder <<= 1;
253 if ((remainder & 0x100) != 0)
254 remainder ^= 0xd5;
255 }
256
257 return remainder & 0xff;
258 }
259 static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
260 {
261 u8 size = 3;
262 size += (hdr->lct / 2);
263 return size;
264 }
265
266 static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
267 u8 *buf, int *len)
268 {
269 int idx = 0;
270 int i;
271 u8 crc4;
272 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
273 for (i = 0; i < (hdr->lct / 2); i++)
274 buf[idx++] = hdr->rad[i];
275 buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
276 (hdr->msg_len & 0x3f);
277 buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
278
279 crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
280 buf[idx - 1] |= (crc4 & 0xf);
281
282 *len = idx;
283 }
284
285 static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
286 u8 *buf, int buflen, u8 *hdrlen)
287 {
288 u8 crc4;
289 u8 len;
290 int i;
291 u8 idx;
292 if (buf[0] == 0)
293 return false;
294 len = 3;
295 len += ((buf[0] & 0xf0) >> 4) / 2;
296 if (len > buflen)
297 return false;
298 crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
299
300 if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
301 DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
302 return false;
303 }
304
305 hdr->lct = (buf[0] & 0xf0) >> 4;
306 hdr->lcr = (buf[0] & 0xf);
307 idx = 1;
308 for (i = 0; i < (hdr->lct / 2); i++)
309 hdr->rad[i] = buf[idx++];
310 hdr->broadcast = (buf[idx] >> 7) & 0x1;
311 hdr->path_msg = (buf[idx] >> 6) & 0x1;
312 hdr->msg_len = buf[idx] & 0x3f;
313 idx++;
314 hdr->somt = (buf[idx] >> 7) & 0x1;
315 hdr->eomt = (buf[idx] >> 6) & 0x1;
316 hdr->seqno = (buf[idx] >> 4) & 0x1;
317 idx++;
318 *hdrlen = idx;
319 return true;
320 }
321
322 void
323 drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
324 struct drm_dp_sideband_msg_tx *raw)
325 {
326 int idx = 0;
327 int i;
328 u8 *buf = raw->msg;
329 buf[idx++] = req->req_type & 0x7f;
330
331 switch (req->req_type) {
332 case DP_ENUM_PATH_RESOURCES:
333 case DP_POWER_DOWN_PHY:
334 case DP_POWER_UP_PHY:
335 buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
336 idx++;
337 break;
338 case DP_ALLOCATE_PAYLOAD:
339 buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
340 (req->u.allocate_payload.number_sdp_streams & 0xf);
341 idx++;
342 buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
343 idx++;
344 buf[idx] = (req->u.allocate_payload.pbn >> 8);
345 idx++;
346 buf[idx] = (req->u.allocate_payload.pbn & 0xff);
347 idx++;
348 for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
349 buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
350 (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
351 idx++;
352 }
353 if (req->u.allocate_payload.number_sdp_streams & 1) {
354 i = req->u.allocate_payload.number_sdp_streams - 1;
355 buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
356 idx++;
357 }
358 break;
359 case DP_QUERY_PAYLOAD:
360 buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
361 idx++;
362 buf[idx] = (req->u.query_payload.vcpi & 0x7f);
363 idx++;
364 break;
365 case DP_REMOTE_DPCD_READ:
366 buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
367 buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
368 idx++;
369 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
370 idx++;
371 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
372 idx++;
373 buf[idx] = (req->u.dpcd_read.num_bytes);
374 idx++;
375 break;
376
377 case DP_REMOTE_DPCD_WRITE:
378 buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
379 buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
380 idx++;
381 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
382 idx++;
383 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
384 idx++;
385 buf[idx] = (req->u.dpcd_write.num_bytes);
386 idx++;
387 memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
388 idx += req->u.dpcd_write.num_bytes;
389 break;
390 case DP_REMOTE_I2C_READ:
391 buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
392 buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
393 idx++;
394 for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
395 buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
396 idx++;
397 buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
398 idx++;
399 memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
400 idx += req->u.i2c_read.transactions[i].num_bytes;
401
402 buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
403 buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
404 idx++;
405 }
406 buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
407 idx++;
408 buf[idx] = (req->u.i2c_read.num_bytes_read);
409 idx++;
410 break;
411
412 case DP_REMOTE_I2C_WRITE:
413 buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
414 idx++;
415 buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
416 idx++;
417 buf[idx] = (req->u.i2c_write.num_bytes);
418 idx++;
419 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
420 idx += req->u.i2c_write.num_bytes;
421 break;
422 }
423 raw->cur_len = idx;
424 }
425 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
426
427 /* Decode a sideband request we've encoded, mainly used for debugging */
428 int
429 drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
430 struct drm_dp_sideband_msg_req_body *req)
431 {
432 const u8 *buf = raw->msg;
433 int i, idx = 0;
434
435 req->req_type = buf[idx++] & 0x7f;
436 switch (req->req_type) {
437 case DP_ENUM_PATH_RESOURCES:
438 case DP_POWER_DOWN_PHY:
439 case DP_POWER_UP_PHY:
440 req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
441 break;
442 case DP_ALLOCATE_PAYLOAD:
443 {
444 struct drm_dp_allocate_payload *a =
445 &req->u.allocate_payload;
446
447 a->number_sdp_streams = buf[idx] & 0xf;
448 a->port_number = (buf[idx] >> 4) & 0xf;
449
450 WARN_ON(buf[++idx] & 0x80);
451 a->vcpi = buf[idx] & 0x7f;
452
453 a->pbn = buf[++idx] << 8;
454 a->pbn |= buf[++idx];
455
456 idx++;
457 for (i = 0; i < a->number_sdp_streams; i++) {
458 a->sdp_stream_sink[i] =
459 (buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
460 }
461 }
462 break;
463 case DP_QUERY_PAYLOAD:
464 req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
465 WARN_ON(buf[++idx] & 0x80);
466 req->u.query_payload.vcpi = buf[idx] & 0x7f;
467 break;
468 case DP_REMOTE_DPCD_READ:
469 {
470 struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;
471
472 r->port_number = (buf[idx] >> 4) & 0xf;
473
474 r->dpcd_address = (buf[idx] << 16) & 0xf0000;
475 r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
476 r->dpcd_address |= buf[++idx] & 0xff;
477
478 r->num_bytes = buf[++idx];
479 }
480 break;
481 case DP_REMOTE_DPCD_WRITE:
482 {
483 struct drm_dp_remote_dpcd_write *w =
484 &req->u.dpcd_write;
485
486 w->port_number = (buf[idx] >> 4) & 0xf;
487
488 w->dpcd_address = (buf[idx] << 16) & 0xf0000;
489 w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
490 w->dpcd_address |= buf[++idx] & 0xff;
491
492 w->num_bytes = buf[++idx];
493
494 w->bytes = kmemdup(&buf[++idx], w->num_bytes,
495 GFP_KERNEL);
496 if (!w->bytes)
497 return -ENOMEM;
498 }
499 break;
500 case DP_REMOTE_I2C_READ:
501 {
502 struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
503 struct drm_dp_remote_i2c_read_tx *tx;
504 bool failed = false;
505
506 r->num_transactions = buf[idx] & 0x3;
507 r->port_number = (buf[idx] >> 4) & 0xf;
508 for (i = 0; i < r->num_transactions; i++) {
509 tx = &r->transactions[i];
510
511 tx->i2c_dev_id = buf[++idx] & 0x7f;
512 tx->num_bytes = buf[++idx];
513 tx->bytes = kmemdup(&buf[++idx],
514 tx->num_bytes,
515 GFP_KERNEL);
516 if (!tx->bytes) {
517 failed = true;
518 break;
519 }
520 idx += tx->num_bytes;
521 tx->no_stop_bit = (buf[idx] >> 4) & 0x1;
522 tx->i2c_transaction_delay = buf[idx] & 0xf;
523 }
524
525 if (failed) {
526 for (i = 0; i < r->num_transactions; i++) {
527 tx = &r->transactions[i];
528 kfree(tx->bytes);
529 }
530 return -ENOMEM;
531 }
532
533 r->read_i2c_device_id = buf[++idx] & 0x7f;
534 r->num_bytes_read = buf[++idx];
535 }
536 break;
537 case DP_REMOTE_I2C_WRITE:
538 {
539 struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;
540
541 w->port_number = (buf[idx] >> 4) & 0xf;
542 w->write_i2c_device_id = buf[++idx] & 0x7f;
543 w->num_bytes = buf[++idx];
544 w->bytes = kmemdup(&buf[++idx], w->num_bytes,
545 GFP_KERNEL);
546 if (!w->bytes)
547 return -ENOMEM;
548 }
549 break;
550 }
551
552 return 0;
553 }
554 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
555
556 void
557 drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
558 int indent, struct drm_printer *printer)
559 {
560 int i;
561
562 #define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
563 if (req->req_type == DP_LINK_ADDRESS) {
564 /* No contents to print */
565 P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
566 return;
567 }
568
569 P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
570 indent++;
571
572 switch (req->req_type) {
573 case DP_ENUM_PATH_RESOURCES:
574 case DP_POWER_DOWN_PHY:
575 case DP_POWER_UP_PHY:
576 P("port=%d\n", req->u.port_num.port_number);
577 break;
578 case DP_ALLOCATE_PAYLOAD:
579 P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
580 req->u.allocate_payload.port_number,
581 req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
582 req->u.allocate_payload.number_sdp_streams,
583 req->u.allocate_payload.number_sdp_streams,
584 req->u.allocate_payload.sdp_stream_sink);
585 break;
586 case DP_QUERY_PAYLOAD:
587 P("port=%d vcpi=%d\n",
588 req->u.query_payload.port_number,
589 req->u.query_payload.vcpi);
590 break;
591 case DP_REMOTE_DPCD_READ:
592 P("port=%d dpcd_addr=%05x len=%d\n",
593 req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
594 req->u.dpcd_read.num_bytes);
595 break;
596 case DP_REMOTE_DPCD_WRITE:
597 P("port=%d addr=%05x len=%d: %*ph\n",
598 req->u.dpcd_write.port_number,
599 req->u.dpcd_write.dpcd_address,
600 req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
601 req->u.dpcd_write.bytes);
602 break;
603 case DP_REMOTE_I2C_READ:
604 P("port=%d num_tx=%d id=%d size=%d:\n",
605 req->u.i2c_read.port_number,
606 req->u.i2c_read.num_transactions,
607 req->u.i2c_read.read_i2c_device_id,
608 req->u.i2c_read.num_bytes_read);
609
610 indent++;
611 for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
612 const struct drm_dp_remote_i2c_read_tx *rtx =
613 &req->u.i2c_read.transactions[i];
614
615 P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
616 i, rtx->i2c_dev_id, rtx->num_bytes,
617 rtx->no_stop_bit, rtx->i2c_transaction_delay,
618 rtx->num_bytes, rtx->bytes);
619 }
620 break;
621 case DP_REMOTE_I2C_WRITE:
622 P("port=%d id=%d size=%d: %*ph\n",
623 req->u.i2c_write.port_number,
624 req->u.i2c_write.write_i2c_device_id,
625 req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
626 req->u.i2c_write.bytes);
627 break;
628 default:
629 P("???\n");
630 break;
631 }
632 #undef P
633 }
634 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
635
636 static inline void
637 drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
638 const struct drm_dp_sideband_msg_tx *txmsg)
639 {
640 struct drm_dp_sideband_msg_req_body req;
641 char buf[64];
642 int ret;
643 int i;
644
645 drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
646 sizeof(buf));
647 drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
648 txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
649 drm_dp_mst_sideband_tx_state_str(txmsg->state),
650 txmsg->path_msg, buf);
651
652 ret = drm_dp_decode_sideband_req(txmsg, &req);
653 if (ret) {
654 drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
655 return;
656 }
657 drm_dp_dump_sideband_msg_req_body(&req, 1, p);
658
659 switch (req.req_type) {
660 case DP_REMOTE_DPCD_WRITE:
661 kfree(req.u.dpcd_write.bytes);
662 break;
663 case DP_REMOTE_I2C_READ:
664 for (i = 0; i < req.u.i2c_read.num_transactions; i++)
665 kfree(req.u.i2c_read.transactions[i].bytes);
666 break;
667 case DP_REMOTE_I2C_WRITE:
668 kfree(req.u.i2c_write.bytes);
669 break;
670 }
671 }
672
673 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
674 {
675 u8 crc4;
676 crc4 = drm_dp_msg_data_crc4(msg, len);
677 msg[len] = crc4;
678 }
679
680 static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
681 struct drm_dp_sideband_msg_tx *raw)
682 {
683 int idx = 0;
684 u8 *buf = raw->msg;
685
686 buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
687
688 raw->cur_len = idx;
689 }
690
691 static bool drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
692 struct drm_dp_sideband_msg_hdr *hdr,
693 u8 hdrlen)
694 {
695 /*
696 * ignore out-of-order messages or messages that are part of a
697 * failed transaction
698 */
699 if (!hdr->somt && !msg->have_somt)
700 return false;
701
702 /* get length contained in this portion */
703 msg->curchunk_idx = 0;
704 msg->curchunk_len = hdr->msg_len;
705 msg->curchunk_hdrlen = hdrlen;
706
707 /* we have already gotten an somt - don't bother parsing */
708 if (hdr->somt && msg->have_somt)
709 return false;
710
711 if (hdr->somt) {
712 memcpy(&msg->initial_hdr, hdr,
713 sizeof(struct drm_dp_sideband_msg_hdr));
714 msg->have_somt = true;
715 }
716 if (hdr->eomt)
717 msg->have_eomt = true;
718
719 return true;
720 }
721
722 /* this adds a chunk of msg to the builder to get the final msg */
723 static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
724 u8 *replybuf, u8 replybuflen)
725 {
726 u8 crc4;
727
728 memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
729 msg->curchunk_idx += replybuflen;
730
731 if (msg->curchunk_idx >= msg->curchunk_len) {
732 /* do CRC */
733 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
734 if (crc4 != msg->chunk[msg->curchunk_len - 1])
735 print_hex_dump(KERN_DEBUG, "wrong crc",
736 DUMP_PREFIX_NONE, 16, 1,
737 msg->chunk, msg->curchunk_len, false);
738 /* copy chunk into bigger msg */
739 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
740 msg->curlen += msg->curchunk_len - 1;
741 }
742 return true;
743 }
744
745 static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
746 struct drm_dp_sideband_msg_reply_body *repmsg)
747 {
748 int idx = 1;
749 int i;
750 memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
751 idx += 16;
752 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
753 idx++;
754 if (idx > raw->curlen)
755 goto fail_len;
756 for (i = 0; i < repmsg->u.link_addr.nports; i++) {
757 if (raw->msg[idx] & 0x80)
758 repmsg->u.link_addr.ports[i].input_port = 1;
759
760 repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
761 repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
762
763 idx++;
764 if (idx > raw->curlen)
765 goto fail_len;
766 repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
767 repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
768 if (repmsg->u.link_addr.ports[i].input_port == 0)
769 repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
770 idx++;
771 if (idx > raw->curlen)
772 goto fail_len;
773 if (repmsg->u.link_addr.ports[i].input_port == 0) {
774 repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
775 idx++;
776 if (idx > raw->curlen)
777 goto fail_len;
778 memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
779 idx += 16;
780 if (idx > raw->curlen)
781 goto fail_len;
782 repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
783 repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
784 idx++;
785
786 }
787 if (idx > raw->curlen)
788 goto fail_len;
789 }
790
791 return true;
792 fail_len:
793 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
794 return false;
795 }
796
797 static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
798 struct drm_dp_sideband_msg_reply_body *repmsg)
799 {
800 int idx = 1;
801 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
802 idx++;
803 if (idx > raw->curlen)
804 goto fail_len;
805 repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
806 idx++;
807 if (idx > raw->curlen)
808 goto fail_len;
809
810 memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
811 return true;
812 fail_len:
813 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
814 return false;
815 }
816
817 static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
818 struct drm_dp_sideband_msg_reply_body *repmsg)
819 {
820 int idx = 1;
821 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
822 idx++;
823 if (idx > raw->curlen)
824 goto fail_len;
825 return true;
826 fail_len:
827 DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
828 return false;
829 }
830
831 static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
832 struct drm_dp_sideband_msg_reply_body *repmsg)
833 {
834 int idx = 1;
835
836 repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
837 idx++;
838 if (idx > raw->curlen)
839 goto fail_len;
840 repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
841 idx++;
842 /* TODO check */
843 memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
844 return true;
845 fail_len:
846 DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
847 return false;
848 }
849
850 static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
851 struct drm_dp_sideband_msg_reply_body *repmsg)
852 {
853 int idx = 1;
854 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
855 repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
856 idx++;
857 if (idx > raw->curlen)
858 goto fail_len;
859 repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
860 idx += 2;
861 if (idx > raw->curlen)
862 goto fail_len;
863 repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
864 idx += 2;
865 if (idx > raw->curlen)
866 goto fail_len;
867 return true;
868 fail_len:
869 DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
870 return false;
871 }
872
873 static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
874 struct drm_dp_sideband_msg_reply_body *repmsg)
875 {
876 int idx = 1;
877 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
878 idx++;
879 if (idx > raw->curlen)
880 goto fail_len;
881 repmsg->u.allocate_payload.vcpi = raw->msg[idx];
882 idx++;
883 if (idx > raw->curlen)
884 goto fail_len;
885 repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
886 idx += 2;
887 if (idx > raw->curlen)
888 goto fail_len;
889 return true;
890 fail_len:
891 DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
892 return false;
893 }
894
895 static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
896 struct drm_dp_sideband_msg_reply_body *repmsg)
897 {
898 int idx = 1;
899 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
900 idx++;
901 if (idx > raw->curlen)
902 goto fail_len;
903 repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
904 idx += 2;
905 if (idx > raw->curlen)
906 goto fail_len;
907 return true;
908 fail_len:
909 DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
910 return false;
911 }
912
913 static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
914 struct drm_dp_sideband_msg_reply_body *repmsg)
915 {
916 int idx = 1;
917
918 repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
919 idx++;
920 if (idx > raw->curlen) {
921 DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
922 idx, raw->curlen);
923 return false;
924 }
925 return true;
926 }
927
928 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
929 struct drm_dp_sideband_msg_reply_body *msg)
930 {
931 memset(msg, 0, sizeof(*msg));
932 msg->reply_type = (raw->msg[0] & 0x80) >> 7;
933 msg->req_type = (raw->msg[0] & 0x7f);
934
935 if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
936 memcpy(msg->u.nak.guid, &raw->msg[1], 16);
937 msg->u.nak.reason = raw->msg[17];
938 msg->u.nak.nak_data = raw->msg[18];
939 return false;
940 }
941
942 switch (msg->req_type) {
943 case DP_LINK_ADDRESS:
944 return drm_dp_sideband_parse_link_address(raw, msg);
945 case DP_QUERY_PAYLOAD:
946 return drm_dp_sideband_parse_query_payload_ack(raw, msg);
947 case DP_REMOTE_DPCD_READ:
948 return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
949 case DP_REMOTE_DPCD_WRITE:
950 return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
951 case DP_REMOTE_I2C_READ:
952 return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
953 case DP_ENUM_PATH_RESOURCES:
954 return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
955 case DP_ALLOCATE_PAYLOAD:
956 return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
957 case DP_POWER_DOWN_PHY:
958 case DP_POWER_UP_PHY:
959 return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
960 case DP_CLEAR_PAYLOAD_ID_TABLE:
961 return true; /* since there's nothing to parse */
962 default:
963 DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
964 drm_dp_mst_req_type_str(msg->req_type));
965 return false;
966 }
967 }
968
969 static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
970 struct drm_dp_sideband_msg_req_body *msg)
971 {
972 int idx = 1;
973
974 msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
975 idx++;
976 if (idx > raw->curlen)
977 goto fail_len;
978
979 memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
980 idx += 16;
981 if (idx > raw->curlen)
982 goto fail_len;
983
984 msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
985 msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
986 msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
987 msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
988 msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
989 idx++;
990 return true;
991 fail_len:
992 DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
993 return false;
994 }
995
996 static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
997 struct drm_dp_sideband_msg_req_body *msg)
998 {
999 int idx = 1;
1000
1001 msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
1002 idx++;
1003 if (idx > raw->curlen)
1004 goto fail_len;
1005
1006 memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
1007 idx += 16;
1008 if (idx > raw->curlen)
1009 goto fail_len;
1010
1011 msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
1012 idx++;
1013 return true;
1014 fail_len:
1015 DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
1016 return false;
1017 }
1018
1019 static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
1020 struct drm_dp_sideband_msg_req_body *msg)
1021 {
1022 memset(msg, 0, sizeof(*msg));
1023 msg->req_type = (raw->msg[0] & 0x7f);
1024
1025 switch (msg->req_type) {
1026 case DP_CONNECTION_STATUS_NOTIFY:
1027 return drm_dp_sideband_parse_connection_status_notify(raw, msg);
1028 case DP_RESOURCE_STATUS_NOTIFY:
1029 return drm_dp_sideband_parse_resource_status_notify(raw, msg);
1030 default:
1031 DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
1032 drm_dp_mst_req_type_str(msg->req_type));
1033 return false;
1034 }
1035 }
1036
1037 static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
1038 u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
1039 {
1040 struct drm_dp_sideband_msg_req_body req;
1041
1042 req.req_type = DP_REMOTE_DPCD_WRITE;
1043 req.u.dpcd_write.port_number = port_num;
1044 req.u.dpcd_write.dpcd_address = offset;
1045 req.u.dpcd_write.num_bytes = num_bytes;
1046 req.u.dpcd_write.bytes = bytes;
1047 drm_dp_encode_sideband_req(&req, msg);
1048 }
1049
1050 static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
1051 {
1052 struct drm_dp_sideband_msg_req_body req;
1053
1054 req.req_type = DP_LINK_ADDRESS;
1055 drm_dp_encode_sideband_req(&req, msg);
1056 }
1057
1058 static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
1059 {
1060 struct drm_dp_sideband_msg_req_body req;
1061
1062 req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
1063 drm_dp_encode_sideband_req(&req, msg);
1064 }
1065
1066 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
1067 int port_num)
1068 {
1069 struct drm_dp_sideband_msg_req_body req;
1070
1071 req.req_type = DP_ENUM_PATH_RESOURCES;
1072 req.u.port_num.port_number = port_num;
1073 drm_dp_encode_sideband_req(&req, msg);
1074 msg->path_msg = true;
1075 return 0;
1076 }
1077
1078 static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
1079 int port_num,
1080 u8 vcpi, uint16_t pbn,
1081 u8 number_sdp_streams,
1082 u8 *sdp_stream_sink)
1083 {
1084 struct drm_dp_sideband_msg_req_body req;
1085 memset(&req, 0, sizeof(req));
1086 req.req_type = DP_ALLOCATE_PAYLOAD;
1087 req.u.allocate_payload.port_number = port_num;
1088 req.u.allocate_payload.vcpi = vcpi;
1089 req.u.allocate_payload.pbn = pbn;
1090 req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
1091 memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
1092 number_sdp_streams);
1093 drm_dp_encode_sideband_req(&req, msg);
1094 msg->path_msg = true;
1095 }
1096
1097 static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
1098 int port_num, bool power_up)
1099 {
1100 struct drm_dp_sideband_msg_req_body req;
1101
1102 if (power_up)
1103 req.req_type = DP_POWER_UP_PHY;
1104 else
1105 req.req_type = DP_POWER_DOWN_PHY;
1106
1107 req.u.port_num.port_number = port_num;
1108 drm_dp_encode_sideband_req(&req, msg);
1109 msg->path_msg = true;
1110 }
1111
1112 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
1113 struct drm_dp_vcpi *vcpi)
1114 {
1115 int ret, vcpi_ret;
1116
1117 mutex_lock(&mgr->payload_lock);
1118 ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
1119 if (ret > mgr->max_payloads) {
1120 ret = -EINVAL;
1121 DRM_DEBUG_KMS("out of payload ids %d\n", ret);
1122 goto out_unlock;
1123 }
1124
1125 vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
1126 if (vcpi_ret > mgr->max_payloads) {
1127 ret = -EINVAL;
1128 DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
1129 goto out_unlock;
1130 }
1131
1132 set_bit(ret, &mgr->payload_mask);
1133 set_bit(vcpi_ret, &mgr->vcpi_mask);
1134 vcpi->vcpi = vcpi_ret + 1;
1135 mgr->proposed_vcpis[ret - 1] = vcpi;
1136 out_unlock:
1137 mutex_unlock(&mgr->payload_lock);
1138 return ret;
1139 }
1140
1141 static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
1142 int vcpi)
1143 {
1144 int i;
1145 if (vcpi == 0)
1146 return;
1147
1148 mutex_lock(&mgr->payload_lock);
1149 DRM_DEBUG_KMS("putting payload %d\n", vcpi);
1150 clear_bit(vcpi - 1, &mgr->vcpi_mask);
1151
1152 for (i = 0; i < mgr->max_payloads; i++) {
1153 if (mgr->proposed_vcpis[i] &&
1154 mgr->proposed_vcpis[i]->vcpi == vcpi) {
1155 mgr->proposed_vcpis[i] = NULL;
1156 clear_bit(i + 1, &mgr->payload_mask);
1157 }
1158 }
1159 mutex_unlock(&mgr->payload_lock);
1160 }
1161
1162 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
1163 struct drm_dp_sideband_msg_tx *txmsg)
1164 {
1165 unsigned int state;
1166
1167 /*
1168 * All updates to txmsg->state are protected by mgr->qlock, and the two
1169 * cases we check here are terminal states. For those the barriers
1170 * provided by the wake_up/wait_event pair are enough.
1171 */
1172 state = READ_ONCE(txmsg->state);
1173 return (state == DRM_DP_SIDEBAND_TX_RX ||
1174 state == DRM_DP_SIDEBAND_TX_TIMEOUT);
1175 }
1176
1177 static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
1178 struct drm_dp_sideband_msg_tx *txmsg)
1179 {
1180 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1181 int ret;
1182
1183 ret = wait_event_timeout(mgr->tx_waitq,
1184 check_txmsg_state(mgr, txmsg),
1185 (4 * HZ));
1186 mutex_lock(&mstb->mgr->qlock);
1187 if (ret > 0) {
1188 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
1189 ret = -EIO;
1190 goto out;
1191 }
1192 } else {
1193 DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
1194
1195 /* dump some state */
1196 ret = -EIO;
1197
1198 /* remove from q */
1199 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
1200 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND)
1201 list_del(&txmsg->next);
1202 }
1203 out:
1204 if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
1205 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
1206
1207 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
1208 }
1209 mutex_unlock(&mgr->qlock);
1210
1211 drm_dp_mst_kick_tx(mgr);
1212 return ret;
1213 }
1214
1215 static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
1216 {
1217 struct drm_dp_mst_branch *mstb;
1218
1219 mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
1220 if (!mstb)
1221 return NULL;
1222
1223 mstb->lct = lct;
1224 if (lct > 1)
1225 memcpy(mstb->rad, rad, lct / 2);
1226 INIT_LIST_HEAD(&mstb->ports);
1227 kref_init(&mstb->topology_kref);
1228 kref_init(&mstb->malloc_kref);
1229 return mstb;
1230 }
1231
1232 static void drm_dp_free_mst_branch_device(struct kref *kref)
1233 {
1234 struct drm_dp_mst_branch *mstb =
1235 container_of(kref, struct drm_dp_mst_branch, malloc_kref);
1236
1237 if (mstb->port_parent)
1238 drm_dp_mst_put_port_malloc(mstb->port_parent);
1239
1240 kfree(mstb);
1241 }
1242
1243 /**
1244 * DOC: Branch device and port refcounting
1245 *
1246 * Topology refcount overview
1247 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
1248 *
1249 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
1250 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
1251 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
1252 *
1253 * Topology refcounts are not exposed to drivers, and are handled internally
1254 * by the DP MST helpers. The helpers use them in order to prevent the
1255 * in-memory topology state from being changed in the middle of critical
1256 * operations like changing the internal state of payload allocations. This
1257 * means each branch and port will be considered to be connected to the rest
1258 * of the topology until its topology refcount reaches zero. Additionally,
1259 * for ports this means that their associated &struct drm_connector will stay
1260 * registered with userspace until the port's refcount reaches 0.
1261 *
1262 * Malloc refcount overview
1263 * ~~~~~~~~~~~~~~~~~~~~~~~~
1264 *
1265 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
1266 * drm_dp_mst_branch allocated even after all of its topology references have
1267 * been dropped, so that the driver or MST helpers can safely access each
1268 * branch's last known state before it was disconnected from the topology.
1269 * When the malloc refcount of a port or branch reaches 0, the memory
1270 * allocation containing the &struct drm_dp_mst_branch or &struct
1271 * drm_dp_mst_port respectively will be freed.
1272 *
1273 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
1274 * to drivers. As of writing this documentation, there are no drivers that
1275 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
1276 * helpers. Exposing this API to drivers in a race-free manner would take more
1277 * tweaking of the refcounting scheme, however patches are welcome provided
1278 * there is a legitimate driver usecase for this.
1279 *
1280 * Refcount relationships in a topology
1281 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1282 *
1283 * Let's take a look at why the relationship between topology and malloc
1284 * refcounts is designed the way it is.
1285 *
1286 * .. kernel-figure:: dp-mst/topology-figure-1.dot
1287 *
1288 * An example of topology and malloc refs in a DP MST topology with two
1289 * active payloads. Topology refcount increments are indicated by solid
1290 * lines, and malloc refcount increments are indicated by dashed lines.
1291 * Each starts from the branch which incremented the refcount, and ends at
1292 * the branch to which the refcount belongs to, i.e. the arrow points the
1293 * same way as the C pointers used to reference a structure.
1294 *
1295 * As you can see in the above figure, every branch increments the topology
1296 * refcount of its children, and increments the malloc refcount of its
1297 * parent. Additionally, every payload increments the malloc refcount of its
1298 * assigned port by 1.
1299 *
1300 * So, what would happen if MSTB #3 from the above figure was unplugged from
1301 * the system, but the driver hadn't yet removed payload #2 from port #3? The
1302 * topology would start to look like the figure below.
1303 *
1304 * .. kernel-figure:: dp-mst/topology-figure-2.dot
1305 *
1306 * Ports and branch devices which have been released from memory are
1307 * colored grey, and references which have been removed are colored red.
1308 *
1309 * Whenever a port or branch device's topology refcount reaches zero, it will
1310 * decrement the topology refcounts of all its children, the malloc refcount
1311 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
1312 * #4, this means they both have been disconnected from the topology and freed
1313 * from memory. But, because payload #2 is still holding a reference to port
1314 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
1315 * is still accessible from memory. This also means port #3 has not yet
1316 * decremented the malloc refcount of MSTB #3, so its &struct
1317 * drm_dp_mst_branch will also stay allocated in memory until port #3's
1318 * malloc refcount reaches 0.
1319 *
1320 * This relationship is necessary because in order to release payload #2, we
1321 * need to be able to figure out the last relative of port #3 that's still
1322 * connected to the topology. In this case, we would travel up the topology as
1323 * shown below.
1324 *
1325 * .. kernel-figure:: dp-mst/topology-figure-3.dot
1326 *
1327 * And finally, remove payload #2 by communicating with port #2 through
1328 * sideband transactions.
1329 */
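/*
 * Illustrative sketch, not part of the helpers: a driver that wants to keep
 * using a port even after it may have dropped out of the topology takes a
 * malloc reference up front and balances it with a put when done. The
 * my_state structure here is hypothetical driver state:
 *
 *	in the driver's &drm_dp_mst_topology_cbs.add_connector (or similar):
 *		drm_dp_mst_get_port_malloc(port);
 *		my_state->port = port;
 *
 *	once the driver is finished with the port:
 *		drm_dp_mst_put_port_malloc(my_state->port);
 *		my_state->port = NULL;
 */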
1330
1331 /**
1332 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
1333 * device
1334 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
1335 *
1336 * Increments &drm_dp_mst_branch.malloc_kref. When
1337 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
1338 * will be released and @mstb may no longer be used.
1339 *
1340 * See also: drm_dp_mst_put_mstb_malloc()
1341 */
1342 static void
1343 drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
1344 {
1345 kref_get(&mstb->malloc_kref);
1346 DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
1347 }
1348
1349 /**
1350 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
1351 * device
1352 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
1353 *
1354 * Decrements &drm_dp_mst_branch.malloc_kref. When
1355 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
1356 * will be released and @mstb may no longer be used.
1357 *
1358 * See also: drm_dp_mst_get_mstb_malloc()
1359 */
1360 static void
1361 drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
1362 {
1363 DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
1364 kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
1365 }
1366
1367 static void drm_dp_free_mst_port(struct kref *kref)
1368 {
1369 struct drm_dp_mst_port *port =
1370 container_of(kref, struct drm_dp_mst_port, malloc_kref);
1371
1372 drm_dp_mst_put_mstb_malloc(port->parent);
1373 kfree(port);
1374 }
1375
1376 /**
1377 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
1378 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
1379 *
1380 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1381 * reaches 0, the memory allocation for @port will be released and @port may
1382 * no longer be used.
1383 *
1384 * Because @port could potentially be freed at any time by the DP MST helpers
1385 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
1386 * function, drivers that wish to make use of &struct drm_dp_mst_port should
1387 * ensure that they grab at least one main malloc reference to their MST ports
1388 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
1389 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
1390 *
1391 * See also: drm_dp_mst_put_port_malloc()
1392 */
1393 void
1394 drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
1395 {
1396 kref_get(&port->malloc_kref);
1397 DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
1398 }
1399 EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
1400
1401 /**
1402 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
1403 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
1404 *
1405 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1406 * reaches 0, the memory allocation for @port will be released and @port may
1407 * no longer be used.
1408 *
1409 * See also: drm_dp_mst_get_port_malloc()
1410 */
1411 void
1412 drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
1413 {
1414 DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
1415 kref_put(&port->malloc_kref, drm_dp_free_mst_port);
1416 }
1417 EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
1418
1419 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
1420
1421 #define STACK_DEPTH 8
1422
1423 static noinline void
1424 __topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
1425 struct drm_dp_mst_topology_ref_history *history,
1426 enum drm_dp_mst_topology_ref_type type)
1427 {
1428 struct drm_dp_mst_topology_ref_entry *entry = NULL;
1429 depot_stack_handle_t backtrace;
1430 ulong stack_entries[STACK_DEPTH];
1431 uint n;
1432 int i;
1433
1434 n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
1435 backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
1436 if (!backtrace)
1437 return;
1438
1439 /* Try to find an existing entry for this backtrace */
1440 for (i = 0; i < history->len; i++) {
1441 if (history->entries[i].backtrace == backtrace) {
1442 entry = &history->entries[i];
1443 break;
1444 }
1445 }
1446
1447 /* Otherwise add one */
1448 if (!entry) {
1449 struct drm_dp_mst_topology_ref_entry *new;
1450 int new_len = history->len + 1;
1451
1452 new = krealloc(history->entries, sizeof(*new) * new_len,
1453 GFP_KERNEL);
1454 if (!new)
1455 return;
1456
1457 entry = &new[history->len];
1458 history->len = new_len;
1459 history->entries = new;
1460
1461 entry->backtrace = backtrace;
1462 entry->type = type;
1463 entry->count = 0;
1464 }
1465 entry->count++;
1466 entry->ts_nsec = ktime_get_ns();
1467 }
1468
1469 static int
1470 topology_ref_history_cmp(const void *a, const void *b)
1471 {
1472 const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
1473
1474 if (entry_a->ts_nsec > entry_b->ts_nsec)
1475 return 1;
1476 else if (entry_a->ts_nsec < entry_b->ts_nsec)
1477 return -1;
1478 else
1479 return 0;
1480 }
1481
1482 static inline const char *
1483 topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
1484 {
1485 if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
1486 return "get";
1487 else
1488 return "put";
1489 }
1490
1491 static void
1492 __dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
1493 void *ptr, const char *type_str)
1494 {
1495 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
1496 char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1497 int i;
1498
1499 if (!buf)
1500 return;
1501
1502 if (!history->len)
1503 goto out;
1504
1505 /* First, sort the list so that it goes from oldest to newest
1506 * reference entry
1507 */
1508 sort(history->entries, history->len, sizeof(*history->entries),
1509 topology_ref_history_cmp, NULL);
1510
1511 drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
1512 type_str, ptr);
1513
1514 for (i = 0; i < history->len; i++) {
1515 const struct drm_dp_mst_topology_ref_entry *entry =
1516 &history->entries[i];
1517 ulong *entries;
1518 uint nr_entries;
1519 u64 ts_nsec = entry->ts_nsec;
1520 u32 rem_nsec = do_div(ts_nsec, 1000000000);
1521
1522 nr_entries = stack_depot_fetch(entry->backtrace, &entries);
1523 stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
1524
1525 drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s",
1526 entry->count,
1527 topology_ref_type_to_str(entry->type),
1528 ts_nsec, rem_nsec / 1000, buf);
1529 }
1530
1531 /* Now free the history, since this is the only time we expose it */
1532 kfree(history->entries);
1533 out:
1534 kfree(buf);
1535 }
1536
1537 static __always_inline void
1538 drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
1539 {
1540 __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
1541 "MSTB");
1542 }
1543
1544 static __always_inline void
1545 drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
1546 {
1547 __dump_topology_ref_history(&port->topology_ref_history, port,
1548 "Port");
1549 }
1550
1551 static __always_inline void
1552 save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
1553 enum drm_dp_mst_topology_ref_type type)
1554 {
1555 __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
1556 }
1557
1558 static __always_inline void
1559 save_port_topology_ref(struct drm_dp_mst_port *port,
1560 enum drm_dp_mst_topology_ref_type type)
1561 {
1562 __topology_ref_save(port->mgr, &port->topology_ref_history, type);
1563 }
1564
1565 static inline void
1566 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
1567 {
1568 mutex_lock(&mgr->topology_ref_history_lock);
1569 }
1570
1571 static inline void
1572 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
1573 {
1574 mutex_unlock(&mgr->topology_ref_history_lock);
1575 }
1576 #else
1577 static inline void
1578 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
1579 static inline void
1580 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
1581 static inline void
1582 drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
1583 static inline void
1584 drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
1585 #define save_mstb_topology_ref(mstb, type)
1586 #define save_port_topology_ref(port, type)
1587 #endif
1588
1589 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
1590 {
1591 struct drm_dp_mst_branch *mstb =
1592 container_of(kref, struct drm_dp_mst_branch, topology_kref);
1593 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1594
1595 drm_dp_mst_dump_mstb_topology_history(mstb);
1596
1597 INIT_LIST_HEAD(&mstb->destroy_next);
1598
1599 /*
1600 * This can get called under mgr->mutex, so we need to perform the
1601 * actual destruction of the mstb in another worker
1602 */
1603 mutex_lock(&mgr->delayed_destroy_lock);
1604 list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
1605 mutex_unlock(&mgr->delayed_destroy_lock);
1606 schedule_work(&mgr->delayed_destroy_work);
1607 }
1608
1609 /**
1610 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
1611 * branch device unless it's zero
1612 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
1613 *
1614 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
1615 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
1616 * reached 0). Holding a topology reference implies that a malloc reference
1617 * will be held to @mstb as long as the user holds the topology reference.
1618 *
1619 * Care should be taken to ensure that the user has at least one malloc
1620 * reference to @mstb. If you already have a topology reference to @mstb, you
1621 * should use drm_dp_mst_topology_get_mstb() instead.
1622 *
1623 * See also:
1624 * drm_dp_mst_topology_get_mstb()
1625 * drm_dp_mst_topology_put_mstb()
1626 *
1627 * Returns:
1628 * * 1: A topology reference was grabbed successfully
1629 * * 0: @mstb is no longer in the topology, no reference was grabbed
1630 */
1631 static int __must_check
1632 drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
1633 {
1634 int ret;
1635
1636 topology_ref_history_lock(mstb->mgr);
1637 ret = kref_get_unless_zero(&mstb->topology_kref);
1638 if (ret) {
1639 DRM_DEBUG("mstb %p (%d)\n",
1640 mstb, kref_read(&mstb->topology_kref));
1641 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1642 }
1643
1644 topology_ref_history_unlock(mstb->mgr);
1645
1646 return ret;
1647 }
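
/*
 * Typical calling pattern for the try-get helper above (sketch only): bail
 * out if the branch has already left the topology, otherwise balance the
 * reference with drm_dp_mst_topology_put_mstb() when done.
 *
 *	if (!drm_dp_mst_topology_try_get_mstb(mstb))
 *		return;
 *	... mstb can now be treated as still part of the topology ...
 *	drm_dp_mst_topology_put_mstb(mstb);
 */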
1648
1649 /**
1650 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
1651 * branch device
1652 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
1653 *
1654 * Increments &drm_dp_mst_branch.topology_refcount without checking whether or
1655 * not it's already reached 0. This is only valid to use in scenarios where
1656 * you are already guaranteed to have at least one active topology reference
1657 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
1658 *
1659 * See also:
1660 * drm_dp_mst_topology_try_get_mstb()
1661 * drm_dp_mst_topology_put_mstb()
1662 */
1663 static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
1664 {
1665 topology_ref_history_lock(mstb->mgr);
1666
1667 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1668 WARN_ON(kref_read(&mstb->topology_kref) == 0);
1669 kref_get(&mstb->topology_kref);
1670 DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1671
1672 topology_ref_history_unlock(mstb->mgr);
1673 }
1674
1675 /**
1676 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
1677 * device
1678 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
1679 *
1680 * Releases a topology reference from @mstb by decrementing
1681 * &drm_dp_mst_branch.topology_kref.
1682 *
1683 * See also:
1684 * drm_dp_mst_topology_try_get_mstb()
1685 * drm_dp_mst_topology_get_mstb()
1686 */
1687 static void
1688 drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
1689 {
1690 topology_ref_history_lock(mstb->mgr);
1691
1692 DRM_DEBUG("mstb %p (%d)\n",
1693 mstb, kref_read(&mstb->topology_kref) - 1);
1694 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
1695
1696 topology_ref_history_unlock(mstb->mgr);
1697 kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
1698 }
1699
1700 static void drm_dp_destroy_port(struct kref *kref)
1701 {
1702 struct drm_dp_mst_port *port =
1703 container_of(kref, struct drm_dp_mst_port, topology_kref);
1704 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1705
1706 drm_dp_mst_dump_port_topology_history(port);
1707
1708 /* There's nothing that needs locking to destroy an input port yet */
1709 if (port->input) {
1710 drm_dp_mst_put_port_malloc(port);
1711 return;
1712 }
1713
1714 kfree(port->cached_edid);
1715
1716 /*
1717 * we can't destroy the connector here, as we might be holding the
1718 * mode_config.mutex from an EDID retrieval
1719 */
1720 mutex_lock(&mgr->delayed_destroy_lock);
1721 list_add(&port->next, &mgr->destroy_port_list);
1722 mutex_unlock(&mgr->delayed_destroy_lock);
1723 schedule_work(&mgr->delayed_destroy_work);
1724 }
1725
1726 /**
1727 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1728 * port unless it's zero
1729 * @port: &struct drm_dp_mst_port to increment the topology refcount of
1730 *
1731 * Attempts to grab a topology reference to @port, if it hasn't yet been
1732 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
1733 * 0). Holding a topology reference implies that a malloc reference will be
1734 * held to @port as long as the user holds the topology reference.
1735 *
1736 * Care should be taken to ensure that the user has at least one malloc
1737 * reference to @port. If you already have a topology reference to @port, you
1738 * should use drm_dp_mst_topology_get_port() instead.
1739 *
1740 * See also:
1741 * drm_dp_mst_topology_get_port()
1742 * drm_dp_mst_topology_put_port()
1743 *
1744 * Returns:
1745 * * 1: A topology reference was grabbed successfully
1746 * * 0: @port is no longer in the topology, no reference was grabbed
1747 */
1748 static int __must_check
1749 drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
1750 {
1751 int ret;
1752
1753 topology_ref_history_lock(port->mgr);
1754 ret = kref_get_unless_zero(&port->topology_kref);
1755 if (ret) {
1756 DRM_DEBUG("port %p (%d)\n",
1757 port, kref_read(&port->topology_kref));
1758 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1759 }
1760
1761 topology_ref_history_unlock(port->mgr);
1762 return ret;
1763 }
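
/*
 * A minimal sketch of the try-get pattern this helper is meant for,
 * assuming the caller holds a malloc reference to @port so the structure
 * itself can't be freed while checking; lookup_port() and use_port() are
 * hypothetical placeholders:
 *
 *	struct drm_dp_mst_port *port = lookup_port(...);
 *
 *	if (drm_dp_mst_topology_try_get_port(port)) {
 *		use_port(port);
 *		drm_dp_mst_topology_put_port(port);
 *	}
 */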
1764
1765 /**
1766 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
1767 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
1768 *
1769 * Increments &drm_dp_mst_port.topology_kref without checking whether or
1770 * not it's already reached 0. This is only valid to use in scenarios where
1771 * you are already guaranteed to have at least one active topology reference
1772 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
1773 *
1774 * See also:
1775 * drm_dp_mst_topology_try_get_port()
1776 * drm_dp_mst_topology_put_port()
1777 */
1778 static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
1779 {
1780 topology_ref_history_lock(port->mgr);
1781
1782 WARN_ON(kref_read(&port->topology_kref) == 0);
1783 kref_get(&port->topology_kref);
1784 DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
1785 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1786
1787 topology_ref_history_unlock(port->mgr);
1788 }
1789
1790 /**
1791 * drm_dp_mst_topology_put_port() - release a topology reference to a port
1792 * @port: The &struct drm_dp_mst_port to release the topology reference from
1793 *
1794 * Releases a topology reference from @port by decrementing
1795 * &drm_dp_mst_port.topology_kref.
1796 *
1797 * See also:
1798 * drm_dp_mst_topology_try_get_port()
1799 * drm_dp_mst_topology_get_port()
1800 */
1801 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
1802 {
1803 topology_ref_history_lock(port->mgr);
1804
1805 DRM_DEBUG("port %p (%d)\n",
1806 port, kref_read(&port->topology_kref) - 1);
1807 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
1808
1809 topology_ref_history_unlock(port->mgr);
1810 kref_put(&port->topology_kref, drm_dp_destroy_port);
1811 }
1812
1813 static struct drm_dp_mst_branch *
1814 drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
1815 struct drm_dp_mst_branch *to_find)
1816 {
1817 struct drm_dp_mst_port *port;
1818 struct drm_dp_mst_branch *rmstb;
1819
1820 if (to_find == mstb)
1821 return mstb;
1822
1823 list_for_each_entry(port, &mstb->ports, next) {
1824 if (port->mstb) {
1825 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1826 port->mstb, to_find);
1827 if (rmstb)
1828 return rmstb;
1829 }
1830 }
1831 return NULL;
1832 }
1833
1834 static struct drm_dp_mst_branch *
1835 drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1836 struct drm_dp_mst_branch *mstb)
1837 {
1838 struct drm_dp_mst_branch *rmstb = NULL;
1839
1840 mutex_lock(&mgr->lock);
1841 if (mgr->mst_primary) {
1842 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1843 mgr->mst_primary, mstb);
1844
1845 if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
1846 rmstb = NULL;
1847 }
1848 mutex_unlock(&mgr->lock);
1849 return rmstb;
1850 }
1851
1852 static struct drm_dp_mst_port *
1853 drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
1854 struct drm_dp_mst_port *to_find)
1855 {
1856 struct drm_dp_mst_port *port, *mport;
1857
1858 list_for_each_entry(port, &mstb->ports, next) {
1859 if (port == to_find)
1860 return port;
1861
1862 if (port->mstb) {
1863 mport = drm_dp_mst_topology_get_port_validated_locked(
1864 port->mstb, to_find);
1865 if (mport)
1866 return mport;
1867 }
1868 }
1869 return NULL;
1870 }
1871
1872 static struct drm_dp_mst_port *
1873 drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
1874 struct drm_dp_mst_port *port)
1875 {
1876 struct drm_dp_mst_port *rport = NULL;
1877
1878 mutex_lock(&mgr->lock);
1879 if (mgr->mst_primary) {
1880 rport = drm_dp_mst_topology_get_port_validated_locked(
1881 mgr->mst_primary, port);
1882
1883 if (rport && !drm_dp_mst_topology_try_get_port(rport))
1884 rport = NULL;
1885 }
1886 mutex_unlock(&mgr->lock);
1887 return rport;
1888 }
1889
1890 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
1891 {
1892 struct drm_dp_mst_port *port;
1893 int ret;
1894
1895 list_for_each_entry(port, &mstb->ports, next) {
1896 if (port->port_num == port_num) {
1897 ret = drm_dp_mst_topology_try_get_port(port);
1898 return ret ? port : NULL;
1899 }
1900 }
1901
1902 return NULL;
1903 }
1904
1905 /*
1906 * Calculate a new RAD for this MST branch device.
1907 * If the parent has an LCT of 2 then it has 1 nibble of RAD,
1908 * if the parent has an LCT of 3 then it has 2 nibbles of RAD, and so on.
1909 */
1910 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
1911 u8 *rad)
1912 {
1913 int parent_lct = port->parent->lct;
1914 int shift = 4;
1915 int idx = (parent_lct - 1) / 2;
1916 if (parent_lct > 1) {
1917 memcpy(rad, port->parent->rad, idx + 1);
1918 shift = (parent_lct % 2) ? 4 : 0;
1919 } else
1920 rad[0] = 0;
1921
1922 rad[idx] |= port->port_num << shift;
1923 return parent_lct + 1;
1924 }
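
/*
 * Worked example of the RAD packing above: a parent at LCT 2 that was
 * itself reached through port 2 has rad[0] == 0x20 (its port number in
 * the high nibble). A branch device hanging off that parent's port 5
 * then ends up with rad[0] == 0x25 (port 5 in the low nibble), and the
 * function returns the new LCT of 3.
 */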
1925
1926 static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
1927 {
1928 switch (pdt) {
1929 case DP_PEER_DEVICE_DP_LEGACY_CONV:
1930 case DP_PEER_DEVICE_SST_SINK:
1931 return true;
1932 case DP_PEER_DEVICE_MST_BRANCHING:
1933 /* For SST branch devices */
1934 if (!mcs)
1935 return true;
1936
1937 return false;
1938 }
1939 return true;
1940 }
1941
1942 static int
1943 drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
1944 bool new_mcs)
1945 {
1946 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1947 struct drm_dp_mst_branch *mstb;
1948 u8 rad[8], lct;
1949 int ret = 0;
1950
1951 if (port->pdt == new_pdt && port->mcs == new_mcs)
1952 return 0;
1953
1954 /* Teardown the old pdt, if there is one */
1955 if (port->pdt != DP_PEER_DEVICE_NONE) {
1956 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
1957 /*
1958 * If the new PDT would also have an i2c bus,
1959 * don't bother with reregistering it
1960 */
1961 if (new_pdt != DP_PEER_DEVICE_NONE &&
1962 drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
1963 port->pdt = new_pdt;
1964 port->mcs = new_mcs;
1965 return 0;
1966 }
1967
1968 /* remove i2c over sideband */
1969 drm_dp_mst_unregister_i2c_bus(&port->aux);
1970 } else {
1971 mutex_lock(&mgr->lock);
1972 drm_dp_mst_topology_put_mstb(port->mstb);
1973 port->mstb = NULL;
1974 mutex_unlock(&mgr->lock);
1975 }
1976 }
1977
1978 port->pdt = new_pdt;
1979 port->mcs = new_mcs;
1980
1981 if (port->pdt != DP_PEER_DEVICE_NONE) {
1982 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
1983 /* add i2c over sideband */
1984 ret = drm_dp_mst_register_i2c_bus(&port->aux);
1985 } else {
1986 lct = drm_dp_calculate_rad(port, rad);
1987 mstb = drm_dp_add_mst_branch_device(lct, rad);
1988 if (!mstb) {
1989 ret = -ENOMEM;
1990 DRM_ERROR("Failed to create MSTB for port %p",
1991 port);
1992 goto out;
1993 }
1994
1995 mutex_lock(&mgr->lock);
1996 port->mstb = mstb;
1997 mstb->mgr = port->mgr;
1998 mstb->port_parent = port;
1999
2000 /*
2001 * Make sure this port's memory allocation stays
2002 * around until its child MSTB releases it
2003 */
2004 drm_dp_mst_get_port_malloc(port);
2005 mutex_unlock(&mgr->lock);
2006
2007 /* And make sure we send a link address for this */
2008 ret = 1;
2009 }
2010 }
2011
2012 out:
2013 if (ret < 0)
2014 port->pdt = DP_PEER_DEVICE_NONE;
2015 return ret;
2016 }
2017
2018 /**
2019 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
2020 * @aux: Fake sideband AUX CH
2021 * @offset: address of the (first) register to read
2022 * @buffer: buffer to store the register values
2023 * @size: number of bytes in @buffer
2024 *
2025 * Performs the same functionality for remote devices via
2026 * sideband messaging as drm_dp_dpcd_read() does for local
2027 * devices via actual AUX CH.
2028 *
2029 * Return: Number of bytes read, or negative error code on failure.
2030 */
2031 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
2032 unsigned int offset, void *buffer, size_t size)
2033 {
2034 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2035 aux);
2036
2037 return drm_dp_send_dpcd_read(port->mgr, port,
2038 offset, size, buffer);
2039 }
2040
2041 /**
2042 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
2043 * @aux: Fake sideband AUX CH
2044 * @offset: address of the (first) register to write
2045 * @buffer: buffer containing the values to write
2046 * @size: number of bytes in @buffer
2047 *
2048 * Performs the same functionality for remote devices via
2049 * sideband messaging as drm_dp_dpcd_write() does for local
2050 * devices via actual AUX CH.
2051 *
2052 * Return: Number of bytes written on success, or negative error code on failure.
2053 */
2054 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
2055 unsigned int offset, void *buffer, size_t size)
2056 {
2057 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2058 aux);
2059
2060 return drm_dp_send_dpcd_write(port->mgr, port,
2061 offset, size, buffer);
2062 }
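
/*
 * A short sketch of how a driver normally reaches the two functions
 * above: it calls the regular DPCD helpers on the port's fake AUX
 * channel, and since port->aux.is_remote is set the core helpers are
 * expected to route the access through the sideband versions here.
 * DP_SINK_COUNT is just an illustrative offset:
 *
 *	u8 count;
 *
 *	if (drm_dp_dpcd_readb(&port->aux, DP_SINK_COUNT, &count) == 1)
 *		handle_sink_count(count);
 *
 * handle_sink_count() is a hypothetical placeholder.
 */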
2063
2064 static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
2065 {
2066 int ret = 0;
2067
2068 memcpy(mstb->guid, guid, 16);
2069
2070 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
2071 if (mstb->port_parent) {
2072 ret = drm_dp_send_dpcd_write(mstb->mgr,
2073 mstb->port_parent,
2074 DP_GUID, 16, mstb->guid);
2075 } else {
2076 ret = drm_dp_dpcd_write(mstb->mgr->aux,
2077 DP_GUID, mstb->guid, 16);
2078 }
2079 }
2080
2081 if (ret < 16 && ret > 0)
2082 return -EPROTO;
2083
2084 return ret == 16 ? 0 : ret;
2085 }
2086
2087 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
2088 int pnum,
2089 char *proppath,
2090 size_t proppath_size)
2091 {
2092 int i;
2093 char temp[8];
2094 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
2095 for (i = 0; i < (mstb->lct - 1); i++) {
2096 int shift = (i % 2) ? 0 : 4;
2097 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
2098 snprintf(temp, sizeof(temp), "-%d", port_num);
2099 strlcat(proppath, temp, proppath_size);
2100 }
2101 snprintf(temp, sizeof(temp), "-%d", pnum);
2102 strlcat(proppath, temp, proppath_size);
2103 }
2104
2105 /**
2106 * drm_dp_mst_connector_late_register() - Late MST connector registration
2107 * @connector: The MST connector
2108 * @port: The MST port for this connector
2109 *
2110 * Helper to register the remote aux device for this MST port. Drivers should
2111 * call this from their mst connector's late_register hook to enable MST aux
2112 * devices.
2113 *
2114 * Return: 0 on success, negative error code on failure.
2115 */
2116 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2117 struct drm_dp_mst_port *port)
2118 {
2119 DRM_DEBUG_KMS("registering %s remote bus for %s\n",
2120 port->aux.name, connector->kdev->kobj.name);
2121
2122 port->aux.dev = connector->kdev;
2123 return drm_dp_aux_register_devnode(&port->aux);
2124 }
2125 EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2126
2127 /**
2128 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2129 * @connector: The MST connector
2130 * @port: The MST port for this connector
2131 *
2132 * Helper to unregister the remote aux device for this MST port, registered by
2133 * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
2134 * connector's early_unregister hook.
2135 */
2136 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2137 struct drm_dp_mst_port *port)
2138 {
2139 DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
2140 port->aux.name, connector->kdev->kobj.name);
2141 drm_dp_aux_unregister_devnode(&port->aux);
2142 }
2143 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
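
/*
 * A hedged sketch of how a driver wires the two helpers above into its
 * MST connector functions; the foo_mst_connector type and the way it
 * stores its port pointer are hypothetical:
 *
 *	static int foo_mst_late_register(struct drm_connector *connector)
 *	{
 *		struct foo_mst_connector *c = to_foo_mst_connector(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector, c->port);
 *	}
 *
 *	static void foo_mst_early_unregister(struct drm_connector *connector)
 *	{
 *		struct foo_mst_connector *c = to_foo_mst_connector(connector);
 *
 *		drm_dp_mst_connector_early_unregister(connector, c->port);
 *	}
 *
 * These then go in the .late_register and .early_unregister members of
 * the connector's &struct drm_connector_funcs.
 */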
2144
2145 static void
2146 drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2147 struct drm_dp_mst_port *port)
2148 {
2149 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2150 char proppath[255];
2151 int ret;
2152
2153 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2154 port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2155 if (!port->connector) {
2156 ret = -ENOMEM;
2157 goto error;
2158 }
2159
2160 if (port->pdt != DP_PEER_DEVICE_NONE &&
2161 drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2162 port->cached_edid = drm_get_edid(port->connector,
2163 &port->aux.ddc);
2164 drm_connector_set_tile_property(port->connector);
2165 }
2166
2167 drm_connector_register(port->connector);
2168 return;
2169
2170 error:
2171 DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
2172 }
2173
2174 /*
2175 * Drop a topology reference, and unlink the port from the in-memory topology
2176 * layout
2177 */
2178 static void
2179 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2180 struct drm_dp_mst_port *port)
2181 {
2182 mutex_lock(&mgr->lock);
2183 port->parent->num_ports--;
2184 list_del(&port->next);
2185 mutex_unlock(&mgr->lock);
2186 drm_dp_mst_topology_put_port(port);
2187 }
2188
2189 static struct drm_dp_mst_port *
2190 drm_dp_mst_add_port(struct drm_device *dev,
2191 struct drm_dp_mst_topology_mgr *mgr,
2192 struct drm_dp_mst_branch *mstb, u8 port_number)
2193 {
2194 struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2195
2196 if (!port)
2197 return NULL;
2198
2199 kref_init(&port->topology_kref);
2200 kref_init(&port->malloc_kref);
2201 port->parent = mstb;
2202 port->port_num = port_number;
2203 port->mgr = mgr;
2204 port->aux.name = "DPMST";
2205 port->aux.dev = dev->dev;
2206 port->aux.is_remote = true;
2207
2208 /* initialize the MST downstream port's AUX crc work queue */
2209 drm_dp_remote_aux_init(&port->aux);
2210
2211 /*
2212 * Make sure the memory allocation for our parent branch stays
2213 * around until our own memory allocation is released
2214 */
2215 drm_dp_mst_get_mstb_malloc(mstb);
2216
2217 return port;
2218 }
2219
2220 static int
2221 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2222 struct drm_device *dev,
2223 struct drm_dp_link_addr_reply_port *port_msg)
2224 {
2225 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2226 struct drm_dp_mst_port *port;
2227 int old_ddps = 0, ret;
2228 u8 new_pdt = DP_PEER_DEVICE_NONE;
2229 bool new_mcs = false;
2230 bool created = false, send_link_addr = false, changed = false;
2231
2232 port = drm_dp_get_port(mstb, port_msg->port_number);
2233 if (!port) {
2234 port = drm_dp_mst_add_port(dev, mgr, mstb,
2235 port_msg->port_number);
2236 if (!port)
2237 return -ENOMEM;
2238 created = true;
2239 changed = true;
2240 } else if (!port->input && port_msg->input_port && port->connector) {
2241 /* Since port->connector can't be changed here, we create a
2242 * new port if input_port changes from 0 to 1
2243 */
2244 drm_dp_mst_topology_unlink_port(mgr, port);
2245 drm_dp_mst_topology_put_port(port);
2246 port = drm_dp_mst_add_port(dev, mgr, mstb,
2247 port_msg->port_number);
2248 if (!port)
2249 return -ENOMEM;
2250 changed = true;
2251 created = true;
2252 } else if (port->input && !port_msg->input_port) {
2253 changed = true;
2254 } else if (port->connector) {
2255 /* We're updating a port that's exposed to userspace, so do it
2256 * under lock
2257 */
2258 drm_modeset_lock(&mgr->base.lock, NULL);
2259
2260 old_ddps = port->ddps;
2261 changed = port->ddps != port_msg->ddps ||
2262 (port->ddps &&
2263 (port->ldps != port_msg->legacy_device_plug_status ||
2264 port->dpcd_rev != port_msg->dpcd_revision ||
2265 port->mcs != port_msg->mcs ||
2266 port->pdt != port_msg->peer_device_type ||
2267 port->num_sdp_stream_sinks !=
2268 port_msg->num_sdp_stream_sinks));
2269 }
2270
2271 port->input = port_msg->input_port;
2272 if (!port->input)
2273 new_pdt = port_msg->peer_device_type;
2274 new_mcs = port_msg->mcs;
2275 port->ddps = port_msg->ddps;
2276 port->ldps = port_msg->legacy_device_plug_status;
2277 port->dpcd_rev = port_msg->dpcd_revision;
2278 port->num_sdp_streams = port_msg->num_sdp_streams;
2279 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2280
2281 /* manage mstb port lists with mgr lock - take a reference
2282 for this list */
2283 if (created) {
2284 mutex_lock(&mgr->lock);
2285 drm_dp_mst_topology_get_port(port);
2286 list_add(&port->next, &mstb->ports);
2287 mstb->num_ports++;
2288 mutex_unlock(&mgr->lock);
2289 }
2290
2291 /*
2292 * Reprobe PBN caps on both hotplug, and when re-probing the link
2293 * for our parent mstb
2294 */
2295 if (old_ddps != port->ddps || !created) {
2296 if (port->ddps && !port->input) {
2297 ret = drm_dp_send_enum_path_resources(mgr, mstb,
2298 port);
2299 if (ret == 1)
2300 changed = true;
2301 } else {
2302 port->full_pbn = 0;
2303 }
2304 }
2305
2306 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2307 if (ret == 1) {
2308 send_link_addr = true;
2309 } else if (ret < 0) {
2310 DRM_ERROR("Failed to change PDT on port %p: %d\n",
2311 port, ret);
2312 goto fail;
2313 }
2314
2315 /*
2316 * If this port wasn't just created, then we're reprobing because
2317 * we're coming out of suspend. In this case, always resend the link
2318 * address if there's an MSTB on this port
2319 */
2320 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2321 port->mcs)
2322 send_link_addr = true;
2323
2324 if (port->connector)
2325 drm_modeset_unlock(&mgr->base.lock);
2326 else if (!port->input)
2327 drm_dp_mst_port_add_connector(mstb, port);
2328
2329 if (send_link_addr && port->mstb) {
2330 ret = drm_dp_send_link_address(mgr, port->mstb);
2331 if (ret == 1) /* MSTB below us changed */
2332 changed = true;
2333 else if (ret < 0)
2334 goto fail_put;
2335 }
2336
2337 /* put reference to this port */
2338 drm_dp_mst_topology_put_port(port);
2339 return changed;
2340
2341 fail:
2342 drm_dp_mst_topology_unlink_port(mgr, port);
2343 if (port->connector)
2344 drm_modeset_unlock(&mgr->base.lock);
2345 fail_put:
2346 drm_dp_mst_topology_put_port(port);
2347 return ret;
2348 }
2349
2350 static void
2351 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2352 struct drm_dp_connection_status_notify *conn_stat)
2353 {
2354 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2355 struct drm_dp_mst_port *port;
2356 int old_ddps, old_input, ret, i;
2357 u8 new_pdt;
2358 bool new_mcs;
2359 bool dowork = false, create_connector = false;
2360
2361 port = drm_dp_get_port(mstb, conn_stat->port_number);
2362 if (!port)
2363 return;
2364
2365 if (port->connector) {
2366 if (!port->input && conn_stat->input_port) {
2367 /*
2368 * We can't remove a connector from an already exposed
2369 * port, so just throw the port out and make sure we
2370 * reprobe the link address of it's parent MSTB
2371 */
2372 drm_dp_mst_topology_unlink_port(mgr, port);
2373 mstb->link_address_sent = false;
2374 dowork = true;
2375 goto out;
2376 }
2377
2378 /* Locking is only needed if the port's exposed to userspace */
2379 drm_modeset_lock(&mgr->base.lock, NULL);
2380 } else if (port->input && !conn_stat->input_port) {
2381 create_connector = true;
2382 /* Reprobe link address so we get num_sdp_streams */
2383 mstb->link_address_sent = false;
2384 dowork = true;
2385 }
2386
2387 old_ddps = port->ddps;
2388 old_input = port->input;
2389 port->input = conn_stat->input_port;
2390 port->ldps = conn_stat->legacy_device_plug_status;
2391 port->ddps = conn_stat->displayport_device_plug_status;
2392
2393 if (old_ddps != port->ddps) {
2394 if (port->ddps && !port->input)
2395 drm_dp_send_enum_path_resources(mgr, mstb, port);
2396 else
2397 port->full_pbn = 0;
2398 }
2399
2400 new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2401 new_mcs = conn_stat->message_capability_status;
2402 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2403 if (ret == 1) {
2404 dowork = true;
2405 } else if (ret < 0) {
2406 DRM_ERROR("Failed to change PDT for port %p: %d\n",
2407 port, ret);
2408 dowork = false;
2409 }
2410
2411 if (!old_input && old_ddps != port->ddps && !port->ddps) {
2412 for (i = 0; i < mgr->max_payloads; i++) {
2413 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2414 struct drm_dp_mst_port *port_validated;
2415
2416 if (!vcpi)
2417 continue;
2418
2419 port_validated =
2420 container_of(vcpi, struct drm_dp_mst_port, vcpi);
2421 port_validated =
2422 drm_dp_mst_topology_get_port_validated(mgr, port_validated);
2423 if (!port_validated) {
2424 mutex_lock(&mgr->payload_lock);
2425 vcpi->num_slots = 0;
2426 mutex_unlock(&mgr->payload_lock);
2427 } else {
2428 drm_dp_mst_topology_put_port(port_validated);
2429 }
2430 }
2431 }
2432
2433 if (port->connector)
2434 drm_modeset_unlock(&mgr->base.lock);
2435 else if (create_connector)
2436 drm_dp_mst_port_add_connector(mstb, port);
2437
2438 out:
2439 drm_dp_mst_topology_put_port(port);
2440 if (dowork)
2441 queue_work(system_long_wq, &mstb->mgr->work);
2442 }
2443
2444 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2445 u8 lct, u8 *rad)
2446 {
2447 struct drm_dp_mst_branch *mstb;
2448 struct drm_dp_mst_port *port;
2449 int i, ret;
2450 /* find the port by iterating down */
2451
2452 mutex_lock(&mgr->lock);
2453 mstb = mgr->mst_primary;
2454
2455 if (!mstb)
2456 goto out;
2457
2458 for (i = 0; i < lct - 1; i++) {
2459 int shift = (i % 2) ? 0 : 4;
2460 int port_num = (rad[i / 2] >> shift) & 0xf;
2461
2462 list_for_each_entry(port, &mstb->ports, next) {
2463 if (port->port_num == port_num) {
2464 mstb = port->mstb;
2465 if (!mstb) {
2466 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
2467 goto out;
2468 }
2469
2470 break;
2471 }
2472 }
2473 }
2474 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2475 if (!ret)
2476 mstb = NULL;
2477 out:
2478 mutex_unlock(&mgr->lock);
2479 return mstb;
2480 }
2481
2482 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2483 struct drm_dp_mst_branch *mstb,
2484 const uint8_t *guid)
2485 {
2486 struct drm_dp_mst_branch *found_mstb;
2487 struct drm_dp_mst_port *port;
2488
2489 if (memcmp(mstb->guid, guid, 16) == 0)
2490 return mstb;
2491
2492
2493 list_for_each_entry(port, &mstb->ports, next) {
2494 if (!port->mstb)
2495 continue;
2496
2497 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2498
2499 if (found_mstb)
2500 return found_mstb;
2501 }
2502
2503 return NULL;
2504 }
2505
2506 static struct drm_dp_mst_branch *
2507 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2508 const uint8_t *guid)
2509 {
2510 struct drm_dp_mst_branch *mstb;
2511 int ret;
2512
2513 /* find the port by iterating down */
2514 mutex_lock(&mgr->lock);
2515
2516 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2517 if (mstb) {
2518 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2519 if (!ret)
2520 mstb = NULL;
2521 }
2522
2523 mutex_unlock(&mgr->lock);
2524 return mstb;
2525 }
2526
2527 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2528 struct drm_dp_mst_branch *mstb)
2529 {
2530 struct drm_dp_mst_port *port;
2531 int ret;
2532 bool changed = false;
2533
2534 if (!mstb->link_address_sent) {
2535 ret = drm_dp_send_link_address(mgr, mstb);
2536 if (ret == 1)
2537 changed = true;
2538 else if (ret < 0)
2539 return ret;
2540 }
2541
2542 list_for_each_entry(port, &mstb->ports, next) {
2543 struct drm_dp_mst_branch *mstb_child = NULL;
2544
2545 if (port->input || !port->ddps)
2546 continue;
2547
2548 if (port->mstb)
2549 mstb_child = drm_dp_mst_topology_get_mstb_validated(
2550 mgr, port->mstb);
2551
2552 if (mstb_child) {
2553 ret = drm_dp_check_and_send_link_address(mgr,
2554 mstb_child);
2555 drm_dp_mst_topology_put_mstb(mstb_child);
2556 if (ret == 1)
2557 changed = true;
2558 else if (ret < 0)
2559 return ret;
2560 }
2561 }
2562
2563 return changed;
2564 }
2565
2566 static void drm_dp_mst_link_probe_work(struct work_struct *work)
2567 {
2568 struct drm_dp_mst_topology_mgr *mgr =
2569 container_of(work, struct drm_dp_mst_topology_mgr, work);
2570 struct drm_device *dev = mgr->dev;
2571 struct drm_dp_mst_branch *mstb;
2572 int ret;
2573 bool clear_payload_id_table;
2574
2575 mutex_lock(&mgr->probe_lock);
2576
2577 mutex_lock(&mgr->lock);
2578 clear_payload_id_table = !mgr->payload_id_table_cleared;
2579 mgr->payload_id_table_cleared = true;
2580
2581 mstb = mgr->mst_primary;
2582 if (mstb) {
2583 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2584 if (!ret)
2585 mstb = NULL;
2586 }
2587 mutex_unlock(&mgr->lock);
2588 if (!mstb) {
2589 mutex_unlock(&mgr->probe_lock);
2590 return;
2591 }
2592
2593 /*
2594 * Certain branch devices seem to incorrectly report an available_pbn
2595 * of 0 on downstream sinks, even after clearing the
2596 * DP_PAYLOAD_ALLOCATE_* registers in
2597 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
2598 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
2599 * things work again.
2600 */
2601 if (clear_payload_id_table) {
2602 DRM_DEBUG_KMS("Clearing payload ID table\n");
2603 drm_dp_send_clear_payload_id_table(mgr, mstb);
2604 }
2605
2606 ret = drm_dp_check_and_send_link_address(mgr, mstb);
2607 drm_dp_mst_topology_put_mstb(mstb);
2608
2609 mutex_unlock(&mgr->probe_lock);
2610 if (ret)
2611 drm_kms_helper_hotplug_event(dev);
2612 }
2613
2614 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2615 u8 *guid)
2616 {
2617 u64 salt;
2618
2619 if (memchr_inv(guid, 0, 16))
2620 return true;
2621
2622 salt = get_jiffies_64();
2623
2624 memcpy(&guid[0], &salt, sizeof(u64));
2625 memcpy(&guid[8], &salt, sizeof(u64));
2626
2627 return false;
2628 }
2629
2630 static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
2631 u8 port_num, u32 offset, u8 num_bytes)
2632 {
2633 struct drm_dp_sideband_msg_req_body req;
2634
2635 req.req_type = DP_REMOTE_DPCD_READ;
2636 req.u.dpcd_read.port_number = port_num;
2637 req.u.dpcd_read.dpcd_address = offset;
2638 req.u.dpcd_read.num_bytes = num_bytes;
2639 drm_dp_encode_sideband_req(&req, msg);
2640 }
2641
2642 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2643 bool up, u8 *msg, int len)
2644 {
2645 int ret;
2646 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2647 int tosend, total, offset;
2648 int retries = 0;
2649
2650 retry:
2651 total = len;
2652 offset = 0;
2653 do {
2654 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2655
2656 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2657 &msg[offset],
2658 tosend);
2659 if (ret != tosend) {
2660 if (ret == -EIO && retries < 5) {
2661 retries++;
2662 goto retry;
2663 }
2664 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
2665
2666 return -EIO;
2667 }
2668 offset += tosend;
2669 total -= tosend;
2670 } while (total > 0);
2671 return 0;
2672 }
2673
2674 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2675 struct drm_dp_sideband_msg_tx *txmsg)
2676 {
2677 struct drm_dp_mst_branch *mstb = txmsg->dst;
2678 u8 req_type;
2679
2680 req_type = txmsg->msg[0] & 0x7f;
2681 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2682 req_type == DP_RESOURCE_STATUS_NOTIFY)
2683 hdr->broadcast = 1;
2684 else
2685 hdr->broadcast = 0;
2686 hdr->path_msg = txmsg->path_msg;
2687 hdr->lct = mstb->lct;
2688 hdr->lcr = mstb->lct - 1;
2689 if (mstb->lct > 1)
2690 memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
2691
2692 return 0;
2693 }
2694 /*
2695 * process a single block of the next message in the sideband queue
2696 */
2697 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2698 struct drm_dp_sideband_msg_tx *txmsg,
2699 bool up)
2700 {
2701 u8 chunk[48];
2702 struct drm_dp_sideband_msg_hdr hdr;
2703 int len, space, idx, tosend;
2704 int ret;
2705
2706 if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
2707 return 0;
2708
2709 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2710
2711 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
2712 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2713
2714 /* make hdr from dst mst */
2715 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2716 if (ret < 0)
2717 return ret;
2718
2719 /* amount left to send in this message */
2720 len = txmsg->cur_len - txmsg->cur_offset;
2721
2722 /* 48 bytes max per sideband msg chunk, minus 1 byte for the data CRC and the header bytes */
2723 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
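/*
 * For example, an LCT 1 header should be 3 bytes, leaving
 * 48 - 1 - 3 = 44 bytes of message body in this chunk; longer RADs
 * shrink the usable space accordingly.
 */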
2724
2725 tosend = min(len, space);
2726 if (len == txmsg->cur_len)
2727 hdr.somt = 1;
2728 if (space >= len)
2729 hdr.eomt = 1;
2730
2731
2732 hdr.msg_len = tosend + 1;
2733 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2734 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2735 /* add crc at end */
2736 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2737 idx += tosend + 1;
2738
2739 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2740 if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
2741 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2742
2743 drm_printf(&p, "sideband msg failed to send\n");
2744 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2745 return ret;
2746 }
2747
2748 txmsg->cur_offset += tosend;
2749 if (txmsg->cur_offset == txmsg->cur_len) {
2750 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2751 return 1;
2752 }
2753 return 0;
2754 }
2755
2756 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2757 {
2758 struct drm_dp_sideband_msg_tx *txmsg;
2759 int ret;
2760
2761 WARN_ON(!mutex_is_locked(&mgr->qlock));
2762
2763 /* construct a chunk from the first msg in the tx_msg queue */
2764 if (list_empty(&mgr->tx_msg_downq))
2765 return;
2766
2767 txmsg = list_first_entry(&mgr->tx_msg_downq,
2768 struct drm_dp_sideband_msg_tx, next);
2769 ret = process_single_tx_qlock(mgr, txmsg, false);
2770 if (ret < 0) {
2771 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2772 list_del(&txmsg->next);
2773 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2774 wake_up_all(&mgr->tx_waitq);
2775 }
2776 }
2777
2778 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2779 struct drm_dp_sideband_msg_tx *txmsg)
2780 {
2781 mutex_lock(&mgr->qlock);
2782 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2783
2784 if (drm_debug_enabled(DRM_UT_DP)) {
2785 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2786
2787 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2788 }
2789
2790 if (list_is_singular(&mgr->tx_msg_downq))
2791 process_single_down_tx_qlock(mgr);
2792 mutex_unlock(&mgr->qlock);
2793 }
2794
2795 static void
2796 drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2797 {
2798 struct drm_dp_link_addr_reply_port *port_reply;
2799 int i;
2800
2801 for (i = 0; i < reply->nports; i++) {
2802 port_reply = &reply->ports[i];
2803 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2804 i,
2805 port_reply->input_port,
2806 port_reply->peer_device_type,
2807 port_reply->port_number,
2808 port_reply->dpcd_revision,
2809 port_reply->mcs,
2810 port_reply->ddps,
2811 port_reply->legacy_device_plug_status,
2812 port_reply->num_sdp_streams,
2813 port_reply->num_sdp_stream_sinks);
2814 }
2815 }
2816
2817 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2818 struct drm_dp_mst_branch *mstb)
2819 {
2820 struct drm_dp_sideband_msg_tx *txmsg;
2821 struct drm_dp_link_address_ack_reply *reply;
2822 struct drm_dp_mst_port *port, *tmp;
2823 int i, ret, port_mask = 0;
2824 bool changed = false;
2825
2826 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2827 if (!txmsg)
2828 return -ENOMEM;
2829
2830 txmsg->dst = mstb;
2831 build_link_address(txmsg);
2832
2833 mstb->link_address_sent = true;
2834 drm_dp_queue_down_tx(mgr, txmsg);
2835
2836 /* FIXME: Actually do some real error handling here */
2837 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2838 if (ret <= 0) {
2839 DRM_ERROR("Sending link address failed with %d\n", ret);
2840 goto out;
2841 }
2842 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2843 DRM_ERROR("link address NAK received\n");
2844 ret = -EIO;
2845 goto out;
2846 }
2847
2848 reply = &txmsg->reply.u.link_addr;
2849 DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
2850 drm_dp_dump_link_address(reply);
2851
2852 ret = drm_dp_check_mstb_guid(mstb, reply->guid);
2853 if (ret) {
2854 char buf[64];
2855
2856 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
2857 DRM_ERROR("GUID check on %s failed: %d\n",
2858 buf, ret);
2859 goto out;
2860 }
2861
2862 for (i = 0; i < reply->nports; i++) {
2863 port_mask |= BIT(reply->ports[i].port_number);
2864 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2865 &reply->ports[i]);
2866 if (ret == 1)
2867 changed = true;
2868 else if (ret < 0)
2869 goto out;
2870 }
2871
2872 /* Prune any ports that are currently a part of mstb in our in-memory
2873 * topology, but were not seen in this link address. Usually this
2874 * means that they were removed while the topology was out of sync,
2875 * e.g. during suspend/resume
2876 */
2877 mutex_lock(&mgr->lock);
2878 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
2879 if (port_mask & BIT(port->port_num))
2880 continue;
2881
2882 DRM_DEBUG_KMS("port %d was not in link address, removing\n",
2883 port->port_num);
2884 list_del(&port->next);
2885 drm_dp_mst_topology_put_port(port);
2886 changed = true;
2887 }
2888 mutex_unlock(&mgr->lock);
2889
2890 out:
2891 if (ret <= 0)
2892 mstb->link_address_sent = false;
2893 kfree(txmsg);
2894 return ret < 0 ? ret : changed;
2895 }
2896
2897 static void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
2898 struct drm_dp_mst_branch *mstb)
2899 {
2900 struct drm_dp_sideband_msg_tx *txmsg;
2901 int ret;
2902
2903 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2904 if (!txmsg)
2905 return;
2906
2907 txmsg->dst = mstb;
2908 build_clear_payload_id_table(txmsg);
2909
2910 drm_dp_queue_down_tx(mgr, txmsg);
2911
2912 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2913 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2914 DRM_DEBUG_KMS("clear payload id table nak received\n");
2915
2916 kfree(txmsg);
2917 }
2918
2919 static int
2920 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2921 struct drm_dp_mst_branch *mstb,
2922 struct drm_dp_mst_port *port)
2923 {
2924 struct drm_dp_enum_path_resources_ack_reply *path_res;
2925 struct drm_dp_sideband_msg_tx *txmsg;
2926 int ret;
2927
2928 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2929 if (!txmsg)
2930 return -ENOMEM;
2931
2932 txmsg->dst = mstb;
2933 build_enum_path_resources(txmsg, port->port_num);
2934
2935 drm_dp_queue_down_tx(mgr, txmsg);
2936
2937 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2938 if (ret > 0) {
2939 ret = 0;
2940 path_res = &txmsg->reply.u.path_resources;
2941
2942 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2943 DRM_DEBUG_KMS("enum path resources nak received\n");
2944 } else {
2945 if (port->port_num != path_res->port_number)
2946 DRM_ERROR("got incorrect port in response\n");
2947
2948 DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
2949 path_res->port_number,
2950 path_res->full_payload_bw_number,
2951 path_res->avail_payload_bw_number);
2952
2953 /*
2954 * If something changed, make sure we send a
2955 * hotplug
2956 */
2957 if (port->full_pbn != path_res->full_payload_bw_number ||
2958 port->fec_capable != path_res->fec_capable)
2959 ret = 1;
2960
2961 port->full_pbn = path_res->full_payload_bw_number;
2962 port->fec_capable = path_res->fec_capable;
2963 }
2964 }
2965
2966 kfree(txmsg);
2967 return ret;
2968 }
2969
2970 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
2971 {
2972 if (!mstb->port_parent)
2973 return NULL;
2974
2975 if (mstb->port_parent->mstb != mstb)
2976 return mstb->port_parent;
2977
2978 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
2979 }
2980
2981 /*
2982 * Searches upwards in the topology starting from mstb to try to find the
2983 * closest available parent of mstb that's still connected to the rest of the
2984 * topology. This can be used for operations like releasing payloads, where
2985 * the branch device which owned the payload may no longer be around, and the
2986 * payload therefore has to be freed on the last living relative of that
2987 * branch instead.
2988 */
2989 static struct drm_dp_mst_branch *
2990 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
2991 struct drm_dp_mst_branch *mstb,
2992 int *port_num)
2993 {
2994 struct drm_dp_mst_branch *rmstb = NULL;
2995 struct drm_dp_mst_port *found_port;
2996
2997 mutex_lock(&mgr->lock);
2998 if (!mgr->mst_primary)
2999 goto out;
3000
3001 do {
3002 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3003 if (!found_port)
3004 break;
3005
3006 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
3007 rmstb = found_port->parent;
3008 *port_num = found_port->port_num;
3009 } else {
3010 /* Search again, starting from this parent */
3011 mstb = found_port->parent;
3012 }
3013 } while (!rmstb);
3014 out:
3015 mutex_unlock(&mgr->lock);
3016 return rmstb;
3017 }
3018
3019 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3020 struct drm_dp_mst_port *port,
3021 int id,
3022 int pbn)
3023 {
3024 struct drm_dp_sideband_msg_tx *txmsg;
3025 struct drm_dp_mst_branch *mstb;
3026 int ret, port_num;
3027 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
3028 int i;
3029
3030 port_num = port->port_num;
3031 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3032 if (!mstb) {
3033 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3034 port->parent,
3035 &port_num);
3036
3037 if (!mstb)
3038 return -EINVAL;
3039 }
3040
3041 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3042 if (!txmsg) {
3043 ret = -ENOMEM;
3044 goto fail_put;
3045 }
3046
3047 for (i = 0; i < port->num_sdp_streams; i++)
3048 sinks[i] = i;
3049
3050 txmsg->dst = mstb;
3051 build_allocate_payload(txmsg, port_num,
3052 id,
3053 pbn, port->num_sdp_streams, sinks);
3054
3055 drm_dp_queue_down_tx(mgr, txmsg);
3056
3057 /*
3058 * FIXME: there is a small chance that between getting the last
3059 * connected mstb and sending the payload message, the last connected
3060 * mstb could also be removed from the topology. In the future, this
3061 * needs to be fixed by restarting the
3062 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
3063 * timeout if the topology is still connected to the system.
3064 */
3065 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3066 if (ret > 0) {
3067 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3068 ret = -EINVAL;
3069 else
3070 ret = 0;
3071 }
3072 kfree(txmsg);
3073 fail_put:
3074 drm_dp_mst_topology_put_mstb(mstb);
3075 return ret;
3076 }
3077
3078 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3079 struct drm_dp_mst_port *port, bool power_up)
3080 {
3081 struct drm_dp_sideband_msg_tx *txmsg;
3082 int ret;
3083
3084 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3085 if (!port)
3086 return -EINVAL;
3087
3088 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3089 if (!txmsg) {
3090 drm_dp_mst_topology_put_port(port);
3091 return -ENOMEM;
3092 }
3093
3094 txmsg->dst = port->parent;
3095 build_power_updown_phy(txmsg, port->port_num, power_up);
3096 drm_dp_queue_down_tx(mgr, txmsg);
3097
3098 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3099 if (ret > 0) {
3100 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3101 ret = -EINVAL;
3102 else
3103 ret = 0;
3104 }
3105 kfree(txmsg);
3106 drm_dp_mst_topology_put_port(port);
3107
3108 return ret;
3109 }
3110 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
3111
3112 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3113 int id,
3114 struct drm_dp_payload *payload)
3115 {
3116 int ret;
3117
3118 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
3119 if (ret < 0) {
3120 payload->payload_state = 0;
3121 return ret;
3122 }
3123 payload->payload_state = DP_PAYLOAD_LOCAL;
3124 return 0;
3125 }
3126
3127 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3128 struct drm_dp_mst_port *port,
3129 int id,
3130 struct drm_dp_payload *payload)
3131 {
3132 int ret;
3133 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
3134 if (ret < 0)
3135 return ret;
3136 payload->payload_state = DP_PAYLOAD_REMOTE;
3137 return ret;
3138 }
3139
3140 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3141 struct drm_dp_mst_port *port,
3142 int id,
3143 struct drm_dp_payload *payload)
3144 {
3145 DRM_DEBUG_KMS("\n");
3146 /* it's okay for these to fail */
3147 if (port) {
3148 drm_dp_payload_send_msg(mgr, port, id, 0);
3149 }
3150
3151 drm_dp_dpcd_write_payload(mgr, id, payload);
3152 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
3153 return 0;
3154 }
3155
3156 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3157 int id,
3158 struct drm_dp_payload *payload)
3159 {
3160 payload->payload_state = 0;
3161 return 0;
3162 }
3163
3164 /**
3165 * drm_dp_update_payload_part1() - Execute payload update part 1
3166 * @mgr: manager to use.
3167 *
3168 * This iterates over all proposed virtual channels, and tries to
3169 * allocate space in the link for them. For 0->slots transitions,
3170 * this step just writes the VCPI to the MST device. For slots->0
3171 * transitions, this writes the updated VCPIs and removes the
3172 * remote VC payloads.
3173 *
3174 * After calling this, the driver should generate ACT and payload
3175 * packets.
3176 */
3177 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
3178 {
3179 struct drm_dp_payload req_payload;
3180 struct drm_dp_mst_port *port;
3181 int i, j;
3182 int cur_slots = 1;
3183
3184 mutex_lock(&mgr->payload_lock);
3185 for (i = 0; i < mgr->max_payloads; i++) {
3186 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3187 struct drm_dp_payload *payload = &mgr->payloads[i];
3188 bool put_port = false;
3189
3190 /* solve the current payloads - compare to the hw ones
3191 - update the hw view */
3192 req_payload.start_slot = cur_slots;
3193 if (vcpi) {
3194 port = container_of(vcpi, struct drm_dp_mst_port,
3195 vcpi);
3196
3197 /* Validated ports don't matter if we're releasing
3198 * VCPI
3199 */
3200 if (vcpi->num_slots) {
3201 port = drm_dp_mst_topology_get_port_validated(
3202 mgr, port);
3203 if (!port) {
3204 mutex_unlock(&mgr->payload_lock);
3205 return -EINVAL;
3206 }
3207 put_port = true;
3208 }
3209
3210 req_payload.num_slots = vcpi->num_slots;
3211 req_payload.vcpi = vcpi->vcpi;
3212 } else {
3213 port = NULL;
3214 req_payload.num_slots = 0;
3215 }
3216
3217 payload->start_slot = req_payload.start_slot;
3218 /* work out what is required to happen with this payload */
3219 if (payload->num_slots != req_payload.num_slots) {
3220
3221 /* need to push an update for this payload */
3222 if (req_payload.num_slots) {
3223 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
3224 &req_payload);
3225 payload->num_slots = req_payload.num_slots;
3226 payload->vcpi = req_payload.vcpi;
3227
3228 } else if (payload->num_slots) {
3229 payload->num_slots = 0;
3230 drm_dp_destroy_payload_step1(mgr, port,
3231 payload->vcpi,
3232 payload);
3233 req_payload.payload_state =
3234 payload->payload_state;
3235 payload->start_slot = 0;
3236 }
3237 payload->payload_state = req_payload.payload_state;
3238 }
3239 cur_slots += req_payload.num_slots;
3240
3241 if (put_port)
3242 drm_dp_mst_topology_put_port(port);
3243 }
3244
3245 for (i = 0; i < mgr->max_payloads; /* do nothing */) {
3246 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
3247 i++;
3248 continue;
3249 }
3250
3251 DRM_DEBUG_KMS("removing payload %d\n", i);
3252 for (j = i; j < mgr->max_payloads - 1; j++) {
3253 mgr->payloads[j] = mgr->payloads[j + 1];
3254 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
3255
3256 if (mgr->proposed_vcpis[j] &&
3257 mgr->proposed_vcpis[j]->num_slots) {
3258 set_bit(j + 1, &mgr->payload_mask);
3259 } else {
3260 clear_bit(j + 1, &mgr->payload_mask);
3261 }
3262 }
3263
3264 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
3265 sizeof(struct drm_dp_payload));
3266 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
3267 clear_bit(mgr->max_payloads, &mgr->payload_mask);
3268 }
3269 mutex_unlock(&mgr->payload_lock);
3270
3271 return 0;
3272 }
3273 EXPORT_SYMBOL(drm_dp_update_payload_part1);
3274
3275 /**
3276 * drm_dp_update_payload_part2() - Execute payload update part 2
3277 * @mgr: manager to use.
3278 *
3279 * This iterates over all proposed virtual channels, and tries to
3280 * allocate space in the link for them. For 0->slots transitions,
3281 * this step writes the remote VC payload commands. For slots->0
3282 * this just resets some internal state.
3283 */
3284 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
3285 {
3286 struct drm_dp_mst_port *port;
3287 int i;
3288 int ret = 0;
3289 mutex_lock(&mgr->payload_lock);
3290 for (i = 0; i < mgr->max_payloads; i++) {
3291
3292 if (!mgr->proposed_vcpis[i])
3293 continue;
3294
3295 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3296
3297 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
3298 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
3299 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3300 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
3301 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3302 }
3303 if (ret) {
3304 mutex_unlock(&mgr->payload_lock);
3305 return ret;
3306 }
3307 }
3308 mutex_unlock(&mgr->payload_lock);
3309 return 0;
3310 }
3311 EXPORT_SYMBOL(drm_dp_update_payload_part2);
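
/*
 * A hedged sketch of the usual driver sequence around the two payload
 * helpers above when enabling MST streams: push the payload table, wait
 * for the ACT handled bit, then send the remote payload messages. Error
 * handling and the source-side payload programming in between are
 * omitted for brevity:
 *
 *	drm_dp_update_payload_part1(mgr);
 *	(program the source's own payload table / enable the stream here)
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */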
3312
3313 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3314 struct drm_dp_mst_port *port,
3315 int offset, int size, u8 *bytes)
3316 {
3317 int ret = 0;
3318 struct drm_dp_sideband_msg_tx *txmsg;
3319 struct drm_dp_mst_branch *mstb;
3320
3321 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3322 if (!mstb)
3323 return -EINVAL;
3324
3325 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3326 if (!txmsg) {
3327 ret = -ENOMEM;
3328 goto fail_put;
3329 }
3330
3331 build_dpcd_read(txmsg, port->port_num, offset, size);
3332 txmsg->dst = port->parent;
3333
3334 drm_dp_queue_down_tx(mgr, txmsg);
3335
3336 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3337 if (ret < 0)
3338 goto fail_free;
3339
3340 /* DPCD read should never be NACKed */
3341 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3342 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3343 mstb, port->port_num, offset, size);
3344 ret = -EIO;
3345 goto fail_free;
3346 }
3347
3348 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3349 ret = -EPROTO;
3350 goto fail_free;
3351 }
3352
3353 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3354 size);
3355 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3356
3357 fail_free:
3358 kfree(txmsg);
3359 fail_put:
3360 drm_dp_mst_topology_put_mstb(mstb);
3361
3362 return ret;
3363 }
3364
3365 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3366 struct drm_dp_mst_port *port,
3367 int offset, int size, u8 *bytes)
3368 {
3369 int ret;
3370 struct drm_dp_sideband_msg_tx *txmsg;
3371 struct drm_dp_mst_branch *mstb;
3372
3373 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3374 if (!mstb)
3375 return -EINVAL;
3376
3377 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3378 if (!txmsg) {
3379 ret = -ENOMEM;
3380 goto fail_put;
3381 }
3382
3383 build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3384 txmsg->dst = mstb;
3385
3386 drm_dp_queue_down_tx(mgr, txmsg);
3387
3388 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3389 if (ret > 0) {
3390 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3391 ret = -EIO;
3392 else
3393 ret = size;
3394 }
3395
3396 kfree(txmsg);
3397 fail_put:
3398 drm_dp_mst_topology_put_mstb(mstb);
3399 return ret;
3400 }
3401
3402 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3403 {
3404 struct drm_dp_sideband_msg_reply_body reply;
3405
3406 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3407 reply.req_type = req_type;
3408 drm_dp_encode_sideband_reply(&reply, msg);
3409 return 0;
3410 }
3411
3412 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3413 struct drm_dp_mst_branch *mstb,
3414 int req_type, bool broadcast)
3415 {
3416 struct drm_dp_sideband_msg_tx *txmsg;
3417
3418 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3419 if (!txmsg)
3420 return -ENOMEM;
3421
3422 txmsg->dst = mstb;
3423 drm_dp_encode_up_ack_reply(txmsg, req_type);
3424
3425 mutex_lock(&mgr->qlock);
3426 /* construct a chunk from the first msg in the tx_msg queue */
3427 process_single_tx_qlock(mgr, txmsg, true);
3428 mutex_unlock(&mgr->qlock);
3429
3430 kfree(txmsg);
3431 return 0;
3432 }
3433
3434 static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
3435 {
3436 if (dp_link_bw == 0 || dp_link_count == 0)
3437 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
3438 dp_link_bw, dp_link_count);
3439
3440 return dp_link_bw * dp_link_count / 2;
3441 }
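
/*
 * Worked example of the calculation above: a 5.4 Gbps link
 * (DP_LINK_BW_5_4 == 0x14, i.e. 20) with 4 lanes gives 20 * 4 / 2 = 40,
 * which is the value that ends up in mgr->pbn_div in
 * drm_dp_mst_topology_mgr_set_mst() below.
 */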
3442
3443 /**
3444 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3445 * @mgr: manager to set state for
3446 * @mst_state: true to enable MST on this connector - false to disable.
3447 *
3448 * This is called by the driver when it detects an MST capable device plugged
3449 * into a DP MST capable port, or when a DP MST capable device is unplugged.
3450 */
3451 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3452 {
3453 int ret = 0;
3454 struct drm_dp_mst_branch *mstb = NULL;
3455
3456 mutex_lock(&mgr->payload_lock);
3457 mutex_lock(&mgr->lock);
3458 if (mst_state == mgr->mst_state)
3459 goto out_unlock;
3460
3461 mgr->mst_state = mst_state;
3462 /* set the device into MST mode */
3463 if (mst_state) {
3464 struct drm_dp_payload reset_pay;
3465
3466 WARN_ON(mgr->mst_primary);
3467
3468 /* get dpcd info */
3469 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3470 if (ret != DP_RECEIVER_CAP_SIZE) {
3471 DRM_DEBUG_KMS("failed to read DPCD\n");
3472 goto out_unlock;
3473 }
3474
3475 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
3476 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
3477 if (mgr->pbn_div == 0) {
3478 ret = -EINVAL;
3479 goto out_unlock;
3480 }
3481
3482 /* add initial branch device at LCT 1 */
3483 mstb = drm_dp_add_mst_branch_device(1, NULL);
3484 if (mstb == NULL) {
3485 ret = -ENOMEM;
3486 goto out_unlock;
3487 }
3488 mstb->mgr = mgr;
3489
3490 /* give this the main reference */
3491 mgr->mst_primary = mstb;
3492 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3493
3494 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3495 DP_MST_EN |
3496 DP_UP_REQ_EN |
3497 DP_UPSTREAM_IS_SRC);
3498 if (ret < 0)
3499 goto out_unlock;
3500
3501 reset_pay.start_slot = 0;
3502 reset_pay.num_slots = 0x3f;
3503 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3504
3505 queue_work(system_long_wq, &mgr->work);
3506
3507 ret = 0;
3508 } else {
3509 /* disable MST on the device */
3510 mstb = mgr->mst_primary;
3511 mgr->mst_primary = NULL;
3512 /* this can fail if the device is gone */
3513 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3514 ret = 0;
3515 memset(mgr->payloads, 0,
3516 mgr->max_payloads * sizeof(mgr->payloads[0]));
3517 memset(mgr->proposed_vcpis, 0,
3518 mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
3519 mgr->payload_mask = 0;
3520 set_bit(0, &mgr->payload_mask);
3521 mgr->vcpi_mask = 0;
3522 mgr->payload_id_table_cleared = false;
3523 }
3524
3525 out_unlock:
3526 mutex_unlock(&mgr->lock);
3527 mutex_unlock(&mgr->payload_lock);
3528 if (mstb)
3529 drm_dp_mst_topology_put_mstb(mstb);
3530 return ret;
3531
3532 }
3533 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
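
/*
 * A hedged sketch of how a driver typically flips MST on and off from
 * its hotplug/detect path; the foo_* names are hypothetical and error
 * handling is elided:
 *
 *	if (sink_supports_mst)
 *		drm_dp_mst_topology_mgr_set_mst(&foo->mst_mgr, true);
 *	else if (foo->mst_mgr.mst_state)
 *		drm_dp_mst_topology_mgr_set_mst(&foo->mst_mgr, false);
 */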
3534
3535 static void
3536 drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3537 {
3538 struct drm_dp_mst_port *port;
3539
3540 /* The link address will need to be re-sent on resume */
3541 mstb->link_address_sent = false;
3542
3543 list_for_each_entry(port, &mstb->ports, next)
3544 if (port->mstb)
3545 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3546 }
3547
3548 /**
3549 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3550 * @mgr: manager to suspend
3551 *
3552 * This function tells the MST device that we can't handle UP messages
3553 * anymore. This should stop it from sending any since we are suspended.
3554 */
3555 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3556 {
3557 mutex_lock(&mgr->lock);
3558 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3559 DP_MST_EN | DP_UPSTREAM_IS_SRC);
3560 mutex_unlock(&mgr->lock);
3561 flush_work(&mgr->up_req_work);
3562 flush_work(&mgr->work);
3563 flush_work(&mgr->delayed_destroy_work);
3564
3565 mutex_lock(&mgr->lock);
3566 if (mgr->mst_state && mgr->mst_primary)
3567 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3568 mutex_unlock(&mgr->lock);
3569 }
3570 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3571
3572 /**
3573 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3574 * @mgr: manager to resume
3575 * @sync: whether or not to perform topology reprobing synchronously
3576 *
3577 * This will fetch the DPCD and check whether the device is still there;
3578 * if it is, it will rewrite the MSTM control bits and return.
3579 *
3580 * If the device is gone, this returns -1 and the driver should do a
3581 * full MST reprobe, in case we were undocked.
3582 *
3583 * During system resume (where it is assumed that the driver will be calling
3584 * drm_atomic_helper_resume()) this function should be called beforehand with
3585 * @sync set to true. In contexts like runtime resume where the driver is not
3586 * expected to be calling drm_atomic_helper_resume(), this function should be
3587 * called with @sync set to false in order to avoid deadlocking.
3588 *
3589 * Returns: -1 if the MST topology was removed while we were suspended, 0
3590 * otherwise.
3591 */
3592 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3593 bool sync)
3594 {
3595 int ret;
3596 u8 guid[16];
3597
3598 mutex_lock(&mgr->lock);
3599 if (!mgr->mst_primary)
3600 goto out_fail;
3601
3602 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
3603 DP_RECEIVER_CAP_SIZE);
3604 if (ret != DP_RECEIVER_CAP_SIZE) {
3605 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3606 goto out_fail;
3607 }
3608
3609 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3610 DP_MST_EN |
3611 DP_UP_REQ_EN |
3612 DP_UPSTREAM_IS_SRC);
3613 if (ret < 0) {
3614 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
3615 goto out_fail;
3616 }
3617
3618 /* Some hubs forget their guids after they resume */
3619 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3620 if (ret != 16) {
3621 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3622 goto out_fail;
3623 }
3624
3625 ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3626 if (ret) {
3627 DRM_DEBUG_KMS("check mstb failed - undocked during suspend?\n");
3628 goto out_fail;
3629 }
3630
3631 /*
3632 * For the final step of resuming the topology, we need to bring the
3633 * state of our in-memory topology back into sync with reality. So,
3634 * restart the probing process as if we're probing a new hub
3635 */
3636 queue_work(system_long_wq, &mgr->work);
3637 mutex_unlock(&mgr->lock);
3638
3639 if (sync) {
3640 DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
3641 flush_work(&mgr->work);
3642 }
3643
3644 return 0;
3645
3646 out_fail:
3647 mutex_unlock(&mgr->lock);
3648 return -1;
3649 }
3650 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
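
/*
 * A hedged sketch of the suspend/resume pairing from a driver's system
 * PM callbacks; the foo_* names are hypothetical. If resume fails, the
 * driver is expected to tear the topology down and treat the next
 * connection as a fresh hotplug:
 *
 *	static int foo_suspend(struct foo_device *foo)
 *	{
 *		drm_dp_mst_topology_mgr_suspend(&foo->mst_mgr);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct foo_device *foo)
 *	{
 *		if (drm_dp_mst_topology_mgr_resume(&foo->mst_mgr, true) < 0)
 *			drm_dp_mst_topology_mgr_set_mst(&foo->mst_mgr, false);
 *		return 0;
 *	}
 */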
3651
3652 static bool
3653 drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
3654 struct drm_dp_mst_branch **mstb)
3655 {
3656 int len;
3657 u8 replyblock[32];
3658 int replylen, curreply;
3659 int ret;
3660 u8 hdrlen;
3661 struct drm_dp_sideband_msg_hdr hdr;
3662 struct drm_dp_sideband_msg_rx *msg =
3663 up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3664 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
3665 DP_SIDEBAND_MSG_DOWN_REP_BASE;
3666
3667 if (!up)
3668 *mstb = NULL;
3669
3670 len = min(mgr->max_dpcd_transaction_bytes, 16);
3671 ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
3672 if (ret != len) {
3673 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
3674 return false;
3675 }
3676
3677 ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen);
3679 if (!ret) {
3679 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
3680 1, replyblock, len, false);
3681 DRM_DEBUG_KMS("ERROR: failed header\n");
3682 return false;
3683 }
3684
3685 if (!up) {
3686 /* Caller is responsible for giving back this reference */
3687 *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
3688 if (!*mstb) {
3689 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3690 hdr.lct);
3691 return false;
3692 }
3693 }
3694
3695 if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
3696 DRM_DEBUG_KMS("sideband msg set header failed %d\n",
3697 replyblock[0]);
3698 return false;
3699 }
3700
3701 replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
3702 ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
3703 if (!ret) {
3704 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
3705 return false;
3706 }
3707
3708 replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
3709 curreply = len;
3710 while (replylen > 0) {
3711 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3712 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3713 replyblock, len);
3714 if (ret != len) {
3715 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
3716 len, ret);
3717 return false;
3718 }
3719
3720 ret = drm_dp_sideband_append_payload(msg, replyblock, len);
3721 if (!ret) {
3722 DRM_DEBUG_KMS("failed to build sideband msg\n");
3723 return false;
3724 }
3725
3726 curreply += len;
3727 replylen -= len;
3728 }
3729 return true;
3730 }
3731
3732 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3733 {
3734 struct drm_dp_sideband_msg_tx *txmsg;
3735 struct drm_dp_mst_branch *mstb = NULL;
3736 struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
3737
3738 if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
3739 goto out;
3740
3741 /* Multi-packet message transmission, don't clear the reply */
3742 if (!msg->have_eomt)
3743 goto out;
3744
3745 /* find the message */
3746 mutex_lock(&mgr->qlock);
3747 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
3748 struct drm_dp_sideband_msg_tx, next);
3749 mutex_unlock(&mgr->qlock);
3750
3751 /* Were we actually expecting a response, and from this mstb? */
3752 if (!txmsg || txmsg->dst != mstb) {
3753 struct drm_dp_sideband_msg_hdr *hdr;
3754 hdr = &msg->initial_hdr;
3755 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
3756 mstb, hdr->seqno, hdr->lct, hdr->rad[0],
3757 msg->msg[0]);
3758 goto out_clear_reply;
3759 }
3760
3761 drm_dp_sideband_parse_reply(msg, &txmsg->reply);
3762
3763 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3764 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
3765 txmsg->reply.req_type,
3766 drm_dp_mst_req_type_str(txmsg->reply.req_type),
3767 txmsg->reply.u.nak.reason,
3768 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3769 txmsg->reply.u.nak.nak_data);
3770 }
3771
3772 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
3773 drm_dp_mst_topology_put_mstb(mstb);
3774
3775 mutex_lock(&mgr->qlock);
3776 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
3777 list_del(&txmsg->next);
3778 mutex_unlock(&mgr->qlock);
3779
3780 wake_up_all(&mgr->tx_waitq);
3781
3782 return 0;
3783
3784 out_clear_reply:
3785 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
3786 out:
3787 if (mstb)
3788 drm_dp_mst_topology_put_mstb(mstb);
3789
3790 return 0;
3791 }
3792
3793 static inline bool
3794 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
3795 struct drm_dp_pending_up_req *up_req)
3796 {
3797 struct drm_dp_mst_branch *mstb = NULL;
3798 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
3799 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
3800 bool hotplug = false;
3801
3802 if (hdr->broadcast) {
3803 const u8 *guid = NULL;
3804
3805 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
3806 guid = msg->u.conn_stat.guid;
3807 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
3808 guid = msg->u.resource_stat.guid;
3809
3810 if (guid)
3811 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
3812 } else {
3813 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3814 }
3815
3816 if (!mstb) {
3817 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3818 hdr->lct);
3819 return false;
3820 }
3821
3822 /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
3823 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
3824 drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
3825 hotplug = true;
3826 }
3827
3828 drm_dp_mst_topology_put_mstb(mstb);
3829 return hotplug;
3830 }
3831
3832 static void drm_dp_mst_up_req_work(struct work_struct *work)
3833 {
3834 struct drm_dp_mst_topology_mgr *mgr =
3835 container_of(work, struct drm_dp_mst_topology_mgr,
3836 up_req_work);
3837 struct drm_dp_pending_up_req *up_req;
3838 bool send_hotplug = false;
3839
3840 mutex_lock(&mgr->probe_lock);
3841 while (true) {
3842 mutex_lock(&mgr->up_req_lock);
3843 up_req = list_first_entry_or_null(&mgr->up_req_list,
3844 struct drm_dp_pending_up_req,
3845 next);
3846 if (up_req)
3847 list_del(&up_req->next);
3848 mutex_unlock(&mgr->up_req_lock);
3849
3850 if (!up_req)
3851 break;
3852
3853 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
3854 kfree(up_req);
3855 }
3856 mutex_unlock(&mgr->probe_lock);
3857
3858 if (send_hotplug)
3859 drm_kms_helper_hotplug_event(mgr->dev);
3860 }
3861
3862 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
3863 {
3864 struct drm_dp_pending_up_req *up_req;
3865
3866 if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
3867 goto out;
3868
3869 if (!mgr->up_req_recv.have_eomt)
3870 return 0;
3871
3872 up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
3873 if (!up_req) {
3874 DRM_ERROR("Not enough memory to process MST up req\n");
3875 return -ENOMEM;
3876 }
3877 INIT_LIST_HEAD(&up_req->next);
3878
3879 drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
3880
3881 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
3882 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
3883 DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
3884 up_req->msg.req_type);
3885 kfree(up_req);
3886 goto out;
3887 }
3888
3889 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
3890 false);
3891
3892 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
3893 const struct drm_dp_connection_status_notify *conn_stat =
3894 &up_req->msg.u.conn_stat;
3895
3896 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
3897 conn_stat->port_number,
3898 conn_stat->legacy_device_plug_status,
3899 conn_stat->displayport_device_plug_status,
3900 conn_stat->message_capability_status,
3901 conn_stat->input_port,
3902 conn_stat->peer_device_type);
3903 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3904 const struct drm_dp_resource_status_notify *res_stat =
3905 &up_req->msg.u.resource_stat;
3906
3907 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
3908 res_stat->port_number,
3909 res_stat->available_pbn);
3910 }
3911
3912 up_req->hdr = mgr->up_req_recv.initial_hdr;
3913 mutex_lock(&mgr->up_req_lock);
3914 list_add_tail(&up_req->next, &mgr->up_req_list);
3915 mutex_unlock(&mgr->up_req_lock);
3916 queue_work(system_long_wq, &mgr->up_req_work);
3917
3918 out:
3919 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3920 return 0;
3921 }
3922
3923 /**
3924 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
3925 * @mgr: manager to notify irq for.
3926 * @esi: 4 bytes from SINK_COUNT_ESI
3927 * @handled: whether the hpd interrupt was consumed or not
3928 *
3929 * This should be called from the driver when it detects a short IRQ,
3930 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
3931 * topology manager will process the sideband messages received as a result
3932 * of this.
3933 */
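/*
 * Sketch of a driver short-pulse handler (the foo_*() names are
 * illustrative assumptions, and error handling is trimmed): read the ESI
 * bytes, hand them to the topology manager, then ack the serviced vectors.
 *
 *	static void foo_handle_short_hpd(struct foo_connector *conn)
 *	{
 *		u8 esi[4];
 *		bool handled = false;
 *
 *		if (drm_dp_dpcd_read(conn->mst_mgr.aux, DP_SINK_COUNT_ESI,
 *				     esi, sizeof(esi)) != sizeof(esi))
 *			return;
 *
 *		drm_dp_mst_hpd_irq(&conn->mst_mgr, esi, &handled);
 *		if (handled)
 *			drm_dp_dpcd_write(conn->mst_mgr.aux,
 *					  DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
 *					  &esi[1], 3);
 *	}
 */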
3934 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
3935 {
3936 int ret = 0;
3937 int sc;
3938 *handled = false;
3939 sc = esi[0] & 0x3f;
3940
3941 if (sc != mgr->sink_count) {
3942 mgr->sink_count = sc;
3943 *handled = true;
3944 }
3945
3946 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
3947 ret = drm_dp_mst_handle_down_rep(mgr);
3948 *handled = true;
3949 }
3950
3951 if (esi[1] & DP_UP_REQ_MSG_RDY) {
3952 ret |= drm_dp_mst_handle_up_req(mgr);
3953 *handled = true;
3954 }
3955
3956 drm_dp_mst_kick_tx(mgr);
3957 return ret;
3958 }
3959 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
3960
3961 /**
3962 * drm_dp_mst_detect_port() - get connection status for an MST port
3963 * @connector: DRM connector for this port
3964 * @ctx: The acquisition context to use for grabbing locks
3965 * @mgr: manager for this port
3966 * @port: pointer to a port
3967 *
3968 * This returns the current connection state for a port.
3969 */
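/*
 * This is normally wired up as &drm_connector_helper_funcs.detect_ctx; a
 * minimal sketch, assuming a hypothetical foo_connector wrapper around the
 * MST port:
 *
 *	static int foo_detect_ctx(struct drm_connector *connector,
 *				  struct drm_modeset_acquire_ctx *ctx,
 *				  bool force)
 *	{
 *		struct foo_connector *conn = to_foo_connector(connector);
 *
 *		return drm_dp_mst_detect_port(connector, ctx, &conn->mst_mgr,
 *					      conn->mst_port);
 *	}
 */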
3970 int
3971 drm_dp_mst_detect_port(struct drm_connector *connector,
3972 struct drm_modeset_acquire_ctx *ctx,
3973 struct drm_dp_mst_topology_mgr *mgr,
3974 struct drm_dp_mst_port *port)
3975 {
3976 int ret;
3977
3978 /* we need to search for the port in the mgr in case it's gone */
3979 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3980 if (!port)
3981 return connector_status_disconnected;
3982
3983 ret = drm_modeset_lock(&mgr->base.lock, ctx);
3984 if (ret)
3985 goto out;
3986
3987 ret = connector_status_disconnected;
3988
3989 if (!port->ddps)
3990 goto out;
3991
3992 switch (port->pdt) {
3993 case DP_PEER_DEVICE_NONE:
3994 case DP_PEER_DEVICE_MST_BRANCHING:
3995 if (!port->mcs)
3996 ret = connector_status_connected;
3997 break;
3998
3999 case DP_PEER_DEVICE_SST_SINK:
4000 ret = connector_status_connected;
4001 /* for logical ports - cache the EDID */
4002 if (port->port_num >= 8 && !port->cached_edid) {
4003 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
4004 }
4005 break;
4006 case DP_PEER_DEVICE_DP_LEGACY_CONV:
4007 if (port->ldps)
4008 ret = connector_status_connected;
4009 break;
4010 }
4011 out:
4012 drm_dp_mst_topology_put_port(port);
4013 return ret;
4014 }
4015 EXPORT_SYMBOL(drm_dp_mst_detect_port);
4016
4017 /**
4018 * drm_dp_mst_get_edid() - get EDID for an MST port
4019 * @connector: toplevel connector to get EDID for
4020 * @mgr: manager for this port
4021 * @port: unverified pointer to a port.
4022 *
4023 * This returns an EDID for the port connected to a connector.
4024 * It validates that the pointer still exists so the caller doesn't require
4025 * a reference.
4026 */
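/*
 * A common caller is &drm_connector_helper_funcs.get_modes; a sketch with a
 * hypothetical foo_connector holding the manager and port:
 *
 *	static int foo_get_modes(struct drm_connector *connector)
 *	{
 *		struct foo_connector *conn = to_foo_connector(connector);
 *		struct edid *edid;
 *		int ret;
 *
 *		edid = drm_dp_mst_get_edid(connector, &conn->mst_mgr,
 *					   conn->mst_port);
 *		drm_connector_update_edid_property(connector, edid);
 *		ret = drm_add_edid_modes(connector, edid);
 *		kfree(edid);
 *
 *		return ret;
 *	}
 */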
4027 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4028 {
4029 struct edid *edid = NULL;
4030
4031 /* we need to search for the port in the mgr in case it's gone */
4032 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4033 if (!port)
4034 return NULL;
4035
4036 if (port->cached_edid)
4037 edid = drm_edid_duplicate(port->cached_edid);
4038 else {
4039 edid = drm_get_edid(connector, &port->aux.ddc);
4040 }
4041 port->has_audio = drm_detect_monitor_audio(edid);
4042 drm_dp_mst_topology_put_port(port);
4043 return edid;
4044 }
4045 EXPORT_SYMBOL(drm_dp_mst_get_edid);
4046
4047 /**
4048 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
4049 * @mgr: manager to use
4050 * @pbn: payload bandwidth to convert into slots.
4051 *
4052 * Calculate the number of VCPI slots that will be required for the given PBN
4053 * value. This function is deprecated, and should not be used in atomic
4054 * drivers.
4055 *
4056 * RETURNS:
4057 * The total slots required for this port, or error.
4058 */
4059 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
4060 int pbn)
4061 {
4062 int num_slots;
4063
4064 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4065
4066 /* max. time slots - one slot for MTP header */
4067 if (num_slots > 63)
4068 return -ENOSPC;
4069 return num_slots;
4070 }
4071 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
4072
4073 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4074 struct drm_dp_vcpi *vcpi, int pbn, int slots)
4075 {
4076 int ret;
4077
4078 /* max. time slots - one slot for MTP header */
4079 if (slots > 63)
4080 return -ENOSPC;
4081
4082 vcpi->pbn = pbn;
4083 vcpi->aligned_pbn = slots * mgr->pbn_div;
4084 vcpi->num_slots = slots;
4085
4086 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
4087 if (ret < 0)
4088 return ret;
4089 return 0;
4090 }
4091
4092 /**
4093 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
4094 * @state: global atomic state
4095 * @mgr: MST topology manager for the port
4096 * @port: port to find vcpi slots for
4097 * @pbn: bandwidth required for the mode in PBN
4098 * @pbn_div: divider for DSC mode that takes FEC into account
4099 *
4100 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
4101 * may have had. Any atomic drivers which support MST must call this function
4102 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
4103 * current VCPI allocation for the new state, but only when
4104 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
4105 * to ensure compatibility with userspace applications that still use the
4106 * legacy modesetting UAPI.
4107 *
4108 * Allocations set by this function are not checked against the bandwidth
4109 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4110 *
4111 * Additionally, it is OK to call this function multiple times on the same
4112 * @port as needed. It is not OK, however, to call this function and
4113 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
4114 *
4115 * See also:
4116 * drm_dp_atomic_release_vcpi_slots()
4117 * drm_dp_mst_atomic_check()
4118 *
4119 * Returns:
4120 * Total slots in the atomic state assigned for this port, or a negative error
4121 * code if the port no longer exists
4122 */
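/*
 * A condensed sketch of the expected usage from an encoder's atomic_check
 * callback; the foo_*() names are assumptions, 24 bpp is picked arbitrarily
 * and error handling is trimmed:
 *
 *	static int foo_encoder_atomic_check(struct drm_encoder *encoder,
 *					    struct drm_crtc_state *crtc_state,
 *					    struct drm_connector_state *conn_state)
 *	{
 *		struct foo_connector *conn =
 *			to_foo_connector(conn_state->connector);
 *		int pbn, slots;
 *
 *		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
 *			return 0;
 *
 *		pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.crtc_clock,
 *					   24, false);
 *		slots = drm_dp_atomic_find_vcpi_slots(crtc_state->state,
 *						      &conn->mst_mgr,
 *						      conn->mst_port, pbn, 0);
 *
 *		return slots < 0 ? slots : 0;
 *	}
 */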
4123 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
4124 struct drm_dp_mst_topology_mgr *mgr,
4125 struct drm_dp_mst_port *port, int pbn,
4126 int pbn_div)
4127 {
4128 struct drm_dp_mst_topology_state *topology_state;
4129 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
4130 int prev_slots, prev_bw, req_slots;
4131
4132 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4133 if (IS_ERR(topology_state))
4134 return PTR_ERR(topology_state);
4135
4136 /* Find the current allocation for this port, if any */
4137 list_for_each_entry(pos, &topology_state->vcpis, next) {
4138 if (pos->port == port) {
4139 vcpi = pos;
4140 prev_slots = vcpi->vcpi;
4141 prev_bw = vcpi->pbn;
4142
4143 /*
4144 * This should never happen, unless the driver tries
4145 * releasing and allocating the same VCPI allocation,
4146 * which is an error
4147 */
4148 if (WARN_ON(!prev_slots)) {
4149 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
4150 port);
4151 return -EINVAL;
4152 }
4153
4154 break;
4155 }
4156 }
4157 if (!vcpi) {
4158 prev_slots = 0;
4159 prev_bw = 0;
4160 }
4161
4162 if (pbn_div <= 0)
4163 pbn_div = mgr->pbn_div;
4164
4165 req_slots = DIV_ROUND_UP(pbn, pbn_div);
4166
4167 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
4168 port->connector->base.id, port->connector->name,
4169 port, prev_slots, req_slots);
4170 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
4171 port->connector->base.id, port->connector->name,
4172 port, prev_bw, pbn);
4173
4174 /* Add the new allocation to the state */
4175 if (!vcpi) {
4176 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
4177 if (!vcpi)
4178 return -ENOMEM;
4179
4180 drm_dp_mst_get_port_malloc(port);
4181 vcpi->port = port;
4182 list_add(&vcpi->next, &topology_state->vcpis);
4183 }
4184 vcpi->vcpi = req_slots;
4185 vcpi->pbn = pbn;
4186
4187 return req_slots;
4188 }
4189 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
4190
4191 /**
4192 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
4193 * @state: global atomic state
4194 * @mgr: MST topology manager for the port
4195 * @port: The port to release the VCPI slots from
4196 *
4197 * Releases any VCPI slots that have been allocated to a port in the atomic
4198 * state. Any atomic drivers which support MST must call this function in
4199 * their &drm_connector_helper_funcs.atomic_check() callback when the
4200 * connector will no longer have VCPI allocated (e.g. because its CRTC was
4201 * removed) when it had VCPI allocated in the previous atomic state.
4202 *
4203 * It is OK to call this even if @port has been removed from the system.
4204 * Additionally, it is OK to call this function multiple times on the same
4205 * @port as needed. It is not OK, however, to call this function and
4206 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
4207 * phase.
4208 *
4209 * See also:
4210 * drm_dp_atomic_find_vcpi_slots()
4211 * drm_dp_mst_atomic_check()
4212 *
4213 * Returns:
4214 * 0 if all slots for this port were added back to
4215 * &drm_dp_mst_topology_state.avail_slots or negative error code
4216 */
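/*
 * Sketch of the matching release path from a connector's atomic_check
 * callback (hypothetical foo_* names, only the CRTC-removal case shown):
 *
 *	static int foo_connector_atomic_check(struct drm_connector *connector,
 *					      struct drm_atomic_state *state)
 *	{
 *		struct foo_connector *conn = to_foo_connector(connector);
 *		struct drm_connector_state *old_state =
 *			drm_atomic_get_old_connector_state(state, connector);
 *		struct drm_connector_state *new_state =
 *			drm_atomic_get_new_connector_state(state, connector);
 *
 *		if (old_state->crtc && !new_state->crtc)
 *			return drm_dp_atomic_release_vcpi_slots(state,
 *								&conn->mst_mgr,
 *								conn->mst_port);
 *
 *		return 0;
 *	}
 */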
4217 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
4218 struct drm_dp_mst_topology_mgr *mgr,
4219 struct drm_dp_mst_port *port)
4220 {
4221 struct drm_dp_mst_topology_state *topology_state;
4222 struct drm_dp_vcpi_allocation *pos;
4223 bool found = false;
4224
4225 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4226 if (IS_ERR(topology_state))
4227 return PTR_ERR(topology_state);
4228
4229 list_for_each_entry(pos, &topology_state->vcpis, next) {
4230 if (pos->port == port) {
4231 found = true;
4232 break;
4233 }
4234 }
4235 if (WARN_ON(!found)) {
4236 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
4237 port, &topology_state->base);
4238 return -EINVAL;
4239 }
4240
4241 DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
4242 if (pos->vcpi) {
4243 drm_dp_mst_put_port_malloc(port);
4244 pos->vcpi = 0;
4245 pos->pbn = 0;
4246 }
4247
4248 return 0;
4249 }
4250 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
4251
4252 /**
4253 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
4254 * @mgr: manager for this port
4255 * @port: port to allocate a virtual channel for.
4256 * @pbn: payload bandwidth number to request
4257 * @slots: returned number of slots for this PBN.
4258 */
4259 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4260 struct drm_dp_mst_port *port, int pbn, int slots)
4261 {
4262 int ret;
4263
4264 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4265 if (!port)
4266 return false;
4267
4268 if (slots < 0)
4269 return false;
4270
4271 if (port->vcpi.vcpi > 0) {
4272 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
4273 port->vcpi.vcpi, port->vcpi.pbn, pbn);
4274 if (pbn == port->vcpi.pbn) {
4275 drm_dp_mst_topology_put_port(port);
4276 return true;
4277 }
4278 }
4279
4280 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
4281 if (ret) {
4282 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
4283 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
4284 goto out;
4285 }
4286 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
4287 pbn, port->vcpi.num_slots);
4288
4289 /* Keep port allocated until its payload has been removed */
4290 drm_dp_mst_get_port_malloc(port);
4291 drm_dp_mst_topology_put_port(port);
4292 return true;
4293 out:
4294 return false;
4295 }
4296 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
4297
4298 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4299 {
4300 int slots = 0;
4301 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4302 if (!port)
4303 return slots;
4304
4305 slots = port->vcpi.num_slots;
4306 drm_dp_mst_topology_put_port(port);
4307 return slots;
4308 }
4309 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
4310
4311 /**
4312 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
4313 * @mgr: manager for this port
4314 * @port: unverified pointer to a port.
4315 *
4316 * This just resets the number of slots for the port's VCPI for later programming.
4317 */
4318 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4319 {
4320 /*
4321 * A port with VCPI will remain allocated until its VCPI is
4322 * released, no verified ref needed
4323 */
4324
4325 port->vcpi.num_slots = 0;
4326 }
4327 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
4328
4329 /**
4330 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
4331 * @mgr: manager for this port
4332 * @port: port to deallocate vcpi for
4333 *
4334 * This can be called unconditionally, regardless of whether
4335 * drm_dp_mst_allocate_vcpi() succeeded or not.
4336 */
4337 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4338 struct drm_dp_mst_port *port)
4339 {
4340 if (!port->vcpi.vcpi)
4341 return;
4342
4343 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
4344 port->vcpi.num_slots = 0;
4345 port->vcpi.pbn = 0;
4346 port->vcpi.aligned_pbn = 0;
4347 port->vcpi.vcpi = 0;
4348 drm_dp_mst_put_port_malloc(port);
4349 }
4350 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
4351
4352 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
4353 int id, struct drm_dp_payload *payload)
4354 {
4355 u8 payload_alloc[3], status;
4356 int ret;
4357 int retries = 0;
4358
4359 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
4360 DP_PAYLOAD_TABLE_UPDATED);
4361
4362 payload_alloc[0] = id;
4363 payload_alloc[1] = payload->start_slot;
4364 payload_alloc[2] = payload->num_slots;
4365
4366 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
4367 if (ret != 3) {
4368 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
4369 goto fail;
4370 }
4371
4372 retry:
4373 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4374 if (ret < 0) {
4375 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4376 goto fail;
4377 }
4378
4379 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
4380 retries++;
4381 if (retries < 20) {
4382 usleep_range(10000, 20000);
4383 goto retry;
4384 }
4385 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
4386 ret = -EINVAL;
4387 goto fail;
4388 }
4389 ret = 0;
4390 fail:
4391 return ret;
4392 }
4393
4394 static int do_get_act_status(struct drm_dp_aux *aux)
4395 {
4396 int ret;
4397 u8 status;
4398
4399 ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4400 if (ret < 0)
4401 return ret;
4402
4403 return status;
4404 }
4405
4406 /**
4407 * drm_dp_check_act_status() - Polls for ACT handled status.
4408 * @mgr: manager to use
4409 *
4410 * Tries waiting for the MST hub to finish updating its payload table by
4411 * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
4412 * take that long).
4413 *
4414 * Returns:
4415 * 0 if the ACT was handled in time, negative error code on failure.
4416 */
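/*
 * In the legacy (non-atomic-state) payload path this typically sits
 * between the two payload update steps; a rough sketch, with driver
 * specifics and error handling omitted:
 *
 *	drm_dp_update_payload_part1(mgr);
 *	... enable the transcoder / start transmitting the stream ...
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */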
4417 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4418 {
4419 /*
4420 * There doesn't seem to be any recommended retry count or timeout in
4421 * the MST specification. Since some hubs have been observed to take
4422 * over 1 second to update their payload allocations under certain
4423 * conditions, we use a rather large timeout value.
4424 */
4425 const int timeout_ms = 3000;
4426 int ret, status;
4427
4428 ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
4429 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
4430 200, timeout_ms * USEC_PER_MSEC);
4431 if (ret < 0 && status >= 0) {
4432 DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n",
4433 timeout_ms, status);
4434 return -EINVAL;
4435 } else if (status < 0) {
4436 /*
4437 * Failure here isn't unexpected - the hub may have
4438 * just been unplugged
4439 */
4440 DRM_DEBUG_KMS("Failed to read payload table status: %d\n",
4441 status);
4442 return status;
4443 }
4444
4445 return 0;
4446 }
4447 EXPORT_SYMBOL(drm_dp_check_act_status);
4448
4449 /**
4450 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
4451 * @clock: dot clock for the mode
4452 * @bpp: bpp for the mode.
4453 * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
4454 *
4455 * This uses the formula in the spec to calculate the PBN value for a mode.
4456 */
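/*
 * Worked example: a 1920x1080@60 mode (148500 kHz pixel clock) at 24 bpp
 * without DSC gives
 *
 *	PBN = DIV_ROUND_UP(148500 * 24 * 64 * 1006, 8 * 54 * 1000 * 1000)
 *	    = DIV_ROUND_UP(229464576000, 432000000)
 *	    = 532
 */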
4457 int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
4458 {
4459 /*
4460 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
4461 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on a
4462 * common multiplier to render an integer PBN for all link rate/lane
4463 * count combinations
4464 * calculate
4465 * peak_kbps *= (1006/1000)
4466 * peak_kbps *= (64/54)
4467 * peak_kbps *= 8 convert to bytes
4468 *
4469 * If the bpp is in units of 1/16, further divide by 16. Put this
4470 * factor in the numerator rather than the denominator to avoid
4471 * integer overflow
4472 */
4473
4474 if (dsc)
4475 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
4476 8 * 54 * 1000 * 1000);
4477
4478 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
4479 8 * 54 * 1000 * 1000);
4480 }
4481 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
4482
4483 /* we want to kick the TX after we've ack the up/down IRQs. */
4484 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4485 {
4486 queue_work(system_long_wq, &mgr->tx_work);
4487 }
4488
4489 static void drm_dp_mst_dump_mstb(struct seq_file *m,
4490 struct drm_dp_mst_branch *mstb)
4491 {
4492 struct drm_dp_mst_port *port;
4493 int tabs = mstb->lct;
4494 char prefix[10];
4495 int i;
4496
4497 for (i = 0; i < tabs; i++)
4498 prefix[i] = '\t';
4499 prefix[i] = '\0';
4500
4501 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
4502 list_for_each_entry(port, &mstb->ports, next) {
4503 seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
4504 if (port->mstb)
4505 drm_dp_mst_dump_mstb(m, port->mstb);
4506 }
4507 }
4508
4509 #define DP_PAYLOAD_TABLE_SIZE 64
4510
4511 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4512 char *buf)
4513 {
4514 int i;
4515
4516 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
4517 if (drm_dp_dpcd_read(mgr->aux,
4518 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
4519 &buf[i], 16) != 16)
4520 return false;
4521 }
4522 return true;
4523 }
4524
4525 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
4526 struct drm_dp_mst_port *port, char *name,
4527 int namelen)
4528 {
4529 struct edid *mst_edid;
4530
4531 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
4532 drm_edid_get_monitor_name(mst_edid, name, namelen);
4533 }
4534
4535 /**
4536 * drm_dp_mst_dump_topology(): dump topology to seq file.
4537 * @m: seq_file to dump output to
4538 * @mgr: manager to dump current topology for.
4539 *
4540 * helper to dump MST topology to a seq file for debugfs.
4541 */
4542 void drm_dp_mst_dump_topology(struct seq_file *m,
4543 struct drm_dp_mst_topology_mgr *mgr)
4544 {
4545 int i;
4546 struct drm_dp_mst_port *port;
4547
4548 mutex_lock(&mgr->lock);
4549 if (mgr->mst_primary)
4550 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
4551
4552 /* dump VCPIs */
4553 mutex_unlock(&mgr->lock);
4554
4555 mutex_lock(&mgr->payload_lock);
4556 seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
4557 mgr->max_payloads);
4558
4559 for (i = 0; i < mgr->max_payloads; i++) {
4560 if (mgr->proposed_vcpis[i]) {
4561 char name[14];
4562
4563 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
4564 fetch_monitor_name(mgr, port, name, sizeof(name));
4565 seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
4566 port->port_num, port->vcpi.vcpi,
4567 port->vcpi.num_slots,
4568 (*name != 0) ? name : "Unknown");
4569 } else
4570 seq_printf(m, "vcpi %d:unused\n", i);
4571 }
4572 for (i = 0; i < mgr->max_payloads; i++) {
4573 seq_printf(m, "payload %d: %d, %d, %d\n",
4574 i,
4575 mgr->payloads[i].payload_state,
4576 mgr->payloads[i].start_slot,
4577 mgr->payloads[i].num_slots);
4578
4579
4580 }
4581 mutex_unlock(&mgr->payload_lock);
4582
4583 mutex_lock(&mgr->lock);
4584 if (mgr->mst_primary) {
4585 u8 buf[DP_PAYLOAD_TABLE_SIZE];
4586 int ret;
4587
4588 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
4589 if (ret != DP_RECEIVER_CAP_SIZE) {
4590 seq_printf(m, "dpcd read failed\n");
4591 goto out;
4592 }
4593 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
4594
4595 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
4596 if (ret != 2) {
4597 seq_printf(m, "faux/mst read failed\n");
4598 goto out;
4599 }
4600 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
4601
4602 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
4603 if (ret != 1) {
4604 seq_printf(m, "mst ctrl read failed\n");
4605 goto out;
4606 }
4607 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
4608
4609 /* dump the standard OUI branch header */
4610 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
4611 if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
4612 seq_printf(m, "branch oui read failed\n");
4613 goto out;
4614 }
4615 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
4616
4617 for (i = 0x3; i < 0x8 && buf[i]; i++)
4618 seq_printf(m, "%c", buf[i]);
4619 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
4620 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
4621 if (dump_dp_payload_table(mgr, buf))
4622 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
4623 }
4624
4625 out:
4626 mutex_unlock(&mgr->lock);
4627
4628 }
4629 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
4630
4631 static void drm_dp_tx_work(struct work_struct *work)
4632 {
4633 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
4634
4635 mutex_lock(&mgr->qlock);
4636 if (!list_empty(&mgr->tx_msg_downq))
4637 process_single_down_tx_qlock(mgr);
4638 mutex_unlock(&mgr->qlock);
4639 }
4640
4641 static inline void
4642 drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
4643 {
4644 if (port->connector) {
4645 drm_connector_unregister(port->connector);
4646 drm_connector_put(port->connector);
4647 }
4648
4649 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
4650 drm_dp_mst_put_port_malloc(port);
4651 }
4652
4653 static inline void
4654 drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
4655 {
4656 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
4657 struct drm_dp_mst_port *port, *port_tmp;
4658 struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
4659 bool wake_tx = false;
4660
4661 mutex_lock(&mgr->lock);
4662 list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
4663 list_del(&port->next);
4664 drm_dp_mst_topology_put_port(port);
4665 }
4666 mutex_unlock(&mgr->lock);
4667
4668 /* drop any tx slot msg */
4669 mutex_lock(&mstb->mgr->qlock);
4670 list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
4671 if (txmsg->dst != mstb)
4672 continue;
4673
4674 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4675 list_del(&txmsg->next);
4676 wake_tx = true;
4677 }
4678 mutex_unlock(&mstb->mgr->qlock);
4679
4680 if (wake_tx)
4681 wake_up_all(&mstb->mgr->tx_waitq);
4682
4683 drm_dp_mst_put_mstb_malloc(mstb);
4684 }
4685
4686 static void drm_dp_delayed_destroy_work(struct work_struct *work)
4687 {
4688 struct drm_dp_mst_topology_mgr *mgr =
4689 container_of(work, struct drm_dp_mst_topology_mgr,
4690 delayed_destroy_work);
4691 bool send_hotplug = false, go_again;
4692
4693 /*
4694 * Not a regular list traverse as we have to drop the destroy
4695 * connector lock before destroying the mstb/port, to avoid AB->BA
4696 * ordering between this lock and the config mutex.
4697 */
4698 do {
4699 go_again = false;
4700
4701 for (;;) {
4702 struct drm_dp_mst_branch *mstb;
4703
4704 mutex_lock(&mgr->delayed_destroy_lock);
4705 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
4706 struct drm_dp_mst_branch,
4707 destroy_next);
4708 if (mstb)
4709 list_del(&mstb->destroy_next);
4710 mutex_unlock(&mgr->delayed_destroy_lock);
4711
4712 if (!mstb)
4713 break;
4714
4715 drm_dp_delayed_destroy_mstb(mstb);
4716 go_again = true;
4717 }
4718
4719 for (;;) {
4720 struct drm_dp_mst_port *port;
4721
4722 mutex_lock(&mgr->delayed_destroy_lock);
4723 port = list_first_entry_or_null(&mgr->destroy_port_list,
4724 struct drm_dp_mst_port,
4725 next);
4726 if (port)
4727 list_del(&port->next);
4728 mutex_unlock(&mgr->delayed_destroy_lock);
4729
4730 if (!port)
4731 break;
4732
4733 drm_dp_delayed_destroy_port(port);
4734 send_hotplug = true;
4735 go_again = true;
4736 }
4737 } while (go_again);
4738
4739 if (send_hotplug)
4740 drm_kms_helper_hotplug_event(mgr->dev);
4741 }
4742
4743 static struct drm_private_state *
4744 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
4745 {
4746 struct drm_dp_mst_topology_state *state, *old_state =
4747 to_dp_mst_topology_state(obj->state);
4748 struct drm_dp_vcpi_allocation *pos, *vcpi;
4749
4750 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
4751 if (!state)
4752 return NULL;
4753
4754 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
4755
4756 INIT_LIST_HEAD(&state->vcpis);
4757
4758 list_for_each_entry(pos, &old_state->vcpis, next) {
4759 /* Prune leftover freed VCPI allocations */
4760 if (!pos->vcpi)
4761 continue;
4762
4763 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
4764 if (!vcpi)
4765 goto fail;
4766
4767 drm_dp_mst_get_port_malloc(vcpi->port);
4768 list_add(&vcpi->next, &state->vcpis);
4769 }
4770
4771 return &state->base;
4772
4773 fail:
4774 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
4775 drm_dp_mst_put_port_malloc(pos->port);
4776 kfree(pos);
4777 }
4778 kfree(state);
4779
4780 return NULL;
4781 }
4782
4783 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
4784 struct drm_private_state *state)
4785 {
4786 struct drm_dp_mst_topology_state *mst_state =
4787 to_dp_mst_topology_state(state);
4788 struct drm_dp_vcpi_allocation *pos, *tmp;
4789
4790 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
4791 /* We only keep references to ports with non-zero VCPIs */
4792 if (pos->vcpi)
4793 drm_dp_mst_put_port_malloc(pos->port);
4794 kfree(pos);
4795 }
4796
4797 kfree(mst_state);
4798 }
4799
4800 static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
4801 struct drm_dp_mst_branch *branch)
4802 {
4803 while (port->parent) {
4804 if (port->parent == branch)
4805 return true;
4806
4807 if (port->parent->port_parent)
4808 port = port->parent->port_parent;
4809 else
4810 break;
4811 }
4812 return false;
4813 }
4814
4815 static int
4816 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
4817 struct drm_dp_mst_topology_state *state);
4818
4819 static int
4820 drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
4821 struct drm_dp_mst_topology_state *state)
4822 {
4823 struct drm_dp_vcpi_allocation *vcpi;
4824 struct drm_dp_mst_port *port;
4825 int pbn_used = 0, ret;
4826 bool found = false;
4827
4828 /* Check that we have at least one port in our state that's downstream
4829 * of this branch, otherwise we can skip this branch
4830 */
4831 list_for_each_entry(vcpi, &state->vcpis, next) {
4832 if (!vcpi->pbn ||
4833 !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
4834 continue;
4835
4836 found = true;
4837 break;
4838 }
4839 if (!found)
4840 return 0;
4841
4842 if (mstb->port_parent)
4843 DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
4844 mstb->port_parent->parent, mstb->port_parent,
4845 mstb);
4846 else
4847 DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
4848 mstb);
4849
4850 list_for_each_entry(port, &mstb->ports, next) {
4851 ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
4852 if (ret < 0)
4853 return ret;
4854
4855 pbn_used += ret;
4856 }
4857
4858 return pbn_used;
4859 }
4860
4861 static int
4862 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
4863 struct drm_dp_mst_topology_state *state)
4864 {
4865 struct drm_dp_vcpi_allocation *vcpi;
4866 int pbn_used = 0;
4867
4868 if (port->pdt == DP_PEER_DEVICE_NONE)
4869 return 0;
4870
4871 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
4872 bool found = false;
4873
4874 list_for_each_entry(vcpi, &state->vcpis, next) {
4875 if (vcpi->port != port)
4876 continue;
4877 if (!vcpi->pbn)
4878 return 0;
4879
4880 found = true;
4881 break;
4882 }
4883 if (!found)
4884 return 0;
4885
4886 /* This should never happen, as it means we tried to
4887 * set a mode before querying the full_pbn
4888 */
4889 if (WARN_ON(!port->full_pbn))
4890 return -EINVAL;
4891
4892 pbn_used = vcpi->pbn;
4893 } else {
4894 pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
4895 state);
4896 if (pbn_used <= 0)
4897 return pbn_used;
4898 }
4899
4900 if (pbn_used > port->full_pbn) {
4901 DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
4902 port->parent, port, pbn_used,
4903 port->full_pbn);
4904 return -ENOSPC;
4905 }
4906
4907 DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
4908 port->parent, port, pbn_used, port->full_pbn);
4909
4910 return pbn_used;
4911 }
4912
4913 static inline int
4914 drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
4915 struct drm_dp_mst_topology_state *mst_state)
4916 {
4917 struct drm_dp_vcpi_allocation *vcpi;
4918 int avail_slots = 63, payload_count = 0;
4919
4920 list_for_each_entry(vcpi, &mst_state->vcpis, next) {
4921 /* Releasing VCPI is always OK - even if the port is gone */
4922 if (!vcpi->vcpi) {
4923 DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
4924 vcpi->port);
4925 continue;
4926 }
4927
4928 DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
4929 vcpi->port, vcpi->vcpi);
4930
4931 avail_slots -= vcpi->vcpi;
4932 if (avail_slots < 0) {
4933 DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
4934 vcpi->port, mst_state,
4935 avail_slots + vcpi->vcpi);
4936 return -ENOSPC;
4937 }
4938
4939 if (++payload_count > mgr->max_payloads) {
4940 DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
4941 mgr, mst_state, mgr->max_payloads);
4942 return -EINVAL;
4943 }
4944 }
4945 DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
4946 mgr, mst_state, avail_slots,
4947 63 - avail_slots);
4948
4949 return 0;
4950 }
4951
4952 /**
4953 * drm_dp_mst_add_affected_dsc_crtcs
4954 * @state: Pointer to the new struct drm_dp_mst_topology_state
4955 * @mgr: MST topology manager
4956 *
4957 * Whenever there is a change in the MST topology, the DSC
4958 * configuration has to be recalculated, and therefore we need
4959 * to trigger a modeset on all affected CRTCs in that
4960 * topology.
4961 *
4962 * See also:
4963 * drm_dp_mst_atomic_enable_dsc()
4964 */
4965 int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
4966 {
4967 struct drm_dp_mst_topology_state *mst_state;
4968 struct drm_dp_vcpi_allocation *pos;
4969 struct drm_connector *connector;
4970 struct drm_connector_state *conn_state;
4971 struct drm_crtc *crtc;
4972 struct drm_crtc_state *crtc_state;
4973
4974 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
4975
4976 if (IS_ERR(mst_state))
4977 return -EINVAL;
4978
4979 list_for_each_entry(pos, &mst_state->vcpis, next) {
4980
4981 connector = pos->port->connector;
4982
4983 if (!connector)
4984 return -EINVAL;
4985
4986 conn_state = drm_atomic_get_connector_state(state, connector);
4987
4988 if (IS_ERR(conn_state))
4989 return PTR_ERR(conn_state);
4990
4991 crtc = conn_state->crtc;
4992
4993 if (WARN_ON(!crtc))
4994 return -EINVAL;
4995
4996 if (!drm_dp_mst_dsc_aux_for_port(pos->port))
4997 continue;
4998
4999 crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
5000
5001 if (IS_ERR(crtc_state))
5002 return PTR_ERR(crtc_state);
5003
5004 DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
5005 mgr, crtc);
5006
5007 crtc_state->mode_changed = true;
5008 }
5009 return 0;
5010 }
5011 EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
5012
5013 /**
5014 * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
5015 * @state: Pointer to the new drm_atomic_state
5016 * @port: Pointer to the affected MST Port
5017 * @pbn: Newly recalculated bw required for link with DSC enabled
5018 * @pbn_div: Divider to calculate correct number of pbn per slot
5019 * @enable: Boolean flag to enable or disable DSC on the port
5020 *
5021 * This function enables or disables DSC on the given port
5022 * by recalculating its VCPI from the PBN provided, and sets
5023 * the dsc_enabled flag to keep track of which ports have
5024 * DSC enabled.
5025 *
5026 */
5027 int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
5028 struct drm_dp_mst_port *port,
5029 int pbn, int pbn_div,
5030 bool enable)
5031 {
5032 struct drm_dp_mst_topology_state *mst_state;
5033 struct drm_dp_vcpi_allocation *pos;
5034 bool found = false;
5035 int vcpi = 0;
5036
5037 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
5038
5039 if (IS_ERR(mst_state))
5040 return PTR_ERR(mst_state);
5041
5042 list_for_each_entry(pos, &mst_state->vcpis, next) {
5043 if (pos->port == port) {
5044 found = true;
5045 break;
5046 }
5047 }
5048
5049 if (!found) {
5050 DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
5051 port, mst_state);
5052 return -EINVAL;
5053 }
5054
5055 if (pos->dsc_enabled == enable) {
5056 DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
5057 port, enable, pos->vcpi);
5058 vcpi = pos->vcpi;
5059 }
5060
5061 if (enable) {
5062 vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
5063 DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
5064 port, vcpi);
5065 if (vcpi < 0)
5066 return -EINVAL;
5067 }
5068
5069 pos->dsc_enabled = enable;
5070
5071 return vcpi;
5072 }
5073 EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
5074 /**
5075 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
5076 * atomic update is valid
5077 * @state: Pointer to the new &struct drm_dp_mst_topology_state
5078 *
5079 * Checks the given topology state for an atomic update to ensure that it's
5080 * valid. This includes checking whether there's enough bandwidth to support
5081 * the new VCPI allocations in the atomic update.
5082 *
5083 * Any atomic drivers supporting DP MST must make sure to call this after
5084 * checking the rest of their state in their
5085 * &drm_mode_config_funcs.atomic_check() callback.
5086 *
5087 * See also:
5088 * drm_dp_atomic_find_vcpi_slots()
5089 * drm_dp_atomic_release_vcpi_slots()
5090 *
5091 * Returns:
5092 *
5093 * 0 if the new state is valid, negative error code otherwise.
5094 */
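/*
 * Sketch of the expected call site in a driver's
 * &drm_mode_config_funcs.atomic_check implementation (foo_atomic_check is
 * a hypothetical name):
 *
 *	static int foo_atomic_check(struct drm_device *dev,
 *				    struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dp_mst_atomic_check(state);
 *	}
 */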
5095 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
5096 {
5097 struct drm_dp_mst_topology_mgr *mgr;
5098 struct drm_dp_mst_topology_state *mst_state;
5099 int i, ret = 0;
5100
5101 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
5102 if (!mgr->mst_state)
5103 continue;
5104
5105 ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
5106 if (ret)
5107 break;
5108
5109 mutex_lock(&mgr->lock);
5110 ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
5111 mst_state);
5112 mutex_unlock(&mgr->lock);
5113 if (ret < 0)
5114 break;
5115 else
5116 ret = 0;
5117 }
5118
5119 return ret;
5120 }
5121 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
5122
5123 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
5124 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
5125 .atomic_destroy_state = drm_dp_mst_destroy_state,
5126 };
5127 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
5128
5129 /**
5130 * drm_atomic_get_mst_topology_state: get MST topology state
5131 *
5132 * @state: global atomic state
5133 * @mgr: MST topology manager, also the private object in this case
5134 *
5135 * This function wraps drm_atomic_get_private_obj_state(), passing in the MST
5136 * atomic state vtable so that the private object state returned is that of an
5137 * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
5138 * caller to take care of the locking, so warn if we don't hold the connection_mutex.
5139 *
5140 * RETURNS:
5141 *
5142 * The MST topology state or error pointer.
5143 */
5144 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
5145 struct drm_dp_mst_topology_mgr *mgr)
5146 {
5147 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
5148 }
5149 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
5150
5151 /**
5152 * drm_dp_mst_topology_mgr_init - initialise a topology manager
5153 * @mgr: manager struct to initialise
5154 * @dev: device providing this structure - for i2c addition.
5155 * @aux: DP helper aux channel to talk to this device
5156 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
5157 * @max_payloads: maximum number of payloads this GPU can source
5158 * @conn_base_id: the connector object ID the MST device is connected to.
5159 *
5160 * Return 0 for success, or negative error code on failure
5161 */
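/*
 * Typical init-time usage, assuming a hypothetical foo_connector that
 * embeds the manager and its DP AUX channel; the transaction-size and
 * payload limits below are illustrative values, not recommendations:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&conn->mst_mgr, dev,
 *					   &conn->dp_aux, 16, 4,
 *					   conn->base.base.id);
 *	if (ret)
 *		return ret;
 */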
5162 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
5163 struct drm_device *dev, struct drm_dp_aux *aux,
5164 int max_dpcd_transaction_bytes,
5165 int max_payloads, int conn_base_id)
5166 {
5167 struct drm_dp_mst_topology_state *mst_state;
5168
5169 mutex_init(&mgr->lock);
5170 mutex_init(&mgr->qlock);
5171 mutex_init(&mgr->payload_lock);
5172 mutex_init(&mgr->delayed_destroy_lock);
5173 mutex_init(&mgr->up_req_lock);
5174 mutex_init(&mgr->probe_lock);
5175 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5176 mutex_init(&mgr->topology_ref_history_lock);
5177 #endif
5178 INIT_LIST_HEAD(&mgr->tx_msg_downq);
5179 INIT_LIST_HEAD(&mgr->destroy_port_list);
5180 INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
5181 INIT_LIST_HEAD(&mgr->up_req_list);
5182 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
5183 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
5184 INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
5185 INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
5186 init_waitqueue_head(&mgr->tx_waitq);
5187 mgr->dev = dev;
5188 mgr->aux = aux;
5189 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
5190 mgr->max_payloads = max_payloads;
5191 mgr->conn_base_id = conn_base_id;
5192 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
5193 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
5194 return -EINVAL;
5195 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
5196 if (!mgr->payloads)
5197 return -ENOMEM;
5198 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
5199 if (!mgr->proposed_vcpis)
5200 return -ENOMEM;
5201 set_bit(0, &mgr->payload_mask);
5202
5203 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
5204 if (mst_state == NULL)
5205 return -ENOMEM;
5206
5207 mst_state->mgr = mgr;
5208 INIT_LIST_HEAD(&mst_state->vcpis);
5209
5210 drm_atomic_private_obj_init(dev, &mgr->base,
5211 &mst_state->base,
5212 &drm_dp_mst_topology_state_funcs);
5213
5214 return 0;
5215 }
5216 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
5217
5218 /**
5219 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
5220 * @mgr: manager to destroy
5221 */
5222 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
5223 {
5224 drm_dp_mst_topology_mgr_set_mst(mgr, false);
5225 flush_work(&mgr->work);
5226 cancel_work_sync(&mgr->delayed_destroy_work);
5227 mutex_lock(&mgr->payload_lock);
5228 kfree(mgr->payloads);
5229 mgr->payloads = NULL;
5230 kfree(mgr->proposed_vcpis);
5231 mgr->proposed_vcpis = NULL;
5232 mutex_unlock(&mgr->payload_lock);
5233 mgr->dev = NULL;
5234 mgr->aux = NULL;
5235 drm_atomic_private_obj_fini(&mgr->base);
5236 mgr->funcs = NULL;
5237
5238 mutex_destroy(&mgr->delayed_destroy_lock);
5239 mutex_destroy(&mgr->payload_lock);
5240 mutex_destroy(&mgr->qlock);
5241 mutex_destroy(&mgr->lock);
5242 mutex_destroy(&mgr->up_req_lock);
5243 mutex_destroy(&mgr->probe_lock);
5244 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5245 mutex_destroy(&mgr->topology_ref_history_lock);
5246 #endif
5247 }
5248 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
5249
5250 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
5251 {
5252 int i;
5253
5254 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
5255 return false;
5256
5257 for (i = 0; i < num - 1; i++) {
5258 if (msgs[i].flags & I2C_M_RD ||
5259 msgs[i].len > 0xff)
5260 return false;
5261 }
5262
5263 return msgs[num - 1].flags & I2C_M_RD &&
5264 msgs[num - 1].len <= 0xff;
5265 }
5266
5267 /* I2C device */
5268 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
5269 int num)
5270 {
5271 struct drm_dp_aux *aux = adapter->algo_data;
5272 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
5273 struct drm_dp_mst_branch *mstb;
5274 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5275 unsigned int i;
5276 struct drm_dp_sideband_msg_req_body msg;
5277 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5278 int ret;
5279
5280 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
5281 if (!mstb)
5282 return -EREMOTEIO;
5283
5284 if (!remote_i2c_read_ok(msgs, num)) {
5285 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
5286 ret = -EIO;
5287 goto out;
5288 }
5289
5290 memset(&msg, 0, sizeof(msg));
5291 msg.req_type = DP_REMOTE_I2C_READ;
5292 msg.u.i2c_read.num_transactions = num - 1;
5293 msg.u.i2c_read.port_number = port->port_num;
5294 for (i = 0; i < num - 1; i++) {
5295 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
5296 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
5297 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
5298 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
5299 }
5300 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
5301 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
5302
5303 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5304 if (!txmsg) {
5305 ret = -ENOMEM;
5306 goto out;
5307 }
5308
5309 txmsg->dst = mstb;
5310 drm_dp_encode_sideband_req(&msg, txmsg);
5311
5312 drm_dp_queue_down_tx(mgr, txmsg);
5313
5314 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5315 if (ret > 0) {
5316
5317 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5318 ret = -EREMOTEIO;
5319 goto out;
5320 }
5321 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
5322 ret = -EIO;
5323 goto out;
5324 }
5325 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
5326 ret = num;
5327 }
5328 out:
5329 kfree(txmsg);
5330 drm_dp_mst_topology_put_mstb(mstb);
5331 return ret;
5332 }
5333
5334 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
5335 {
5336 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
5337 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
5338 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
5339 I2C_FUNC_10BIT_ADDR;
5340 }
5341
5342 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
5343 .functionality = drm_dp_mst_i2c_functionality,
5344 .master_xfer = drm_dp_mst_i2c_xfer,
5345 };
5346
5347 /**
5348 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
5349 * @aux: DisplayPort AUX channel
5350 *
5351 * Returns 0 on success or a negative error code on failure.
5352 */
5353 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
5354 {
5355 aux->ddc.algo = &drm_dp_mst_i2c_algo;
5356 aux->ddc.algo_data = aux;
5357 aux->ddc.retries = 3;
5358
5359 aux->ddc.class = I2C_CLASS_DDC;
5360 aux->ddc.owner = THIS_MODULE;
5361 aux->ddc.dev.parent = aux->dev;
5362 aux->ddc.dev.of_node = aux->dev->of_node;
5363
5364 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
5365 sizeof(aux->ddc.name));
5366
5367 return i2c_add_adapter(&aux->ddc);
5368 }
5369
5370 /**
5371 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
5372 * @aux: DisplayPort AUX channel
5373 */
5374 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
5375 {
5376 i2c_del_adapter(&aux->ddc);
5377 }
5378
5379 /**
5380 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
5381 * @port: The port to check
5382 *
5383 * A single physical MST hub object can be represented in the topology
5384 * by multiple branches, with virtual ports between those branches.
5385 *
5386 * As of DP 1.4, an MST hub with internal (virtual) ports must expose
5387 * certain DPCD registers over those ports. See sections 2.6.1.1.1
5388 * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
5389 *
5390 * May acquire mgr->lock
5391 *
5392 * Returns:
5393 * true if the port is a virtual DP peer device, false otherwise
5394 */
5395 static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
5396 {
5397 struct drm_dp_mst_port *downstream_port;
5398
5399 if (!port || port->dpcd_rev < DP_DPCD_REV_14)
5400 return false;
5401
5402 /* Virtual DP Sink (Internal Display Panel) */
5403 if (port->port_num >= 8)
5404 return true;
5405
5406 /* DP-to-HDMI Protocol Converter */
5407 if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
5408 !port->mcs &&
5409 port->ldps)
5410 return true;
5411
5412 /* DP-to-DP */
5413 mutex_lock(&port->mgr->lock);
5414 if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
5415 port->mstb &&
5416 port->mstb->num_ports == 2) {
5417 list_for_each_entry(downstream_port, &port->mstb->ports, next) {
5418 if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
5419 !downstream_port->input) {
5420 mutex_unlock(&port->mgr->lock);
5421 return true;
5422 }
5423 }
5424 }
5425 mutex_unlock(&port->mgr->lock);
5426
5427 return false;
5428 }
5429
5430 /**
5431 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
5432 * @port: The port to check. A leaf of the MST tree with an attached display.
5433 *
5434 * Depending on the situation, DSC may be enabled via the endpoint aux,
5435 * the immediately upstream aux, or the connector's physical aux.
5436 *
5437 * This is both the correct aux to read DSC_CAPABILITY and the
5438 * correct aux to write DSC_ENABLED.
5439 *
5440 * This operation can be expensive (up to four aux reads), so
5441 * the caller should cache the return value.
5442 *
5443 * Returns:
5444 * NULL if DSC cannot be enabled on this port, otherwise the aux device
5445 */
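/*
 * Sketch of a caller enabling decompression through whatever aux this
 * helper picks (error handling omitted, and whether to enable DSC at all
 * is a driver policy decision):
 *
 *	struct drm_dp_aux *dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 *
 *	if (dsc_aux)
 *		drm_dp_dpcd_writeb(dsc_aux, DP_DSC_ENABLE,
 *				   DP_DECOMPRESSION_EN);
 */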
5446 struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
5447 {
5448 struct drm_dp_mst_port *immediate_upstream_port;
5449 struct drm_dp_mst_port *fec_port;
5450 struct drm_dp_desc desc = { };
5451 u8 endpoint_fec;
5452 u8 endpoint_dsc;
5453
5454 if (!port)
5455 return NULL;
5456
5457 if (port->parent->port_parent)
5458 immediate_upstream_port = port->parent->port_parent;
5459 else
5460 immediate_upstream_port = NULL;
5461
5462 fec_port = immediate_upstream_port;
5463 while (fec_port) {
5464 /*
5465 * Each physical link (i.e. not a virtual port) between the
5466 * output and the primary device must support FEC
5467 */
5468 if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
5469 !fec_port->fec_capable)
5470 return NULL;
5471
5472 fec_port = fec_port->parent->port_parent;
5473 }
5474
5475 /* DP-to-DP peer device */
5476 if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
5477 u8 upstream_dsc;
5478
5479 if (drm_dp_dpcd_read(&port->aux,
5480 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5481 return NULL;
5482 if (drm_dp_dpcd_read(&port->aux,
5483 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5484 return NULL;
5485 if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
5486 DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
5487 return NULL;
5488
5489 /* Endpoint decompression with DP-to-DP peer device */
5490 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5491 (endpoint_fec & DP_FEC_CAPABLE) &&
5492 (upstream_dsc & 0x2) /* DSC passthrough */)
5493 return &port->aux;
5494
5495 /* Virtual DPCD decompression with DP-to-DP peer device */
5496 return &immediate_upstream_port->aux;
5497 }
5498
5499 /* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
5500 if (drm_dp_mst_is_virtual_dpcd(port))
5501 return &port->aux;
5502
5503 /*
5504 * Synaptics quirk
5505 * Applies to ports for which:
5506 * - Physical aux has Synaptics OUI
5507 * - DPv1.4 or higher
5508 * - Port is on primary branch device
5509 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
5510 */
5511 if (drm_dp_read_desc(port->mgr->aux, &desc, true))
5512 return NULL;
5513
5514 if (drm_dp_has_quirk(&desc, 0,
5515 DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
5516 port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
5517 port->parent == port->mgr->mst_primary) {
5518 u8 downstreamport;
5519
5520 if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
5521 &downstreamport, 1) < 0)
5522 return NULL;
5523
5524 if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
5525 ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
5526 != DP_DWN_STRM_PORT_TYPE_ANALOG))
5527 return port->mgr->aux;
5528 }
5529
5530 /*
5531 * The check below verifies if the MST sink
5532 * connected to the GPU is capable of DSC -
5533 * therefore the endpoint needs to be
5534 * both DSC and FEC capable.
5535 */
5536 if (drm_dp_dpcd_read(&port->aux,
5537 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5538 return NULL;
5539 if (drm_dp_dpcd_read(&port->aux,
5540 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5541 return NULL;
5542 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5543 (endpoint_fec & DP_FEC_CAPABLE))
5544 return &port->aux;
5545
5546 return NULL;
5547 }
5548 EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);