/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
31 #include <linux/dma-mapping.h>
32 #include <linux/hdmi.h>
33 #include <linux/component.h>
34 #include <linux/iopoll.h>
36 #include <drm/display/drm_dp_helper.h>
37 #include <drm/display/drm_scdc_helper.h>
38 #include <drm/drm_atomic.h>
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_fb_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_vblank.h>
45 #include <nvif/push507c.h>
47 #include <nvif/class.h>
48 #include <nvif/cl0002.h>
49 #include <nvif/event.h>
50 #include <nvif/if0012.h>
51 #include <nvif/if0014.h>
52 #include <nvif/timer.h>
54 #include <nvhw/class/cl507c.h>
55 #include <nvhw/class/cl507d.h>
56 #include <nvhw/class/cl837d.h>
57 #include <nvhw/class/cl887d.h>
58 #include <nvhw/class/cl907d.h>
59 #include <nvhw/class/cl917d.h>
61 #include "nouveau_drv.h"
62 #include "nouveau_dma.h"
63 #include "nouveau_gem.h"
64 #include "nouveau_connector.h"
65 #include "nouveau_encoder.h"
66 #include "nouveau_fence.h"
67 #include "nv50_display.h"
69 #include <subdev/bios/dp.h>
71 /******************************************************************************
73 *****************************************************************************/
76 nv50_chan_create(struct nvif_device
*device
, struct nvif_object
*disp
,
77 const s32
*oclass
, u8 head
, void *data
, u32 size
,
78 struct nv50_chan
*chan
)
80 struct nvif_sclass
*sclass
;
83 chan
->device
= device
;
85 ret
= n
= nvif_object_sclass_get(disp
, &sclass
);
90 for (i
= 0; i
< n
; i
++) {
91 if (sclass
[i
].oclass
== oclass
[0]) {
92 ret
= nvif_object_ctor(disp
, "kmsChan", 0,
93 oclass
[0], data
, size
,
96 nvif_object_map(&chan
->user
, NULL
, 0);
97 nvif_object_sclass_put(&sclass
);
104 nvif_object_sclass_put(&sclass
);
109 nv50_chan_destroy(struct nv50_chan
*chan
)
111 nvif_object_dtor(&chan
->user
);
114 /******************************************************************************
116 *****************************************************************************/
119 nv50_dmac_destroy(struct nv50_dmac
*dmac
)
121 nvif_object_dtor(&dmac
->vram
);
122 nvif_object_dtor(&dmac
->sync
);
124 nv50_chan_destroy(&dmac
->base
);
126 nvif_mem_dtor(&dmac
->_push
.mem
);
130 nv50_dmac_kick(struct nvif_push
*push
)
132 struct nv50_dmac
*dmac
= container_of(push
, typeof(*dmac
), _push
);
134 dmac
->cur
= push
->cur
- (u32 __iomem
*)dmac
->_push
.mem
.object
.map
.ptr
;
135 if (dmac
->put
!= dmac
->cur
) {
136 /* Push buffer fetches are not coherent with BAR1, we need to ensure
137 * writes have been flushed right through to VRAM before writing PUT.
139 if (dmac
->push
->mem
.type
& NVIF_MEM_VRAM
) {
140 struct nvif_device
*device
= dmac
->base
.device
;
141 nvif_wr32(&device
->object
, 0x070000, 0x00000001);
142 nvif_msec(device
, 2000,
143 if (!(nvif_rd32(&device
->object
, 0x070000) & 0x00000002))
148 NVIF_WV32(&dmac
->base
.user
, NV507C
, PUT
, PTR
, dmac
->cur
);
149 dmac
->put
= dmac
->cur
;
152 push
->bgn
= push
->cur
;
156 nv50_dmac_free(struct nv50_dmac
*dmac
)
158 u32 get
= NVIF_RV32(&dmac
->base
.user
, NV507C
, GET
, PTR
);
159 if (get
> dmac
->cur
) /* NVIDIA stay 5 away from GET, do the same. */
160 return get
- dmac
->cur
- 5;
161 return dmac
->max
- dmac
->cur
;
165 nv50_dmac_wind(struct nv50_dmac
*dmac
)
167 /* Wait for GET to depart from the beginning of the push buffer to
168 * prevent writing PUT == GET, which would be ignored by HW.
170 u32 get
= NVIF_RV32(&dmac
->base
.user
, NV507C
, GET
, PTR
);
172 /* Corner-case, HW idle, but non-committed work pending. */
174 nv50_dmac_kick(dmac
->push
);
176 if (nvif_msec(dmac
->base
.device
, 2000,
177 if (NVIF_TV32(&dmac
->base
.user
, NV507C
, GET
, PTR
, >, 0))
183 PUSH_RSVD(dmac
->push
, PUSH_JUMP(dmac
->push
, 0));
189 nv50_dmac_wait(struct nvif_push
*push
, u32 size
)
191 struct nv50_dmac
*dmac
= container_of(push
, typeof(*dmac
), _push
);
194 if (WARN_ON(size
> dmac
->max
))
197 dmac
->cur
= push
->cur
- (u32 __iomem
*)dmac
->_push
.mem
.object
.map
.ptr
;
198 if (dmac
->cur
+ size
>= dmac
->max
) {
199 int ret
= nv50_dmac_wind(dmac
);
203 push
->cur
= dmac
->_push
.mem
.object
.map
.ptr
;
204 push
->cur
= push
->cur
+ dmac
->cur
;
205 nv50_dmac_kick(push
);
208 if (nvif_msec(dmac
->base
.device
, 2000,
209 if ((free
= nv50_dmac_free(dmac
)) >= size
)
216 push
->bgn
= dmac
->_push
.mem
.object
.map
.ptr
;
217 push
->bgn
= push
->bgn
+ dmac
->cur
;
218 push
->cur
= push
->bgn
;
219 push
->end
= push
->cur
+ free
;
223 MODULE_PARM_DESC(kms_vram_pushbuf
, "Place EVO/NVD push buffers in VRAM (default: auto)");
224 static int nv50_dmac_vram_pushbuf
= -1;
225 module_param_named(kms_vram_pushbuf
, nv50_dmac_vram_pushbuf
, int, 0400);
228 nv50_dmac_create(struct nvif_device
*device
, struct nvif_object
*disp
,
229 const s32
*oclass
, u8 head
, void *data
, u32 size
, s64 syncbuf
,
230 struct nv50_dmac
*dmac
)
232 struct nouveau_cli
*cli
= (void *)device
->object
.client
;
233 struct nvif_disp_chan_v0
*args
= data
;
234 u8 type
= NVIF_MEM_COHERENT
;
237 mutex_init(&dmac
->lock
);
239 /* Pascal added support for 47-bit physical addresses, but some
240 * parts of EVO still only accept 40-bit PAs.
242 * To avoid issues on systems with large amounts of RAM, and on
243 * systems where an IOMMU maps pages at a high address, we need
244 * to allocate push buffers in VRAM instead.
246 * This appears to match NVIDIA's behaviour on Pascal.
248 if ((nv50_dmac_vram_pushbuf
> 0) ||
249 (nv50_dmac_vram_pushbuf
< 0 && device
->info
.family
== NV_DEVICE_INFO_V0_PASCAL
))
250 type
|= NVIF_MEM_VRAM
;
252 ret
= nvif_mem_ctor_map(&cli
->mmu
, "kmsChanPush", type
, 0x1000,
257 dmac
->ptr
= dmac
->_push
.mem
.object
.map
.ptr
;
258 dmac
->_push
.wait
= nv50_dmac_wait
;
259 dmac
->_push
.kick
= nv50_dmac_kick
;
260 dmac
->push
= &dmac
->_push
;
261 dmac
->push
->bgn
= dmac
->_push
.mem
.object
.map
.ptr
;
262 dmac
->push
->cur
= dmac
->push
->bgn
;
263 dmac
->push
->end
= dmac
->push
->bgn
;
264 dmac
->max
= 0x1000/4 - 1;
266 /* EVO channels are affected by a HW bug where the last 12 DWORDs
267 * of the push buffer aren't able to be used safely.
269 if (disp
->oclass
< GV100_DISP
)
272 args
->pushbuf
= nvif_handle(&dmac
->_push
.mem
.object
);
274 ret
= nv50_chan_create(device
, disp
, oclass
, head
, data
, size
,
282 ret
= nvif_object_ctor(&dmac
->base
.user
, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF
,
284 &(struct nv_dma_v0
) {
285 .target
= NV_DMA_V0_TARGET_VRAM
,
286 .access
= NV_DMA_V0_ACCESS_RDWR
,
287 .start
= syncbuf
+ 0x0000,
288 .limit
= syncbuf
+ 0x0fff,
289 }, sizeof(struct nv_dma_v0
),
294 ret
= nvif_object_ctor(&dmac
->base
.user
, "kmsVramCtxDma", NV50_DISP_HANDLE_VRAM
,
296 &(struct nv_dma_v0
) {
297 .target
= NV_DMA_V0_TARGET_VRAM
,
298 .access
= NV_DMA_V0_ACCESS_RDWR
,
300 .limit
= device
->info
.ram_user
- 1,
301 }, sizeof(struct nv_dma_v0
),
309 /******************************************************************************
310 * Output path helpers
311 *****************************************************************************/
313 nv50_outp_dump_caps(struct nouveau_drm
*drm
,
314 struct nouveau_encoder
*outp
)
316 NV_DEBUG(drm
, "%s caps: dp_interlace=%d\n",
317 outp
->base
.base
.name
, outp
->caps
.dp_interlace
);
321 nv50_outp_atomic_check_view(struct drm_encoder
*encoder
,
322 struct drm_crtc_state
*crtc_state
,
323 struct drm_connector_state
*conn_state
,
324 struct drm_display_mode
*native_mode
)
326 struct drm_display_mode
*adjusted_mode
= &crtc_state
->adjusted_mode
;
327 struct drm_display_mode
*mode
= &crtc_state
->mode
;
328 struct drm_connector
*connector
= conn_state
->connector
;
329 struct nouveau_conn_atom
*asyc
= nouveau_conn_atom(conn_state
);
330 struct nouveau_drm
*drm
= nouveau_drm(encoder
->dev
);
332 NV_ATOMIC(drm
, "%s atomic_check\n", encoder
->name
);
333 asyc
->scaler
.full
= false;
337 if (asyc
->scaler
.mode
== DRM_MODE_SCALE_NONE
) {
338 switch (connector
->connector_type
) {
339 case DRM_MODE_CONNECTOR_LVDS
:
340 case DRM_MODE_CONNECTOR_eDP
:
341 /* Don't force scaler for EDID modes with
342 * same size as the native one (e.g. different
345 if (mode
->hdisplay
== native_mode
->hdisplay
&&
346 mode
->vdisplay
== native_mode
->vdisplay
&&
347 mode
->type
& DRM_MODE_TYPE_DRIVER
)
350 asyc
->scaler
.full
= true;
359 if (!drm_mode_equal(adjusted_mode
, mode
)) {
360 drm_mode_copy(adjusted_mode
, mode
);
361 crtc_state
->mode_changed
= true;
368 nv50_outp_atomic_fix_depth(struct drm_encoder
*encoder
, struct drm_crtc_state
*crtc_state
)
370 struct nv50_head_atom
*asyh
= nv50_head_atom(crtc_state
);
371 struct nouveau_encoder
*nv_encoder
= nouveau_encoder(encoder
);
372 struct drm_display_mode
*mode
= &asyh
->state
.adjusted_mode
;
373 unsigned int max_rate
, mode_rate
;
375 switch (nv_encoder
->dcb
->type
) {
377 max_rate
= nv_encoder
->dp
.link_nr
* nv_encoder
->dp
.link_bw
;
379 /* we don't support more than 10 anyway */
380 asyh
->or.bpc
= min_t(u8
, asyh
->or.bpc
, 10);
382 /* reduce the bpc until it works out */
383 while (asyh
->or.bpc
> 6) {
384 mode_rate
= DIV_ROUND_UP(mode
->clock
* asyh
->or.bpc
* 3, 8);
385 if (mode_rate
<= max_rate
)
397 nv50_outp_atomic_check(struct drm_encoder
*encoder
,
398 struct drm_crtc_state
*crtc_state
,
399 struct drm_connector_state
*conn_state
)
401 struct drm_connector
*connector
= conn_state
->connector
;
402 struct nouveau_connector
*nv_connector
= nouveau_connector(connector
);
403 struct nv50_head_atom
*asyh
= nv50_head_atom(crtc_state
);
406 ret
= nv50_outp_atomic_check_view(encoder
, crtc_state
, conn_state
,
407 nv_connector
->native_mode
);
411 if (crtc_state
->mode_changed
|| crtc_state
->connectors_changed
)
412 asyh
->or.bpc
= connector
->display_info
.bpc
;
414 /* We might have to reduce the bpc */
415 nv50_outp_atomic_fix_depth(encoder
, crtc_state
);
420 struct nouveau_connector
*
421 nv50_outp_get_new_connector(struct drm_atomic_state
*state
, struct nouveau_encoder
*outp
)
423 struct drm_connector
*connector
;
424 struct drm_connector_state
*connector_state
;
425 struct drm_encoder
*encoder
= to_drm_encoder(outp
);
428 for_each_new_connector_in_state(state
, connector
, connector_state
, i
) {
429 if (connector_state
->best_encoder
== encoder
)
430 return nouveau_connector(connector
);
436 struct nouveau_connector
*
437 nv50_outp_get_old_connector(struct drm_atomic_state
*state
, struct nouveau_encoder
*outp
)
439 struct drm_connector
*connector
;
440 struct drm_connector_state
*connector_state
;
441 struct drm_encoder
*encoder
= to_drm_encoder(outp
);
444 for_each_old_connector_in_state(state
, connector
, connector_state
, i
) {
445 if (connector_state
->best_encoder
== encoder
)
446 return nouveau_connector(connector
);
452 static struct nouveau_crtc
*
453 nv50_outp_get_new_crtc(const struct drm_atomic_state
*state
, const struct nouveau_encoder
*outp
)
455 struct drm_crtc
*crtc
;
456 struct drm_crtc_state
*crtc_state
;
457 const u32 mask
= drm_encoder_mask(&outp
->base
.base
);
460 for_each_new_crtc_in_state(state
, crtc
, crtc_state
, i
) {
461 if (crtc_state
->encoder_mask
& mask
)
462 return nouveau_crtc(crtc
);
468 /******************************************************************************
470 *****************************************************************************/
472 nv50_dac_atomic_disable(struct drm_encoder
*encoder
, struct drm_atomic_state
*state
)
474 struct nouveau_encoder
*nv_encoder
= nouveau_encoder(encoder
);
475 struct nv50_core
*core
= nv50_disp(encoder
->dev
)->core
;
476 const u32 ctrl
= NVDEF(NV507D
, DAC_SET_CONTROL
, OWNER
, NONE
);
478 core
->func
->dac
->ctrl(core
, nv_encoder
->outp
.or.id
, ctrl
, NULL
);
479 nv_encoder
->crtc
= NULL
;
480 nvif_outp_release(&nv_encoder
->outp
);
484 nv50_dac_atomic_enable(struct drm_encoder
*encoder
, struct drm_atomic_state
*state
)
486 struct nouveau_encoder
*nv_encoder
= nouveau_encoder(encoder
);
487 struct nouveau_crtc
*nv_crtc
= nv50_outp_get_new_crtc(state
, nv_encoder
);
488 struct nv50_head_atom
*asyh
=
489 nv50_head_atom(drm_atomic_get_new_crtc_state(state
, &nv_crtc
->base
));
490 struct nv50_core
*core
= nv50_disp(encoder
->dev
)->core
;
493 switch (nv_crtc
->index
) {
494 case 0: ctrl
|= NVDEF(NV507D
, DAC_SET_CONTROL
, OWNER
, HEAD0
); break;
495 case 1: ctrl
|= NVDEF(NV507D
, DAC_SET_CONTROL
, OWNER
, HEAD1
); break;
496 case 2: ctrl
|= NVDEF(NV907D
, DAC_SET_CONTROL
, OWNER_MASK
, HEAD2
); break;
497 case 3: ctrl
|= NVDEF(NV907D
, DAC_SET_CONTROL
, OWNER_MASK
, HEAD3
); break;
503 ctrl
|= NVDEF(NV507D
, DAC_SET_CONTROL
, PROTOCOL
, RGB_CRT
);
505 nvif_outp_acquire_rgb_crt(&nv_encoder
->outp
);
507 core
->func
->dac
->ctrl(core
, nv_encoder
->outp
.or.id
, ctrl
, asyh
);
510 nv_encoder
->crtc
= &nv_crtc
->base
;
513 static enum drm_connector_status
514 nv50_dac_detect(struct drm_encoder
*encoder
, struct drm_connector
*connector
)
516 struct nouveau_encoder
*nv_encoder
= nouveau_encoder(encoder
);
520 loadval
= nouveau_drm(encoder
->dev
)->vbios
.dactestval
;
524 ret
= nvif_outp_load_detect(&nv_encoder
->outp
, loadval
);
526 return connector_status_disconnected
;
528 return connector_status_connected
;
531 static const struct drm_encoder_helper_funcs
533 .atomic_check
= nv50_outp_atomic_check
,
534 .atomic_enable
= nv50_dac_atomic_enable
,
535 .atomic_disable
= nv50_dac_atomic_disable
,
536 .detect
= nv50_dac_detect
540 nv50_dac_destroy(struct drm_encoder
*encoder
)
542 struct nouveau_encoder
*nv_encoder
= nouveau_encoder(encoder
);
544 nvif_outp_dtor(&nv_encoder
->outp
);
546 drm_encoder_cleanup(encoder
);
550 static const struct drm_encoder_funcs
552 .destroy
= nv50_dac_destroy
,
556 nv50_dac_create(struct drm_connector
*connector
, struct dcb_output
*dcbe
)
558 struct nouveau_drm
*drm
= nouveau_drm(connector
->dev
);
559 struct nv50_disp
*disp
= nv50_disp(connector
->dev
);
560 struct nvkm_i2c
*i2c
= nvxx_i2c(&drm
->client
.device
);
561 struct nvkm_i2c_bus
*bus
;
562 struct nouveau_encoder
*nv_encoder
;
563 struct drm_encoder
*encoder
;
564 int type
= DRM_MODE_ENCODER_DAC
;
566 nv_encoder
= kzalloc(sizeof(*nv_encoder
), GFP_KERNEL
);
569 nv_encoder
->dcb
= dcbe
;
571 bus
= nvkm_i2c_bus_find(i2c
, dcbe
->i2c_index
);
573 nv_encoder
->i2c
= &bus
->i2c
;
575 encoder
= to_drm_encoder(nv_encoder
);
576 encoder
->possible_crtcs
= dcbe
->heads
;
577 encoder
->possible_clones
= 0;
578 drm_encoder_init(connector
->dev
, encoder
, &nv50_dac_func
, type
,
579 "dac-%04x-%04x", dcbe
->hasht
, dcbe
->hashm
);
580 drm_encoder_helper_add(encoder
, &nv50_dac_help
);
582 drm_connector_attach_encoder(connector
, encoder
);
583 return nvif_outp_ctor(disp
->disp
, nv_encoder
->base
.base
.name
, dcbe
->id
, &nv_encoder
->outp
);
587 * audio component binding for ELD notification
590 nv50_audio_component_eld_notify(struct drm_audio_component
*acomp
, int port
,
593 if (acomp
&& acomp
->audio_ops
&& acomp
->audio_ops
->pin_eld_notify
)
594 acomp
->audio_ops
->pin_eld_notify(acomp
->audio_ops
->audio_ptr
,
599 nv50_audio_component_get_eld(struct device
*kdev
, int port
, int dev_id
,
600 bool *enabled
, unsigned char *buf
, int max_bytes
)
602 struct drm_device
*drm_dev
= dev_get_drvdata(kdev
);
603 struct nouveau_drm
*drm
= nouveau_drm(drm_dev
);
604 struct drm_encoder
*encoder
;
605 struct nouveau_encoder
*nv_encoder
;
606 struct nouveau_crtc
*nv_crtc
;
611 mutex_lock(&drm
->audio
.lock
);
613 drm_for_each_encoder(encoder
, drm
->dev
) {
614 struct nouveau_connector
*nv_connector
= NULL
;
616 if (encoder
->encoder_type
== DRM_MODE_ENCODER_DPMST
)
619 nv_encoder
= nouveau_encoder(encoder
);
620 nv_connector
= nouveau_connector(nv_encoder
->audio
.connector
);
621 nv_crtc
= nouveau_crtc(nv_encoder
->crtc
);
623 if (!nv_crtc
|| nv_encoder
->outp
.or.id
!= port
|| nv_crtc
->index
!= dev_id
)
626 *enabled
= nv_encoder
->audio
.enabled
;
628 ret
= drm_eld_size(nv_connector
->base
.eld
);
629 memcpy(buf
, nv_connector
->base
.eld
,
630 min(max_bytes
, ret
));
635 mutex_unlock(&drm
->audio
.lock
);
640 static const struct drm_audio_component_ops nv50_audio_component_ops
= {
641 .get_eld
= nv50_audio_component_get_eld
,
645 nv50_audio_component_bind(struct device
*kdev
, struct device
*hda_kdev
,
648 struct drm_device
*drm_dev
= dev_get_drvdata(kdev
);
649 struct nouveau_drm
*drm
= nouveau_drm(drm_dev
);
650 struct drm_audio_component
*acomp
= data
;
652 if (WARN_ON(!device_link_add(hda_kdev
, kdev
, DL_FLAG_STATELESS
)))
655 drm_modeset_lock_all(drm_dev
);
656 acomp
->ops
= &nv50_audio_component_ops
;
658 drm
->audio
.component
= acomp
;
659 drm_modeset_unlock_all(drm_dev
);
664 nv50_audio_component_unbind(struct device
*kdev
, struct device
*hda_kdev
,
667 struct drm_device
*drm_dev
= dev_get_drvdata(kdev
);
668 struct nouveau_drm
*drm
= nouveau_drm(drm_dev
);
669 struct drm_audio_component
*acomp
= data
;
671 drm_modeset_lock_all(drm_dev
);
672 drm
->audio
.component
= NULL
;
675 drm_modeset_unlock_all(drm_dev
);
678 static const struct component_ops nv50_audio_component_bind_ops
= {
679 .bind
= nv50_audio_component_bind
,
680 .unbind
= nv50_audio_component_unbind
,
684 nv50_audio_component_init(struct nouveau_drm
*drm
)
686 if (component_add(drm
->dev
->dev
, &nv50_audio_component_bind_ops
))
689 drm
->audio
.component_registered
= true;
690 mutex_init(&drm
->audio
.lock
);
694 nv50_audio_component_fini(struct nouveau_drm
*drm
)
696 if (!drm
->audio
.component_registered
)
699 component_del(drm
->dev
->dev
, &nv50_audio_component_bind_ops
);
700 drm
->audio
.component_registered
= false;
701 mutex_destroy(&drm
->audio
.lock
);
704 /******************************************************************************
706 *****************************************************************************/
708 nv50_audio_supported(struct drm_encoder
*encoder
)
710 struct nv50_disp
*disp
= nv50_disp(encoder
->dev
);
712 if (disp
->disp
->object
.oclass
<= GT200_DISP
||
713 disp
->disp
->object
.oclass
== GT206_DISP
)
720 nv50_audio_disable(struct drm_encoder
*encoder
, struct nouveau_crtc
*nv_crtc
)
722 struct nouveau_drm
*drm
= nouveau_drm(encoder
->dev
);
723 struct nouveau_encoder
*nv_encoder
= nouveau_encoder(encoder
);
724 struct nvif_outp
*outp
= &nv_encoder
->outp
;
726 if (!nv50_audio_supported(encoder
))
729 mutex_lock(&drm
->audio
.lock
);
730 if (nv_encoder
->audio
.enabled
) {
731 nv_encoder
->audio
.enabled
= false;
732 nv_encoder
->audio
.connector
= NULL
;
733 nvif_outp_hda_eld(&nv_encoder
->outp
, nv_crtc
->index
, NULL
, 0);
735 mutex_unlock(&drm
->audio
.lock
);
737 nv50_audio_component_eld_notify(drm
->audio
.component
, outp
->or.id
, nv_crtc
->index
);
741 nv50_audio_enable(struct drm_encoder
*encoder
, struct nouveau_crtc
*nv_crtc
,
742 struct nouveau_connector
*nv_connector
, struct drm_atomic_state
*state
,
743 struct drm_display_mode
*mode
)
745 struct nouveau_drm
*drm
= nouveau_drm(encoder
->dev
);
746 struct nouveau_encoder
*nv_encoder
= nouveau_encoder(encoder
);
747 struct nvif_outp
*outp
= &nv_encoder
->outp
;
749 if (!nv50_audio_supported(encoder
) || !drm_detect_monitor_audio(nv_connector
->edid
))
752 mutex_lock(&drm
->audio
.lock
);
754 nvif_outp_hda_eld(&nv_encoder
->outp
, nv_crtc
->index
, nv_connector
->base
.eld
,
755 drm_eld_size(nv_connector
->base
.eld
));
756 nv_encoder
->audio
.enabled
= true;
757 nv_encoder
->audio
.connector
= &nv_connector
->base
;
759 mutex_unlock(&drm
->audio
.lock
);
761 nv50_audio_component_eld_notify(drm
->audio
.component
, outp
->or.id
, nv_crtc
->index
);
764 /******************************************************************************
766 *****************************************************************************/
768 nv50_hdmi_enable(struct drm_encoder
*encoder
, struct nouveau_crtc
*nv_crtc
,
769 struct nouveau_connector
*nv_connector
, struct drm_atomic_state
*state
,
770 struct drm_display_mode
*mode
, bool hda
)
772 struct nouveau_drm
*drm
= nouveau_drm(encoder
->dev
);
773 struct nouveau_encoder
*nv_encoder
= nouveau_encoder(encoder
);
774 struct drm_hdmi_info
*hdmi
= &nv_connector
->base
.display_info
.hdmi
;
775 union hdmi_infoframe infoframe
= { 0 };
776 const u8 rekey
= 56; /* binary driver, and tegra, constant */
780 struct nvif_outp_infoframe_v0 infoframe
;
785 max_ac_packet
= mode
->htotal
- mode
->hdisplay
;
786 max_ac_packet
-= rekey
;
787 max_ac_packet
-= 18; /* constant from tegra */
790 if (hdmi
->scdc
.scrambling
.supported
) {
791 const bool high_tmds_clock_ratio
= mode
->clock
> 340000;
793 ret
= drm_scdc_readb(nv_encoder
->i2c
, SCDC_TMDS_CONFIG
, &scdc
);
795 NV_ERROR(drm
, "Failure to read SCDC_TMDS_CONFIG: %d\n", ret
);
799 scdc
&= ~(SCDC_TMDS_BIT_CLOCK_RATIO_BY_40
| SCDC_SCRAMBLING_ENABLE
);
800 if (high_tmds_clock_ratio
|| hdmi
->scdc
.scrambling
.low_rates
)
801 scdc
|= SCDC_SCRAMBLING_ENABLE
;
802 if (high_tmds_clock_ratio
)
803 scdc
|= SCDC_TMDS_BIT_CLOCK_RATIO_BY_40
;
805 ret
= drm_scdc_writeb(nv_encoder
->i2c
, SCDC_TMDS_CONFIG
, scdc
);
807 NV_ERROR(drm
, "Failure to write SCDC_TMDS_CONFIG = 0x%02x: %d\n",
811 ret
= nvif_outp_acquire_tmds(&nv_encoder
->outp
, nv_crtc
->index
, true,
812 max_ac_packet
, rekey
, scdc
, hda
);
817 args
.infoframe
.version
= 0;
818 args
.infoframe
.head
= nv_crtc
->index
;
820 if (!drm_hdmi_avi_infoframe_from_display_mode(&infoframe
.avi
, &nv_connector
->base
, mode
)) {
821 drm_hdmi_avi_infoframe_quant_range(&infoframe
.avi
, &nv_connector
->base
, mode
,
822 HDMI_QUANTIZATION_RANGE_FULL
);
824 size
= hdmi_infoframe_pack(&infoframe
, args
.data
, ARRAY_SIZE(args
.data
));
829 nvif_outp_infoframe(&nv_encoder
->outp
, NVIF_OUTP_INFOFRAME_V0_AVI
, &args
.infoframe
, size
);
831 /* Vendor InfoFrame. */
832 memset(&args
.data
, 0, sizeof(args
.data
));
833 if (!drm_hdmi_vendor_infoframe_from_display_mode(&infoframe
.vendor
.hdmi
,
834 &nv_connector
->base
, mode
))
835 size
= hdmi_infoframe_pack(&infoframe
, args
.data
, ARRAY_SIZE(args
.data
));
839 nvif_outp_infoframe(&nv_encoder
->outp
, NVIF_OUTP_INFOFRAME_V0_VSI
, &args
.infoframe
, size
);
841 nv50_audio_enable(encoder
, nv_crtc
, nv_connector
, state
, mode
);
844 /******************************************************************************
846 *****************************************************************************/
847 #define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
848 #define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
849 #define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
852 struct nv50_mstm
*mstm
;
853 struct drm_dp_mst_port
*port
;
854 struct drm_connector connector
;
856 struct drm_display_mode
*native
;
861 struct drm_encoder encoder
;
863 /* head is statically assigned on msto creation */
864 struct nv50_head
*head
;
865 struct nv50_mstc
*mstc
;
870 struct nouveau_encoder
*nv50_real_outp(struct drm_encoder
*encoder
)
872 struct nv50_msto
*msto
;
874 if (encoder
->encoder_type
!= DRM_MODE_ENCODER_DPMST
)
875 return nouveau_encoder(encoder
);
877 msto
= nv50_msto(encoder
);
880 return msto
->mstc
->mstm
->outp
;
884 nv50_msto_cleanup(struct drm_atomic_state
*state
,
885 struct drm_dp_mst_topology_state
*new_mst_state
,
886 struct drm_dp_mst_topology_mgr
*mgr
,
887 struct nv50_msto
*msto
)
889 struct nouveau_drm
*drm
= nouveau_drm(msto
->encoder
.dev
);
890 struct drm_dp_mst_atomic_payload
*new_payload
=
891 drm_atomic_get_mst_payload_state(new_mst_state
, msto
->mstc
->port
);
892 struct drm_dp_mst_topology_state
*old_mst_state
=
893 drm_atomic_get_old_mst_topology_state(state
, mgr
);
894 const struct drm_dp_mst_atomic_payload
*old_payload
=
895 drm_atomic_get_mst_payload_state(old_mst_state
, msto
->mstc
->port
);
897 NV_ATOMIC(drm
, "%s: msto cleanup\n", msto
->encoder
.name
);
899 if (msto
->disabled
) {
901 msto
->disabled
= false;
902 drm_dp_remove_payload_part2(mgr
, new_mst_state
, old_payload
, new_payload
);
903 } else if (msto
->enabled
) {
904 drm_dp_add_payload_part2(mgr
, state
, new_payload
);
905 msto
->enabled
= false;
910 nv50_msto_prepare(struct drm_atomic_state
*state
,
911 struct drm_dp_mst_topology_state
*mst_state
,
912 struct drm_dp_mst_topology_mgr
*mgr
,
913 struct nv50_msto
*msto
)
915 struct nouveau_drm
*drm
= nouveau_drm(msto
->encoder
.dev
);
916 struct nv50_mstc
*mstc
= msto
->mstc
;
917 struct nv50_mstm
*mstm
= mstc
->mstm
;
918 struct drm_dp_mst_atomic_payload
*payload
;
920 NV_ATOMIC(drm
, "%s: msto prepare\n", msto
->encoder
.name
);
922 payload
= drm_atomic_get_mst_payload_state(mst_state
, mstc
->port
);
924 // TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
925 if (msto
->disabled
) {
926 drm_dp_remove_payload_part1(mgr
, mst_state
, payload
);
928 nvif_outp_dp_mst_vcpi(&mstm
->outp
->outp
, msto
->head
->base
.index
, 0, 0, 0, 0);
931 drm_dp_add_payload_part1(mgr
, mst_state
, payload
);
933 nvif_outp_dp_mst_vcpi(&mstm
->outp
->outp
, msto
->head
->base
.index
,
934 payload
->vc_start_slot
, payload
->time_slots
,
935 payload
->pbn
, payload
->time_slots
* mst_state
->pbn_div
);
940 nv50_msto_atomic_check(struct drm_encoder
*encoder
,
941 struct drm_crtc_state
*crtc_state
,
942 struct drm_connector_state
*conn_state
)
944 struct drm_atomic_state
*state
= crtc_state
->state
;
945 struct drm_connector
*connector
= conn_state
->connector
;
946 struct drm_dp_mst_topology_state
*mst_state
;
947 struct nv50_mstc
*mstc
= nv50_mstc(connector
);
948 struct nv50_mstm
*mstm
= mstc
->mstm
;
949 struct nv50_head_atom
*asyh
= nv50_head_atom(crtc_state
);
953 ret
= nv50_outp_atomic_check_view(encoder
, crtc_state
, conn_state
,
958 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
962 * When restoring duplicated states, we need to make sure that the bw
963 * remains the same and avoid recalculating it, as the connector's bpc
964 * may have changed after the state was duplicated
966 if (!state
->duplicated
) {
967 const int clock
= crtc_state
->adjusted_mode
.clock
;
969 asyh
->or.bpc
= connector
->display_info
.bpc
;
970 asyh
->dp
.pbn
= drm_dp_calc_pbn_mode(clock
, asyh
->or.bpc
* 3,
974 mst_state
= drm_atomic_get_mst_topology_state(state
, &mstm
->mgr
);
975 if (IS_ERR(mst_state
))
976 return PTR_ERR(mst_state
);
978 if (!mst_state
->pbn_div
) {
979 struct nouveau_encoder
*outp
= mstc
->mstm
->outp
;
981 mst_state
->pbn_div
= drm_dp_get_vc_payload_bw(&mstm
->mgr
,
982 outp
->dp
.link_bw
, outp
->dp
.link_nr
);
985 slots
= drm_dp_atomic_find_time_slots(state
, &mstm
->mgr
, mstc
->port
, asyh
->dp
.pbn
);
995 nv50_dp_bpc_to_depth(unsigned int bpc
)
998 case 6: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444
;
999 case 8: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444
;
1001 default: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444
;
1006 nv50_msto_atomic_enable(struct drm_encoder
*encoder
, struct drm_atomic_state
*state
)
1008 struct nv50_msto
*msto
= nv50_msto(encoder
);
1009 struct nv50_head
*head
= msto
->head
;
1010 struct nv50_head_atom
*asyh
=
1011 nv50_head_atom(drm_atomic_get_new_crtc_state(state
, &head
->base
.base
));
1012 struct nv50_mstc
*mstc
= NULL
;
1013 struct nv50_mstm
*mstm
= NULL
;
1014 struct drm_connector
*connector
;
1015 struct drm_connector_list_iter conn_iter
;
1018 drm_connector_list_iter_begin(encoder
->dev
, &conn_iter
);
1019 drm_for_each_connector_iter(connector
, &conn_iter
) {
1020 if (connector
->state
->best_encoder
== &msto
->encoder
) {
1021 mstc
= nv50_mstc(connector
);
1026 drm_connector_list_iter_end(&conn_iter
);
1031 if (!mstm
->links
++) {
1032 /*XXX: MST audio. */
1033 nvif_outp_acquire_dp(&mstm
->outp
->outp
, mstm
->outp
->dp
.dpcd
, 0, 0, false, true);
1036 if (mstm
->outp
->outp
.or.link
& 1)
1037 proto
= NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A
;
1039 proto
= NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B
;
1041 mstm
->outp
->update(mstm
->outp
, head
->base
.index
, asyh
, proto
,
1042 nv50_dp_bpc_to_depth(asyh
->or.bpc
));
1045 msto
->enabled
= true;
1046 mstm
->modified
= true;
1050 nv50_msto_atomic_disable(struct drm_encoder
*encoder
, struct drm_atomic_state
*state
)
1052 struct nv50_msto
*msto
= nv50_msto(encoder
);
1053 struct nv50_mstc
*mstc
= msto
->mstc
;
1054 struct nv50_mstm
*mstm
= mstc
->mstm
;
1056 mstm
->outp
->update(mstm
->outp
, msto
->head
->base
.index
, NULL
, 0, 0);
1057 mstm
->modified
= true;
1059 mstm
->disabled
= true;
1060 msto
->disabled
= true;
1063 static const struct drm_encoder_helper_funcs
1065 .atomic_disable
= nv50_msto_atomic_disable
,
1066 .atomic_enable
= nv50_msto_atomic_enable
,
1067 .atomic_check
= nv50_msto_atomic_check
,
1071 nv50_msto_destroy(struct drm_encoder
*encoder
)
1073 struct nv50_msto
*msto
= nv50_msto(encoder
);
1074 drm_encoder_cleanup(&msto
->encoder
);
1078 static const struct drm_encoder_funcs
1080 .destroy
= nv50_msto_destroy
,
1083 static struct nv50_msto
*
1084 nv50_msto_new(struct drm_device
*dev
, struct nv50_head
*head
, int id
)
1086 struct nv50_msto
*msto
;
1089 msto
= kzalloc(sizeof(*msto
), GFP_KERNEL
);
1091 return ERR_PTR(-ENOMEM
);
1093 ret
= drm_encoder_init(dev
, &msto
->encoder
, &nv50_msto
,
1094 DRM_MODE_ENCODER_DPMST
, "mst-%d", id
);
1097 return ERR_PTR(ret
);
1100 drm_encoder_helper_add(&msto
->encoder
, &nv50_msto_help
);
1101 msto
->encoder
.possible_crtcs
= drm_crtc_mask(&head
->base
.base
);
1106 static struct drm_encoder
*
1107 nv50_mstc_atomic_best_encoder(struct drm_connector
*connector
,
1108 struct drm_atomic_state
*state
)
1110 struct drm_connector_state
*connector_state
= drm_atomic_get_new_connector_state(state
,
1112 struct nv50_mstc
*mstc
= nv50_mstc(connector
);
1113 struct drm_crtc
*crtc
= connector_state
->crtc
;
1115 if (!(mstc
->mstm
->outp
->dcb
->heads
& drm_crtc_mask(crtc
)))
1118 return &nv50_head(crtc
)->msto
->encoder
;
1121 static enum drm_mode_status
1122 nv50_mstc_mode_valid(struct drm_connector
*connector
,
1123 struct drm_display_mode
*mode
)
1125 struct nv50_mstc
*mstc
= nv50_mstc(connector
);
1126 struct nouveau_encoder
*outp
= mstc
->mstm
->outp
;
1128 /* TODO: calculate the PBN from the dotclock and validate against the
1129 * MSTB's max possible PBN
1132 return nv50_dp_mode_valid(outp
, mode
, NULL
);
1136 nv50_mstc_get_modes(struct drm_connector
*connector
)
1138 struct nv50_mstc
*mstc
= nv50_mstc(connector
);
1141 mstc
->edid
= drm_dp_mst_get_edid(&mstc
->connector
, mstc
->port
->mgr
, mstc
->port
);
1142 drm_connector_update_edid_property(&mstc
->connector
, mstc
->edid
);
1144 ret
= drm_add_edid_modes(&mstc
->connector
, mstc
->edid
);
1147 * XXX: Since we don't use HDR in userspace quite yet, limit the bpc
1148 * to 8 to save bandwidth on the topology. In the future, we'll want
1149 * to properly fix this by dynamically selecting the highest possible
1150 * bpc that would fit in the topology
1152 if (connector
->display_info
.bpc
)
1153 connector
->display_info
.bpc
=
1154 clamp(connector
->display_info
.bpc
, 6U, 8U);
1156 connector
->display_info
.bpc
= 8;
1159 drm_mode_destroy(mstc
->connector
.dev
, mstc
->native
);
1160 mstc
->native
= nouveau_conn_native_mode(&mstc
->connector
);
/* .atomic_check hook: release any VC time slots held by this MST port that
 * the new atomic state no longer needs; the MST helper does the bookkeeping.
 */
1165 nv50_mstc_atomic_check(struct drm_connector
*connector
,
1166 struct drm_atomic_state
*state
)
1168 struct nv50_mstc
*mstc
= nv50_mstc(connector
);
1169 struct drm_dp_mst_topology_mgr
*mgr
= &mstc
->mstm
->mgr
;
1171 return drm_dp_atomic_release_time_slots(state
, mgr
, mstc
->port
);
/* .detect_ctx hook: report disconnected for unregistered connectors, take a
 * runtime-PM reference (tolerating -EACCES, i.e. RPM disabled), then ask the
 * MST topology manager whether the port is connected.  The RPM reference is
 * dropped before returning.  Several error/return lines are missing from
 * this excerpt.
 */
1175 nv50_mstc_detect(struct drm_connector
*connector
,
1176 struct drm_modeset_acquire_ctx
*ctx
, bool force
)
1178 struct nv50_mstc
*mstc
= nv50_mstc(connector
);
1181 if (drm_connector_is_unregistered(connector
))
1182 return connector_status_disconnected
;
1184 ret
= pm_runtime_get_sync(connector
->dev
->dev
);
1185 if (ret
< 0 && ret
!= -EACCES
) {
/* pm_runtime_get_sync() leaves the usage count raised even on failure,
 * so it must be dropped here before bailing out. */
1186 pm_runtime_put_autosuspend(connector
->dev
->dev
);
1187 return connector_status_disconnected
;
1190 ret
= drm_dp_mst_detect_port(connector
, ctx
, mstc
->port
->mgr
,
1192 if (ret
!= connector_status_connected
)
1196 pm_runtime_mark_last_busy(connector
->dev
->dev
);
1197 pm_runtime_put_autosuspend(connector
->dev
->dev
);
/* Connector helper vtable for MST connectors; wires up the probe/atomic
 * hooks defined above. */
1201 static const struct drm_connector_helper_funcs
1203 .get_modes
= nv50_mstc_get_modes
,
1204 .mode_valid
= nv50_mstc_mode_valid
,
1205 .atomic_best_encoder
= nv50_mstc_atomic_best_encoder
,
1206 .atomic_check
= nv50_mstc_atomic_check
,
1207 .detect_ctx
= nv50_mstc_detect
,
/* .destroy hook: tear down the DRM connector and drop the malloc reference
 * taken on the MST port in nv50_mstc_new().  (The kfree of mstc itself is
 * not visible in this excerpt.)
 */
1211 nv50_mstc_destroy(struct drm_connector
*connector
)
1213 struct nv50_mstc
*mstc
= nv50_mstc(connector
);
1215 drm_connector_cleanup(&mstc
->connector
);
1216 drm_dp_mst_put_port_malloc(mstc
->port
);
/* Connector function vtable for MST connectors; state handling and
 * properties are shared with the regular nouveau connector code. */
1221 static const struct drm_connector_funcs
1223 .reset
= nouveau_conn_reset
,
1224 .fill_modes
= drm_helper_probe_single_connector_modes
,
1225 .destroy
= nv50_mstc_destroy
,
1226 .atomic_duplicate_state
= nouveau_conn_atomic_duplicate_state
,
1227 .atomic_destroy_state
= nouveau_conn_atomic_destroy_state
,
1228 .atomic_set_property
= nouveau_conn_atomic_set_property
,
1229 .atomic_get_property
= nouveau_conn_atomic_get_property
,
/* Allocate and initialise an MST connector (nv50_mstc) for @port.
 * Registers the DRM connector, attaches the helper vtable and nouveau
 * connector properties, links the connector to every msto encoder whose
 * head is usable by this output, sets path/tile properties, and takes a
 * malloc reference on the MST port (released in nv50_mstc_destroy()).
 * On success *pmstc points at the new connector.  Error-return lines are
 * missing from this excerpt.
 */
1233 nv50_mstc_new(struct nv50_mstm
*mstm
, struct drm_dp_mst_port
*port
,
1234 const char *path
, struct nv50_mstc
**pmstc
)
1236 struct drm_device
*dev
= mstm
->outp
->base
.base
.dev
;
1237 struct drm_crtc
*crtc
;
1238 struct nv50_mstc
*mstc
;
1241 if (!(mstc
= *pmstc
= kzalloc(sizeof(*mstc
), GFP_KERNEL
)))
1246 ret
= drm_connector_init(dev
, &mstc
->connector
, &nv50_mstc
,
1247 DRM_MODE_CONNECTOR_DisplayPort
);
1254 drm_connector_helper_add(&mstc
->connector
, &nv50_mstc_help
);
1256 mstc
->connector
.funcs
->reset(&mstc
->connector
);
1257 nouveau_conn_attach_properties(&mstc
->connector
);
/* Attach only the encoders whose heads this output can actually drive. */
1259 drm_for_each_crtc(crtc
, dev
) {
1260 if (!(mstm
->outp
->dcb
->heads
& drm_crtc_mask(crtc
)))
1263 drm_connector_attach_encoder(&mstc
->connector
,
1264 &nv50_head(crtc
)->msto
->encoder
);
1267 drm_object_attach_property(&mstc
->connector
.base
, dev
->mode_config
.path_property
, 0);
1268 drm_object_attach_property(&mstc
->connector
.base
, dev
->mode_config
.tile_property
, 0);
1269 drm_connector_set_path_property(&mstc
->connector
, path
);
1270 drm_dp_mst_get_port_malloc(port
);
/* Post-commit MST cleanup for one topology manager: wait for the sink's
 * ACT (allocation change trigger) handling to settle, then run per-msto
 * cleanup for every DPMST encoder belonging to this mstm, and clear the
 * "modified" flag.
 */
1275 nv50_mstm_cleanup(struct drm_atomic_state
*state
,
1276 struct drm_dp_mst_topology_state
*mst_state
,
1277 struct nv50_mstm
*mstm
)
1279 struct nouveau_drm
*drm
= nouveau_drm(mstm
->outp
->base
.base
.dev
);
1280 struct drm_encoder
*encoder
;
1282 NV_ATOMIC(drm
, "%s: mstm cleanup\n", mstm
->outp
->base
.base
.name
);
1283 drm_dp_check_act_status(&mstm
->mgr
);
1285 drm_for_each_encoder(encoder
, mstm
->outp
->base
.base
.dev
) {
1286 if (encoder
->encoder_type
== DRM_MODE_ENCODER_DPMST
) {
1287 struct nv50_msto
*msto
= nv50_msto(encoder
);
1288 struct nv50_mstc
*mstc
= msto
->mstc
;
/* Only touch mstos attached to *this* topology manager. */
1289 if (mstc
&& mstc
->mstm
== mstm
)
1290 nv50_msto_cleanup(state
, mst_state
, &mstm
->mgr
, msto
);
1294 mstm
->modified
= false;
/* Pre-commit MST payload programming for one topology manager.  Two passes
 * over all DPMST encoders of this mstm: first prepare the mstos being
 * disabled (frees their VC slots), then prepare the still-enabled ones so
 * payloads shifted left by the first pass get their start slots updated.
 * Finally, if the whole mstm is being shut down, release the output.
 */
1298 nv50_mstm_prepare(struct drm_atomic_state
*state
,
1299 struct drm_dp_mst_topology_state
*mst_state
,
1300 struct nv50_mstm
*mstm
)
1302 struct nouveau_drm
*drm
= nouveau_drm(mstm
->outp
->base
.base
.dev
);
1303 struct drm_encoder
*encoder
;
1305 NV_ATOMIC(drm
, "%s: mstm prepare\n", mstm
->outp
->base
.base
.name
);
1307 /* Disable payloads first */
1308 drm_for_each_encoder(encoder
, mstm
->outp
->base
.base
.dev
) {
1309 if (encoder
->encoder_type
== DRM_MODE_ENCODER_DPMST
) {
1310 struct nv50_msto
*msto
= nv50_msto(encoder
);
1311 struct nv50_mstc
*mstc
= msto
->mstc
;
1312 if (mstc
&& mstc
->mstm
== mstm
&& msto
->disabled
)
1313 nv50_msto_prepare(state
, mst_state
, &mstm
->mgr
, msto
);
1317 /* Add payloads for new heads, while also updating the start slots of any unmodified (but
1318 * active) heads that may have had their VC slots shifted left after the previous step
1320 drm_for_each_encoder(encoder
, mstm
->outp
->base
.base
.dev
) {
1321 if (encoder
->encoder_type
== DRM_MODE_ENCODER_DPMST
) {
1322 struct nv50_msto
*msto
= nv50_msto(encoder
);
1323 struct nv50_mstc
*mstc
= msto
->mstc
;
1324 if (mstc
&& mstc
->mstm
== mstm
&& !msto
->disabled
)
1325 nv50_msto_prepare(state
, mst_state
, &mstm
->mgr
, msto
);
1329 if (mstm
->disabled
) {
/* Whole output going idle: hand the OR back to the firmware/HW layer. */
1331 nvif_outp_release(&mstm
->outp
->outp
);
1332 mstm
->disabled
= false;
1336 static struct drm_connector
*
1337 nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr
*mgr
,
1338 struct drm_dp_mst_port
*port
, const char *path
)
1340 struct nv50_mstm
*mstm
= nv50_mstm(mgr
);
1341 struct nv50_mstc
*mstc
;
1344 ret
= nv50_mstc_new(mstm
, port
, path
, &mstc
);
1348 return &mstc
->connector
;
/* Topology-manager callbacks: only connector creation is needed here. */
1351 static const struct drm_dp_mst_topology_cbs
1353 .add_connector
= nv50_mstm_add_connector
,
/* Service an MST HPD IRQ: read the DOWN_REP/UP_REQ ESI bytes over AUX, let
 * the MST helper process the event, acknowledge the handled bits back to
 * the sink, then kick the helper to send any new requests.  Failures are
 * logged via NV_DEBUG.  Loop/error-control lines are missing from this
 * excerpt, so exact retry behaviour cannot be confirmed here.
 */
1357 nv50_mstm_service(struct nouveau_drm
*drm
,
1358 struct nouveau_connector
*nv_connector
,
1359 struct nv50_mstm
*mstm
)
1361 struct drm_dp_aux
*aux
= &nv_connector
->aux
;
1362 bool handled
= true, ret
= true;
1369 rc
= drm_dp_dpcd_read(aux
, DP_SINK_COUNT_ESI
, esi
, 8);
1375 drm_dp_mst_hpd_irq_handle_event(&mstm
->mgr
, esi
, ack
, &handled
);
/* Write back the serviced event bits (ESI byte 1) to clear the IRQ. */
1379 rc
= drm_dp_dpcd_writeb(aux
, DP_SINK_COUNT_ESI
+ 1, ack
[1]);
1386 drm_dp_mst_hpd_irq_send_new_request(&mstm
->mgr
);
1390 NV_DEBUG(drm
, "Failed to handle ESI on %s: %d\n",
1391 nv_connector
->base
.name
, rc
);
/* Tear down an active MST session: mark the manager as no longer in MST
 * mode and tell the topology helper to disable MST on the sink. */
1397 nv50_mstm_remove(struct nv50_mstm
*mstm
)
1399 mstm
->is_mst
= false;
1400 drm_dp_mst_topology_mgr_set_mst(&mstm
->mgr
, false);
/* Probe for MST on a freshly detected DP sink.  No-op when the output has
 * no mstm or the sink cannot do MST.  Otherwise: clear DP_MSTM_CTRL first
 * to flush any stale MST state (e.g. left by firmware), then enable MST
 * via the topology manager and record is_mst on success.  Error-return
 * lines are missing from this excerpt.
 */
1404 nv50_mstm_detect(struct nouveau_encoder
*outp
)
1406 struct nv50_mstm
*mstm
= outp
->dp
.mstm
;
1407 struct drm_dp_aux
*aux
;
1410 if (!mstm
|| !mstm
->can_mst
)
1413 aux
= mstm
->mgr
.aux
;
1415 /* Clear any leftover MST state we didn't set ourselves by first
1416 * disabling MST if it was already enabled
1418 ret
= drm_dp_dpcd_writeb(aux
, DP_MSTM_CTRL
, 0);
1422 /* And start enabling */
1423 ret
= drm_dp_mst_topology_mgr_set_mst(&mstm
->mgr
, true);
1427 mstm
->is_mst
= true;
/* Suspend-side MST teardown: set mstm->suspended under hpd_irq_lock so the
 * IRQ path stops touching MST state (see comment below on why is_mst itself
 * can't be protected here), then suspend the topology manager.
 */
1432 nv50_mstm_fini(struct nouveau_encoder
*outp
)
1434 struct nv50_mstm
*mstm
= outp
->dp
.mstm
;
1439 /* Don't change the MST state of this connector until we've finished
1440 * resuming, since we can't safely grab hpd_irq_lock in our resume
1441 * path to protect mstm->is_mst without potentially deadlocking
1443 mutex_lock(&outp
->dp
.hpd_irq_lock
);
1444 mstm
->suspended
= true;
1445 mutex_unlock(&outp
->dp
.hpd_irq_lock
);
1448 drm_dp_mst_topology_mgr_suspend(&mstm
->mgr
);
/* Resume-side MST bring-up: try to resume the topology manager (syncing
 * state with the sink unless this is a runtime resume); if the topology is
 * gone, fall back to nv50_mstm_remove() and signal a hotplug event so
 * userspace re-probes.  Clears the suspended flag under hpd_irq_lock.
 * Several guard/return lines are missing from this excerpt.
 */
1452 nv50_mstm_init(struct nouveau_encoder
*outp
, bool runtime
)
1454 struct nv50_mstm
*mstm
= outp
->dp
.mstm
;
1461 ret
= drm_dp_mst_topology_mgr_resume(&mstm
->mgr
, !runtime
);
1463 nv50_mstm_remove(mstm
);
1466 mutex_lock(&outp
->dp
.hpd_irq_lock
);
1467 mstm
->suspended
= false;
1468 mutex_unlock(&outp
->dp
.hpd_irq_lock
);
1471 drm_kms_helper_hotplug_event(mstm
->mgr
.dev
);
/* Destroy an mstm (if any) and its topology manager; caller's pointer is
 * consumed.  (NULL check and kfree are not visible in this excerpt.) */
1475 nv50_mstm_del(struct nv50_mstm
**pmstm
)
1477 struct nv50_mstm
*mstm
= *pmstm
;
1479 drm_dp_mst_topology_mgr_destroy(&mstm
->mgr
);
/* Allocate and initialise the MST manager for a DP output.  max_payloads is
 * bounded by the number of heads this output can drive (one payload per
 * head).  On success *pmstm points at the new manager; error-handling lines
 * are missing from this excerpt.
 */
1486 nv50_mstm_new(struct nouveau_encoder
*outp
, struct drm_dp_aux
*aux
, int aux_max
,
1487 int conn_base_id
, struct nv50_mstm
**pmstm
)
1489 const int max_payloads
= hweight8(outp
->dcb
->heads
);
1490 struct drm_device
*dev
= outp
->base
.base
.dev
;
1491 struct nv50_mstm
*mstm
;
1494 if (!(mstm
= *pmstm
= kzalloc(sizeof(*mstm
), GFP_KERNEL
)))
1497 mstm
->mgr
.cbs
= &nv50_mstm
;
1499 ret
= drm_dp_mst_topology_mgr_init(&mstm
->mgr
, dev
, aux
, aux_max
,
1500 max_payloads
, conn_base_id
);
1507 /******************************************************************************
1509 *****************************************************************************/
/* Program SOR_SET_CONTROL for @nv_encoder on @head via the core channel.
 * nv_encoder->ctrl caches the owner-mask + protocol word across heads:
 * the head bit is cleared first, and if no owner remains the whole word is
 * reset before (conditionally — guard lines missing from this excerpt)
 * re-applying the protocol and head bit.  @asyh carries the depth to the
 * core->func->sor->ctrl hook; passing asyh==NULL (see atomic_disable) turns
 * the OR off.
 */
1511 nv50_sor_update(struct nouveau_encoder
*nv_encoder
, u8 head
,
1512 struct nv50_head_atom
*asyh
, u8 proto
, u8 depth
)
1514 struct nv50_disp
*disp
= nv50_disp(nv_encoder
->base
.base
.dev
);
1515 struct nv50_core
*core
= disp
->core
;
1518 nv_encoder
->ctrl
&= ~BIT(head
);
1519 if (NVDEF_TEST(nv_encoder
->ctrl
, NV507D
, SOR_SET_CONTROL
, OWNER
, ==, NONE
))
1520 nv_encoder
->ctrl
= 0;
1522 nv_encoder
->ctrl
|= NVVAL(NV507D
, SOR_SET_CONTROL
, PROTOCOL
, proto
);
1523 nv_encoder
->ctrl
|= BIT(head
);
1524 asyh
->or.depth
= depth
;
1527 core
->func
->sor
->ctrl(core
, nv_encoder
->outp
.or.id
, nv_encoder
->ctrl
, asyh
);
1530 /* TODO: Should we extend this to PWM-only backlights?
1531 * As well, should we add a DRM helper for waiting for the backlight to acknowledge
1532 * the panel backlight has been shut off? Intel doesn't seem to do this, and uses a
1533 * fixed time delay from the vbios…
/* Atomic-disable for SOR encoders.  Order matters: first shut down a
 * DPCD-controlled eDP backlight (if configured), then put a DP sink into
 * D3 via DP_SET_POWER, then detach the encoder from its head (update with
 * asyh=NULL), disable audio, release the OR, and forget the CRTC binding.
 * Some guard/brace lines are missing from this excerpt.
 */
1536 nv50_sor_atomic_disable(struct drm_encoder
*encoder
, struct drm_atomic_state
*state
)
1538 struct nouveau_encoder
*nv_encoder
= nouveau_encoder(encoder
);
1539 struct nouveau_crtc
*nv_crtc
= nouveau_crtc(nv_encoder
->crtc
);
1540 struct nouveau_connector
*nv_connector
= nv50_outp_get_old_connector(state
, nv_encoder
);
1541 #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
1542 struct nouveau_drm
*drm
= nouveau_drm(nv_encoder
->base
.base
.dev
);
1543 struct nouveau_backlight
*backlight
= nv_connector
->backlight
;
1545 struct drm_dp_aux
*aux
= &nv_connector
->aux
;
1549 #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
1550 if (backlight
&& backlight
->uses_dpcd
) {
1551 ret
= drm_edp_backlight_disable(aux
, &backlight
->edp_info
);
1553 NV_ERROR(drm
, "Failed to disable backlight on [CONNECTOR:%d:%s]: %d\n",
1554 nv_connector
->base
.base
.id
, nv_connector
->base
.name
, ret
);
1558 if (nv_encoder
->dcb
->type
== DCB_OUTPUT_DP
) {
/* Read-modify-write DP_SET_POWER so reserved bits are preserved. */
1559 ret
= drm_dp_dpcd_readb(aux
, DP_SET_POWER
, &pwr
);
1562 pwr
&= ~DP_SET_POWER_MASK
;
1563 pwr
|= DP_SET_POWER_D3
;
1564 drm_dp_dpcd_writeb(aux
, DP_SET_POWER
, pwr
);
/* asyh == NULL detaches this head from the SOR (see nv50_sor_update). */
1568 nv_encoder
->update(nv_encoder
, nv_crtc
->index
, NULL
, 0, 0);
1569 nv50_audio_disable(encoder
, nv_crtc
);
1570 nvif_outp_release(&nv_encoder
->outp
);
1571 nv_encoder
->crtc
= NULL
;
/* Atomic-enable for SOR encoders (TMDS/HDMI, LVDS, DP/eDP).  Determines
 * audio capability (GT214+ / GF110+ display classes with an audio-capable
 * monitor), acquires the OR in the appropriate mode, selects the
 * SOR_SET_CONTROL protocol and pixel depth, enables audio and (if DPCD-
 * controlled) the eDP backlight, and finally programs the SOR via
 * nv_encoder->update().  A number of guard/else/break lines are missing
 * from this excerpt; comments below only describe what is visible.
 */
1575 nv50_sor_atomic_enable(struct drm_encoder
*encoder
, struct drm_atomic_state
*state
)
1577 struct nouveau_encoder
*nv_encoder
= nouveau_encoder(encoder
);
1578 struct nouveau_crtc
*nv_crtc
= nv50_outp_get_new_crtc(state
, nv_encoder
);
1579 struct nv50_head_atom
*asyh
=
1580 nv50_head_atom(drm_atomic_get_new_crtc_state(state
, &nv_crtc
->base
));
1581 struct drm_display_mode
*mode
= &asyh
->state
.adjusted_mode
;
1582 struct nv50_disp
*disp
= nv50_disp(encoder
->dev
);
1583 struct nvif_outp
*outp
= &nv_encoder
->outp
;
1584 struct drm_device
*dev
= encoder
->dev
;
1585 struct nouveau_drm
*drm
= nouveau_drm(dev
);
1586 struct nouveau_connector
*nv_connector
;
1587 #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
1588 struct nouveau_backlight
*backlight
;
1590 struct nvbios
*bios
= &drm
->vbios
;
1591 bool lvds_dual
= false, lvds_8bpc
= false, hda
= false;
1592 u8 proto
= NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM
;
1593 u8 depth
= NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT
;
1595 nv_connector
= nv50_outp_get_new_connector(state
, nv_encoder
);
1596 nv_encoder
->crtc
= &nv_crtc
->base
;
/* HD audio only on GT214 or GF110-and-newer display classes, and only
 * when the monitor's EDID advertises audio support. */
1598 if ((disp
->disp
->object
.oclass
== GT214_DISP
||
1599 disp
->disp
->object
.oclass
>= GF110_DISP
) &&
1600 drm_detect_monitor_audio(nv_connector
->edid
))
1603 switch (nv_encoder
->dcb
->type
) {
1604 case DCB_OUTPUT_TMDS
:
/* Plain DVI path (NV50, or sink isn't HDMI): acquire TMDS without the
 * HDMI infoframe/audio plumbing; otherwise nv50_hdmi_enable() is used. */
1605 if (disp
->disp
->object
.oclass
== NV50_DISP
||
1606 !drm_detect_hdmi_monitor(nv_connector
->edid
))
1607 nvif_outp_acquire_tmds(outp
, nv_crtc
->index
, false, 0, 0, 0, false);
1609 nv50_hdmi_enable(encoder
, nv_crtc
, nv_connector
, state
, mode
, hda
);
1611 if (nv_encoder
->outp
.or.link
& 1) {
1612 proto
= NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A
;
1613 /* Only enable dual-link if:
1614 * - Need to (i.e. rate > 165MHz)
1616 * - Not an HDMI monitor, since there's no dual-link
1619 if (mode
->clock
>= 165000 &&
1620 nv_encoder
->dcb
->duallink_possible
&&
1621 !drm_detect_hdmi_monitor(nv_connector
->edid
))
1622 proto
= NV507D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS
;
1624 proto
= NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B
;
1627 case DCB_OUTPUT_LVDS
:
1628 proto
= NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM
;
/* LVDS link/depth configuration: trust the VBIOS when DDC is unusable,
 * otherwise derive dual-link and bpc from EDID/strap data. */
1630 if (bios
->fp_no_ddc
) {
1631 lvds_dual
= bios
->fp
.dual_link
;
1632 lvds_8bpc
= bios
->fp
.if_is_24bit
;
1634 if (nv_connector
->type
== DCB_CONNECTOR_LVDS_SPWG
) {
/* SPWG EDID byte 121 encodes the link count (2 => dual link). */
1635 if (((u8
*)nv_connector
->edid
)[121] == 2)
1638 if (mode
->clock
>= bios
->fp
.duallink_transition_clk
) {
1643 if (bios
->fp
.strapless_is_24bit
& 2)
1646 if (bios
->fp
.strapless_is_24bit
& 1)
1650 if (asyh
->or.bpc
== 8)
1654 nvif_outp_acquire_lvds(&nv_encoder
->outp
, lvds_dual
, lvds_8bpc
);
/* DP/eDP: acquire the OR in DP mode and derive depth from the head bpc. */
1657 nvif_outp_acquire_dp(&nv_encoder
->outp
, nv_encoder
->dp
.dpcd
, 0, 0, hda
, false);
1658 depth
= nv50_dp_bpc_to_depth(asyh
->or.bpc
);
1660 if (nv_encoder
->outp
.or.link
& 1)
1661 proto
= NV887D_SOR_SET_CONTROL_PROTOCOL_DP_A
;
1663 proto
= NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B
;
1665 nv50_audio_enable(encoder
, nv_crtc
, nv_connector
, state
, mode
);
1667 #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
1668 backlight
= nv_connector
->backlight
;
1669 if (backlight
&& backlight
->uses_dpcd
)
1670 drm_edp_backlight_enable(&nv_connector
->aux
, &backlight
->edp_info
,
1671 (u16
)backlight
->dev
->props
.brightness
);
1680 nv_encoder
->update(nv_encoder
, nv_crtc
->index
, asyh
, proto
, depth
);
/* Encoder helper vtable for SORs; atomic_check is shared across outputs. */
1683 static const struct drm_encoder_helper_funcs
1685 .atomic_check
= nv50_outp_atomic_check
,
1686 .atomic_enable
= nv50_sor_atomic_enable
,
1687 .atomic_disable
= nv50_sor_atomic_disable
,
/* .destroy hook for SOR encoders: tear down the nvif output object, the
 * MST manager (if any), and the DRM encoder; for DP outputs also destroy
 * the HPD IRQ mutex created in nv50_sor_create().  (The kfree of the
 * encoder itself is not visible in this excerpt.)
 */
1691 nv50_sor_destroy(struct drm_encoder
*encoder
)
1693 struct nouveau_encoder
*nv_encoder
= nouveau_encoder(encoder
);
1695 nvif_outp_dtor(&nv_encoder
->outp
);
1697 nv50_mstm_del(&nv_encoder
->dp
.mstm
);
1698 drm_encoder_cleanup(encoder
);
1700 if (nv_encoder
->dcb
->type
== DCB_OUTPUT_DP
)
1701 mutex_destroy(&nv_encoder
->dp
.hpd_irq_lock
);
/* Encoder function vtable for SORs. */
1706 static const struct drm_encoder_funcs
1708 .destroy
= nv50_sor_destroy
,
/* Query the VBIOS DP table for MST support: requires a table of version
 * 0x40 or newer with bit 2 set in the byte at offset 0x08. */
1711 bool nv50_has_mst(struct nouveau_drm
*drm
)
1713 struct nvkm_bios
*bios
= nvxx_bios(&drm
->client
.device
);
1715 u8 ver
, hdr
, cnt
, len
;
1717 data
= nvbios_dp_table(bios
, &ver
, &hdr
, &cnt
, &len
);
1718 return data
&& ver
>= 0x40 && (nvbios_rd08(bios
, data
+ 0x08) & 0x04);
/* Create and register a SOR encoder for @dcbe, attach it to @connector,
 * query its capabilities, and set up per-type plumbing:
 *  - DP: find the AUX channel, init the HPD IRQ mutex, pick the I2C
 *    adapter (custom I2C-over-AUX on pre-GF110, see comment below), and
 *    create an MST manager when the VBIOS allows it (not for eDP);
 *  - other types: use the plain I2C bus from the DCB entry.
 * Finishes by constructing the nvif output object.  Several guard/error
 * lines are missing from this excerpt.
 */
1722 nv50_sor_create(struct drm_connector
*connector
, struct dcb_output
*dcbe
)
1724 struct nouveau_connector
*nv_connector
= nouveau_connector(connector
);
1725 struct nouveau_drm
*drm
= nouveau_drm(connector
->dev
);
1726 struct nvkm_i2c
*i2c
= nvxx_i2c(&drm
->client
.device
);
1727 struct nouveau_encoder
*nv_encoder
;
1728 struct drm_encoder
*encoder
;
1729 struct nv50_disp
*disp
= nv50_disp(connector
->dev
);
1732 switch (dcbe
->type
) {
1733 case DCB_OUTPUT_LVDS
: type
= DRM_MODE_ENCODER_LVDS
; break;
1734 case DCB_OUTPUT_TMDS
:
1737 type
= DRM_MODE_ENCODER_TMDS
;
1741 nv_encoder
= kzalloc(sizeof(*nv_encoder
), GFP_KERNEL
)
;
1744 nv_encoder
->dcb
= dcbe
;
1745 nv_encoder
->update
= nv50_sor_update
;
1747 encoder
= to_drm_encoder(nv_encoder
);
1748 encoder
->possible_crtcs
= dcbe
->heads
;
1749 encoder
->possible_clones
= 0;
1750 drm_encoder_init(connector
->dev
, encoder
, &nv50_sor_func
, type
,
1751 "sor-%04x-%04x", dcbe
->hasht
, dcbe
->hashm
);
1752 drm_encoder_helper_add(encoder
, &nv50_sor_help
);
1754 drm_connector_attach_encoder(connector
, encoder
);
1756 disp
->core
->func
->sor
->get_caps(disp
, nv_encoder
, ffs(dcbe
->or) - 1);
1757 nv50_outp_dump_caps(drm
, nv_encoder
);
1759 if (dcbe
->type
== DCB_OUTPUT_DP
) {
1760 struct nvkm_i2c_aux
*aux
=
1761 nvkm_i2c_aux_find(i2c
, dcbe
->i2c_index
);
1763 mutex_init(&nv_encoder
->dp
.hpd_irq_lock
);
1766 if (disp
->disp
->object
.oclass
< GF110_DISP
) {
1767 /* HW has no support for address-only
1768 * transactions, so we're required to
1769 * use custom I2C-over-AUX code.
1771 nv_encoder
->i2c
= &aux
->i2c
;
1773 nv_encoder
->i2c
= &nv_connector
->aux
.ddc
;
1775 nv_encoder
->aux
= aux
;
/* MST: never on eDP panels, and only if the VBIOS DP table allows it.
 * 16 is the AUX transfer-size limit passed to the topology manager. */
1778 if (nv_connector
->type
!= DCB_CONNECTOR_eDP
&&
1779 nv50_has_mst(drm
)) {
1780 ret
= nv50_mstm_new(nv_encoder
, &nv_connector
->aux
,
1781 16, nv_connector
->base
.base
.id
,
1782 &nv_encoder
->dp
.mstm
);
1787 struct nvkm_i2c_bus
*bus
=
1788 nvkm_i2c_bus_find(i2c
, dcbe
->i2c_index
);
1790 nv_encoder
->i2c
= &bus
->i2c
;
1793 return nvif_outp_ctor(disp
->disp
, nv_encoder
->base
.base
.name
, dcbe
->id
, &nv_encoder
->outp
);
1796 /******************************************************************************
1798 *****************************************************************************/
/* .atomic_check for PIOR encoders: run the common output check, then double
 * the adjusted clock — PIORs feed an external encoder that runs the link at
 * twice the pixel rate (guard line between the call and the doubling is
 * missing from this excerpt).
 */
1800 nv50_pior_atomic_check(struct drm_encoder
*encoder
,
1801 struct drm_crtc_state
*crtc_state
,
1802 struct drm_connector_state
*conn_state
)
1804 int ret
= nv50_outp_atomic_check(encoder
, crtc_state
, conn_state
);
1807 crtc_state
->adjusted_mode
.clock
*= 2;
/* Atomic-disable for PIOR encoders: program PIOR_SET_CONTROL with owner
 * NONE (asyh == NULL), drop the CRTC binding, and release the OR. */
1812 nv50_pior_atomic_disable(struct drm_encoder
*encoder
, struct drm_atomic_state
*state
)
1814 struct nouveau_encoder
*nv_encoder
= nouveau_encoder(encoder
);
1815 struct nv50_core
*core
= nv50_disp(encoder
->dev
)->core
;
1816 const u32 ctrl
= NVDEF(NV507D
, PIOR_SET_CONTROL
, OWNER
, NONE
);
1818 core
->func
->pior
->ctrl(core
, nv_encoder
->outp
.or.id
, ctrl
, NULL
);
1819 nv_encoder
->crtc
= NULL
;
1820 nvif_outp_release(&nv_encoder
->outp
);
/* Atomic-enable for PIOR encoders: build the PIOR_SET_CONTROL word from the
 * owning head, map the head's bpc to a pixel depth, acquire the OR per DCB
 * output type (both visible cases use the external-TMDS-encoder protocol),
 * program the core channel, and record the CRTC binding.  Some break/
 * default lines are missing from this excerpt.
 */
1824 nv50_pior_atomic_enable(struct drm_encoder
*encoder
, struct drm_atomic_state
*state
)
1826 struct nouveau_encoder
*nv_encoder
= nouveau_encoder(encoder
);
1827 struct nouveau_crtc
*nv_crtc
= nv50_outp_get_new_crtc(state
, nv_encoder
);
1828 struct nv50_head_atom
*asyh
=
1829 nv50_head_atom(drm_atomic_get_new_crtc_state(state
, &nv_crtc
->base
));
1830 struct nv50_core
*core
= nv50_disp(encoder
->dev
)->core
;
1833 switch (nv_crtc
->index
) {
1834 case 0: ctrl
|= NVDEF(NV507D
, PIOR_SET_CONTROL
, OWNER
, HEAD0
); break;
1835 case 1: ctrl
|= NVDEF(NV507D
, PIOR_SET_CONTROL
, OWNER
, HEAD1
); break;
/* bpc -> PIOR pixel-depth field; anything unexpected falls back to the
 * hardware default depth. */
1841 switch (asyh
->or.bpc
) {
1842 case 10: asyh
->or.depth
= NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444
; break;
1843 case 8: asyh
->or.depth
= NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444
; break;
1844 case 6: asyh
->or.depth
= NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444
; break;
1845 default: asyh
->or.depth
= NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT
; break;
1848 switch (nv_encoder
->dcb
->type
) {
1849 case DCB_OUTPUT_TMDS
:
1850 ctrl
|= NVDEF(NV507D
, PIOR_SET_CONTROL
, PROTOCOL
, EXT_TMDS_ENC
);
1851 nvif_outp_acquire_tmds(&nv_encoder
->outp
, false, false, 0, 0, 0, false);
1854 ctrl
|= NVDEF(NV507D
, PIOR_SET_CONTROL
, PROTOCOL
, EXT_TMDS_ENC
);
1855 nvif_outp_acquire_dp(&nv_encoder
->outp
, nv_encoder
->dp
.dpcd
, 0, 0, false, false);
1862 core
->func
->pior
->ctrl(core
, nv_encoder
->outp
.or.id
, ctrl
, asyh
);
1863 nv_encoder
->crtc
= &nv_crtc
->base
;
/* Encoder helper vtable for PIORs. */
1866 static const struct drm_encoder_helper_funcs
1868 .atomic_check
= nv50_pior_atomic_check
,
1869 .atomic_enable
= nv50_pior_atomic_enable
,
1870 .atomic_disable
= nv50_pior_atomic_disable
,
/* .destroy hook for PIOR encoders: tear down the nvif output object and the
 * DRM encoder, and destroy the HPD IRQ mutex (always initialised in
 * nv50_pior_create(), unlike the SOR case which gates it on DP). */
1874 nv50_pior_destroy(struct drm_encoder
*encoder
)
1876 struct nouveau_encoder
*nv_encoder
= nouveau_encoder(encoder
);
1878 nvif_outp_dtor(&nv_encoder
->outp
);
1880 drm_encoder_cleanup(encoder
);
1882 mutex_destroy(&nv_encoder
->dp
.hpd_irq_lock
);
/* Encoder function vtable for PIORs. */
1886 static const struct drm_encoder_funcs
1888 .destroy
= nv50_pior_destroy
,
/* Create and register a PIOR encoder for @dcbe: pick the external I2C bus
 * or AUX channel from the DCB extdev index depending on output type
 * (TMDS vs. the other visible case, presumably DP — second case's label is
 * missing from this excerpt), allocate the encoder, register and attach it,
 * query caps, and construct the nvif output object.
 */
1892 nv50_pior_create(struct drm_connector
*connector
, struct dcb_output
*dcbe
)
1894 struct drm_device
*dev
= connector
->dev
;
1895 struct nouveau_drm
*drm
= nouveau_drm(dev
);
1896 struct nv50_disp
*disp
= nv50_disp(dev
);
1897 struct nvkm_i2c
*i2c
= nvxx_i2c(&drm
->client
.device
);
1898 struct nvkm_i2c_bus
*bus
= NULL
;
1899 struct nvkm_i2c_aux
*aux
= NULL
;
1900 struct i2c_adapter
*ddc
;
1901 struct nouveau_encoder
*nv_encoder
;
1902 struct drm_encoder
*encoder
;
1905 switch (dcbe
->type
) {
1906 case DCB_OUTPUT_TMDS
:
1907 bus
= nvkm_i2c_bus_find(i2c
, NVKM_I2C_BUS_EXT(dcbe
->extdev
));
1908 ddc
= bus
? &bus
->i2c
: NULL
;
1909 type
= DRM_MODE_ENCODER_TMDS
;
1912 aux
= nvkm_i2c_aux_find(i2c
, NVKM_I2C_AUX_EXT(dcbe
->extdev
));
1913 ddc
= aux
? &aux
->i2c
: NULL
;
1914 type
= DRM_MODE_ENCODER_TMDS
;
1920 nv_encoder
= kzalloc(sizeof(*nv_encoder
), GFP_KERNEL
);
1923 nv_encoder
->dcb
= dcbe
;
1924 nv_encoder
->i2c
= ddc
;
1925 nv_encoder
->aux
= aux
;
1927 mutex_init(&nv_encoder
->dp
.hpd_irq_lock
);
1929 encoder
= to_drm_encoder(nv_encoder
);
1930 encoder
->possible_crtcs
= dcbe
->heads
;
1931 encoder
->possible_clones
= 0;
1932 drm_encoder_init(connector
->dev
, encoder
, &nv50_pior_func
, type
,
1933 "pior-%04x-%04x", dcbe
->hasht
, dcbe
->hashm
);
1934 drm_encoder_helper_add(encoder
, &nv50_pior_help
);
1936 drm_connector_attach_encoder(connector
, encoder
);
1938 disp
->core
->func
->pior
->get_caps(disp
, nv_encoder
, ffs(dcbe
->or) - 1);
1939 nv50_outp_dump_caps(drm
, nv_encoder
);
1941 return nvif_outp_ctor(disp
->disp
, nv_encoder
->base
.base
.name
, dcbe
->id
, &nv_encoder
->outp
);
1944 /******************************************************************************
1946 *****************************************************************************/
/* Push a core-channel update and wait for completion.  Around the update,
 * every MST manager with state in this commit is prepared beforehand
 * (payload programming) and cleaned up afterwards (ACT handling) — the
 * modified-state guards for those calls are missing from this excerpt.
 * A notifier in the sync buffer is armed before the update and polled
 * after; timing out only logs an error.
 */
1949 nv50_disp_atomic_commit_core(struct drm_atomic_state
*state
, u32
*interlock
)
1951 struct drm_dp_mst_topology_mgr
*mgr
;
1952 struct drm_dp_mst_topology_state
*mst_state
;
1953 struct nouveau_drm
*drm
= nouveau_drm(state
->dev
);
1954 struct nv50_disp
*disp
= nv50_disp(drm
->dev
);
1955 struct nv50_core
*core
= disp
->core
;
1956 struct nv50_mstm
*mstm
;
1959 NV_ATOMIC(drm
, "commit core %08x\n", interlock
[NV50_DISP_INTERLOCK_BASE
]);
1961 for_each_new_mst_mgr_in_state(state
, mgr
, mst_state
, i
) {
1962 mstm
= nv50_mstm(mgr
);
1964 nv50_mstm_prepare(state
, mst_state
, mstm
);
1967 core
->func
->ntfy_init(disp
->sync
, NV50_DISP_CORE_NTFY
);
1968 core
->func
->update(core
, interlock
, true);
1969 if (core
->func
->ntfy_wait_done(disp
->sync
, NV50_DISP_CORE_NTFY
,
1970 disp
->core
->chan
.base
.device
))
1971 NV_ERROR(drm
, "core notifier timeout\n");
1973 for_each_new_mst_mgr_in_state(state
, mgr
, mst_state
, i
) {
1974 mstm
= nv50_mstm(mgr
);
1976 nv50_mstm_cleanup(state
, mst_state
, mstm
);
/* Flush pending window-channel updates: for every plane in the commit
 * whose interlock bits are set for its channel type, push its window
 * update (if the window implements one).
 */
1981 nv50_disp_atomic_commit_wndw(struct drm_atomic_state
*state
, u32
*interlock
)
1983 struct drm_plane_state
*new_plane_state
;
1984 struct drm_plane
*plane
;
1987 for_each_new_plane_in_state(state
, plane
, new_plane_state
, i
) {
1988 struct nv50_wndw
*wndw
= nv50_wndw(plane
);
1989 if (interlock
[wndw
->interlock
.type
] & wndw
->interlock
.data
) {
1990 if (wndw
->func
->update
)
1991 wndw
->func
->update(wndw
, interlock
);
/* The commit tail: applies a swapped atomic state to hardware in the strict
 * ordering EVO/NVD requires.  Phases, in order:
 *   1. stop CRC reporting, wait for fences/dependencies (incl. MST);
 *   2. disable heads, planes and output paths being turned off, doing an
 *      immediate core+window flush where an output demands it;
 *   3. flush all disables through the core channel;
 *   4. enable/update output paths, then heads;
 *   5. push the window-ownership update separately when window assignment
 *      changes (see the long comment below about EFI GOP state mismatches),
 *      then the deferred part of the head updates;
 *   6. update planes, flush window channels, and do a final core update
 *      (skipping the synchronous core commit for pure cursor updates);
 *   7. wait for windows to arm, deliver vblank events, restart CRC
 *      reporting, and run the standard commit-cleanup helpers.
 * Drops the runtime-PM reference taken by nv50_disp_atomic_commit().
 * Many brace/guard lines are missing from this excerpt; comments describe
 * only the visible calls.
 */
1997 nv50_disp_atomic_commit_tail(struct drm_atomic_state
*state
)
1999 struct drm_device
*dev
= state
->dev
;
2000 struct drm_crtc_state
*new_crtc_state
, *old_crtc_state
;
2001 struct drm_crtc
*crtc
;
2002 struct drm_plane_state
*new_plane_state
;
2003 struct drm_plane
*plane
;
2004 struct nouveau_drm
*drm
= nouveau_drm(dev
);
2005 struct nv50_disp
*disp
= nv50_disp(dev
);
2006 struct nv50_atom
*atom
= nv50_atom(state
);
2007 struct nv50_core
*core
= disp
->core
;
2008 struct nv50_outp_atom
*outp
, *outt
;
2009 u32 interlock
[NV50_DISP_INTERLOCK__SIZE
] = {};
2011 bool flushed
= false;
2013 NV_ATOMIC(drm
, "commit %d %d\n", atom
->lock_core
, atom
->flush_disable
);
2014 nv50_crc_atomic_stop_reporting(state
);
2015 drm_atomic_helper_wait_for_fences(dev
, state
, false);
2016 drm_atomic_helper_wait_for_dependencies(state
);
2017 drm_dp_mst_atomic_wait_for_dependencies(state
);
2018 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
2019 drm_atomic_helper_calc_timestamping_constants(state
);
2021 if (atom
->lock_core
)
2022 mutex_lock(&disp
->mutex
);
2024 /* Disable head(s). */
2025 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
2026 struct nv50_head_atom
*asyh
= nv50_head_atom(new_crtc_state
);
2027 struct nv50_head
*head
= nv50_head(crtc
);
2029 NV_ATOMIC(drm
, "%s: clr %04x (set %04x)\n", crtc
->name
,
2030 asyh
->clr
.mask
, asyh
->set
.mask
);
2032 if (old_crtc_state
->active
&& !new_crtc_state
->active
) {
/* Head turning off: balance the RPM ref held per active CRTC. */
2033 pm_runtime_put_noidle(dev
->dev
);
2034 drm_crtc_vblank_off(crtc
);
2037 if (asyh
->clr
.mask
) {
2038 nv50_head_flush_clr(head
, asyh
, atom
->flush_disable
);
2039 interlock
[NV50_DISP_INTERLOCK_CORE
] |= 1;
2043 /* Disable plane(s). */
2044 for_each_new_plane_in_state(state
, plane
, new_plane_state
, i
) {
2045 struct nv50_wndw_atom
*asyw
= nv50_wndw_atom(new_plane_state
);
2046 struct nv50_wndw
*wndw
= nv50_wndw(plane
);
2048 NV_ATOMIC(drm
, "%s: clr %02x (set %02x)\n", plane
->name
,
2049 asyw
->clr
.mask
, asyw
->set
.mask
);
2050 if (!asyw
->clr
.mask
)
2053 nv50_wndw_flush_clr(wndw
, interlock
, atom
->flush_disable
, asyw
);
2056 /* Disable output path(s). */
2057 list_for_each_entry(outp
, &atom
->outp
, head
) {
2058 const struct drm_encoder_helper_funcs
*help
;
2059 struct drm_encoder
*encoder
;
2061 encoder
= outp
->encoder
;
2062 help
= encoder
->helper_private
;
2064 NV_ATOMIC(drm
, "%s: clr %02x (set %02x)\n", encoder
->name
,
2065 outp
->clr
.mask
, outp
->set
.mask
);
2067 if (outp
->clr
.mask
) {
2068 help
->atomic_disable(encoder
, state
);
2069 interlock
[NV50_DISP_INTERLOCK_CORE
] |= 1;
2070 if (outp
->flush_disable
) {
/* Output requires its disable to land before anything else:
 * flush windows + core immediately and reset the interlocks. */
2071 nv50_disp_atomic_commit_wndw(state
, interlock
);
2072 nv50_disp_atomic_commit_core(state
, interlock
);
2073 memset(interlock
, 0x00, sizeof(interlock
));
2080 /* Flush disable. */
2081 if (interlock
[NV50_DISP_INTERLOCK_CORE
]) {
2082 if (atom
->flush_disable
) {
2083 nv50_disp_atomic_commit_wndw(state
, interlock
);
2084 nv50_disp_atomic_commit_core(state
, interlock
);
2085 memset(interlock
, 0x00, sizeof(interlock
));
2092 nv50_crc_atomic_release_notifier_contexts(state
);
2093 nv50_crc_atomic_init_notifier_contexts(state
);
2095 /* Update output path(s). */
2096 list_for_each_entry_safe(outp
, outt
, &atom
->outp
, head
) {
2097 const struct drm_encoder_helper_funcs
*help
;
2098 struct drm_encoder
*encoder
;
2100 encoder
= outp
->encoder
;
2101 help
= encoder
->helper_private
;
2103 NV_ATOMIC(drm
, "%s: set %02x (clr %02x)\n", encoder
->name
,
2104 outp
->set
.mask
, outp
->clr
.mask
);
2106 if (outp
->set
.mask
) {
2107 help
->atomic_enable(encoder
, state
);
2108 interlock
[NV50_DISP_INTERLOCK_CORE
] = 1;
/* outp_atom entries are commit-local; free them as we go. */
2111 list_del(&outp
->head
);
2115 /* Update head(s). */
2116 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
2117 struct nv50_head_atom
*asyh
= nv50_head_atom(new_crtc_state
);
2118 struct nv50_head
*head
= nv50_head(crtc
);
2120 NV_ATOMIC(drm
, "%s: set %04x (clr %04x)\n", crtc
->name
,
2121 asyh
->set
.mask
, asyh
->clr
.mask
);
2123 if (asyh
->set
.mask
) {
2124 nv50_head_flush_set(head
, asyh
);
2125 interlock
[NV50_DISP_INTERLOCK_CORE
] = 1;
2128 if (new_crtc_state
->active
) {
2129 if (!old_crtc_state
->active
) {
2130 drm_crtc_vblank_on(crtc
);
/* Head turning on: hold an RPM ref for as long as it's active. */
2131 pm_runtime_get_noresume(dev
->dev
);
2133 if (new_crtc_state
->event
)
2134 drm_crtc_vblank_get(crtc
);
2138 /* Update window->head assignment.
2140 * This has to happen in an update that's not interlocked with
2141 * any window channels to avoid hitting HW error checks.
2143 *TODO: Proper handling of window ownership (Turing apparently
2144 * supports non-fixed mappings).
2146 if (core
->assign_windows
) {
2147 core
->func
->wndw
.owner(core
);
2148 nv50_disp_atomic_commit_core(state
, interlock
);
2149 core
->assign_windows
= false;
2150 interlock
[NV50_DISP_INTERLOCK_CORE
] = 0;
2153 /* Finish updating head(s)...
2155 * NVD is rather picky about both where window assignments can change,
2156 * *and* about certain core and window channel states matching.
2158 * The EFI GOP driver on newer GPUs configures window channels with a
2159 * different output format to what we do, and the core channel update
2160 * in the assign_windows case above would result in a state mismatch.
2162 * Delay some of the head update until after that point to workaround
2163 * the issue. This only affects the initial modeset.
2165 * TODO: handle this better when adding flexible window mapping
2167 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
2168 struct nv50_head_atom
*asyh
= nv50_head_atom(new_crtc_state
);
2169 struct nv50_head
*head
= nv50_head(crtc
);
2171 NV_ATOMIC(drm
, "%s: set %04x (clr %04x)\n", crtc
->name
,
2172 asyh
->set
.mask
, asyh
->clr
.mask
);
2174 if (asyh
->set
.mask
) {
2175 nv50_head_flush_set_wndw(head
, asyh
);
2176 interlock
[NV50_DISP_INTERLOCK_CORE
] = 1;
2180 /* Update plane(s). */
2181 for_each_new_plane_in_state(state
, plane
, new_plane_state
, i
) {
2182 struct nv50_wndw_atom
*asyw
= nv50_wndw_atom(new_plane_state
);
2183 struct nv50_wndw
*wndw
= nv50_wndw(plane
);
2185 NV_ATOMIC(drm
, "%s: set %02x (clr %02x)\n", plane
->name
,
2186 asyw
->set
.mask
, asyw
->clr
.mask
);
2187 if ( !asyw
->set
.mask
&&
2188 (!asyw
->clr
.mask
|| atom
->flush_disable
))
2191 nv50_wndw_flush_set(wndw
, interlock
, asyw
);
2195 nv50_disp_atomic_commit_wndw(state
, interlock
);
2197 if (interlock
[NV50_DISP_INTERLOCK_CORE
]) {
/* Pure legacy-cursor updates skip the synchronous core commit and
 * just kick a non-blocking core update instead. */
2198 if (interlock
[NV50_DISP_INTERLOCK_BASE
] ||
2199 interlock
[NV50_DISP_INTERLOCK_OVLY
] ||
2200 interlock
[NV50_DISP_INTERLOCK_WNDW
] ||
2201 !atom
->state
.legacy_cursor_update
)
2202 nv50_disp_atomic_commit_core(state
, interlock
);
2204 disp
->core
->func
->update(disp
->core
, interlock
, false);
2207 if (atom
->lock_core
)
2208 mutex_unlock(&disp
->mutex
);
2210 /* Wait for HW to signal completion. */
2211 for_each_new_plane_in_state(state
, plane
, new_plane_state
, i
) {
2212 struct nv50_wndw_atom
*asyw
= nv50_wndw_atom(new_plane_state
);
2213 struct nv50_wndw
*wndw
= nv50_wndw(plane
);
2214 int ret
= nv50_wndw_wait_armed(wndw
, asyw
);
2216 NV_ERROR(drm
, "%s: timeout\n", plane
->name
);
2219 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
2220 if (new_crtc_state
->event
) {
2221 unsigned long flags
;
2222 /* Get correct count/ts if racing with vblank irq */
2223 if (new_crtc_state
->active
)
2224 drm_crtc_accurate_vblank_count(crtc
);
2225 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
2226 drm_crtc_send_vblank_event(crtc
, new_crtc_state
->event
);
2227 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
2229 new_crtc_state
->event
= NULL
;
2230 if (new_crtc_state
->active
)
2231 drm_crtc_vblank_put(crtc
);
2235 nv50_crc_atomic_start_reporting(state
);
2237 nv50_crc_atomic_release_notifier_contexts(state
);
2239 drm_atomic_helper_commit_hw_done(state
);
2240 drm_atomic_helper_cleanup_planes(dev
, state
);
2241 drm_atomic_helper_commit_cleanup_done(state
);
2242 drm_atomic_state_put(state
);
2244 /* Drop the RPM ref we got from nv50_disp_atomic_commit() */
2245 pm_runtime_mark_last_busy(dev
->dev
);
2246 pm_runtime_put_autosuspend(dev
->dev
);
/* Worker entry point for non-blocking commits: recover the atomic state
 * from the embedded work_struct and run the commit tail. */
2250 nv50_disp_atomic_commit_work(struct work_struct
*work
)
2252 struct drm_atomic_state
*state
=
2253 container_of(work
, typeof(*state
), commit_work
);
2254 nv50_disp_atomic_commit_tail(state
);
/* .atomic_commit implementation.  Takes a runtime-PM reference (tolerating
 * -EACCES), sets up the commit, prepares planes, waits for fences when
 * blocking, swaps in the new state, enables window notifiers for planes
 * that will flip, then either queues the tail on system_unbound_wq
 * (non-blocking) or runs it inline.  A second RPM reference is taken for
 * the tail, which releases it when done.  Error-unwind lines are missing
 * from this excerpt.
 */
2258 nv50_disp_atomic_commit(struct drm_device
*dev
,
2259 struct drm_atomic_state
*state
, bool nonblock
)
2261 struct drm_plane_state
*new_plane_state
;
2262 struct drm_plane
*plane
;
2265 ret
= pm_runtime_get_sync(dev
->dev
);
2266 if (ret
< 0 && ret
!= -EACCES
) {
2267 pm_runtime_put_autosuspend(dev
->dev
);
2271 ret
= drm_atomic_helper_setup_commit(state
, nonblock
);
2275 INIT_WORK(&state
->commit_work
, nv50_disp_atomic_commit_work
);
2277 ret
= drm_atomic_helper_prepare_planes(dev
, state
);
2282 ret
= drm_atomic_helper_wait_for_fences(dev
, state
, true);
2287 ret
= drm_atomic_helper_swap_state(state
, true);
2291 for_each_new_plane_in_state(state
, plane
, new_plane_state
, i
) {
2292 struct nv50_wndw_atom
*asyw
= nv50_wndw_atom(new_plane_state
);
2293 struct nv50_wndw
*wndw
= nv50_wndw(plane
);
/* Arm the completion notifier only for planes pushing a new image. */
2295 if (asyw
->set
.image
)
2296 nv50_wndw_ntfy_enable(wndw
, asyw
);
/* Reference consumed by the commit tail via drm_atomic_state_put(). */
2299 drm_atomic_state_get(state
);
2302 * Grab another RPM ref for the commit tail, which will release the
2303 * ref when it's finished
2305 pm_runtime_get_noresume(dev
->dev
);
2308 queue_work(system_unbound_wq
, &state
->commit_work
);
2310 nv50_disp_atomic_commit_tail(state
);
2314 drm_atomic_helper_cleanup_planes(dev
, state
);
2316 pm_runtime_put_autosuspend(dev
->dev
);
2320 static struct nv50_outp_atom
*
2321 nv50_disp_outp_atomic_add(struct nv50_atom
*atom
, struct drm_encoder
*encoder
)
2323 struct nv50_outp_atom
*outp
;
2325 list_for_each_entry(outp
, &atom
->outp
, head
) {
2326 if (outp
->encoder
== encoder
)
2330 outp
= kzalloc(sizeof(*outp
), GFP_KERNEL
);
2332 return ERR_PTR(-ENOMEM
);
2334 list_add(&outp
->head
, &atom
->outp
);
2335 outp
->encoder
= encoder
;
2340 nv50_disp_outp_atomic_check_clr(struct nv50_atom
*atom
,
2341 struct drm_connector_state
*old_connector_state
)
2343 struct drm_encoder
*encoder
= old_connector_state
->best_encoder
;
2344 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
2345 struct drm_crtc
*crtc
;
2346 struct nv50_outp_atom
*outp
;
2348 if (!(crtc
= old_connector_state
->crtc
))
2351 old_crtc_state
= drm_atomic_get_old_crtc_state(&atom
->state
, crtc
);
2352 new_crtc_state
= drm_atomic_get_new_crtc_state(&atom
->state
, crtc
);
2353 if (old_crtc_state
->active
&& drm_atomic_crtc_needs_modeset(new_crtc_state
)) {
2354 outp
= nv50_disp_outp_atomic_add(atom
, encoder
);
2356 return PTR_ERR(outp
);
2358 if (outp
->encoder
->encoder_type
== DRM_MODE_ENCODER_DPMST
) {
2359 outp
->flush_disable
= true;
2360 atom
->flush_disable
= true;
2362 outp
->clr
.ctrl
= true;
2363 atom
->lock_core
= true;
2370 nv50_disp_outp_atomic_check_set(struct nv50_atom
*atom
,
2371 struct drm_connector_state
*connector_state
)
2373 struct drm_encoder
*encoder
= connector_state
->best_encoder
;
2374 struct drm_crtc_state
*new_crtc_state
;
2375 struct drm_crtc
*crtc
;
2376 struct nv50_outp_atom
*outp
;
2378 if (!(crtc
= connector_state
->crtc
))
2381 new_crtc_state
= drm_atomic_get_new_crtc_state(&atom
->state
, crtc
);
2382 if (new_crtc_state
->active
&& drm_atomic_crtc_needs_modeset(new_crtc_state
)) {
2383 outp
= nv50_disp_outp_atomic_add(atom
, encoder
);
2385 return PTR_ERR(outp
);
2387 outp
->set
.ctrl
= true;
2388 atom
->lock_core
= true;
2395 nv50_disp_atomic_check(struct drm_device
*dev
, struct drm_atomic_state
*state
)
2397 struct nv50_atom
*atom
= nv50_atom(state
);
2398 struct nv50_core
*core
= nv50_disp(dev
)->core
;
2399 struct drm_connector_state
*old_connector_state
, *new_connector_state
;
2400 struct drm_connector
*connector
;
2401 struct drm_crtc_state
*new_crtc_state
;
2402 struct drm_crtc
*crtc
;
2403 struct nv50_head
*head
;
2404 struct nv50_head_atom
*asyh
;
2407 if (core
->assign_windows
&& core
->func
->head
->static_wndw_map
) {
2408 drm_for_each_crtc(crtc
, dev
) {
2409 new_crtc_state
= drm_atomic_get_crtc_state(state
,
2411 if (IS_ERR(new_crtc_state
))
2412 return PTR_ERR(new_crtc_state
);
2414 head
= nv50_head(crtc
);
2415 asyh
= nv50_head_atom(new_crtc_state
);
2416 core
->func
->head
->static_wndw_map(head
, asyh
);
2420 /* We need to handle colour management on a per-plane basis. */
2421 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
2422 if (new_crtc_state
->color_mgmt_changed
) {
2423 ret
= drm_atomic_add_affected_planes(state
, crtc
);
2429 ret
= drm_atomic_helper_check(dev
, state
);
2433 for_each_oldnew_connector_in_state(state
, connector
, old_connector_state
, new_connector_state
, i
) {
2434 ret
= nv50_disp_outp_atomic_check_clr(atom
, old_connector_state
);
2438 ret
= nv50_disp_outp_atomic_check_set(atom
, new_connector_state
);
2443 ret
= drm_dp_mst_atomic_check(state
);
2447 nv50_crc_atomic_check_outp(atom
);
2453 nv50_disp_atomic_state_clear(struct drm_atomic_state
*state
)
2455 struct nv50_atom
*atom
= nv50_atom(state
);
2456 struct nv50_outp_atom
*outp
, *outt
;
2458 list_for_each_entry_safe(outp
, outt
, &atom
->outp
, head
) {
2459 list_del(&outp
->head
);
2463 drm_atomic_state_default_clear(state
);
2467 nv50_disp_atomic_state_free(struct drm_atomic_state
*state
)
2469 struct nv50_atom
*atom
= nv50_atom(state
);
2470 drm_atomic_state_default_release(&atom
->state
);
2474 static struct drm_atomic_state
*
2475 nv50_disp_atomic_state_alloc(struct drm_device
*dev
)
2477 struct nv50_atom
*atom
;
2478 if (!(atom
= kzalloc(sizeof(*atom
), GFP_KERNEL
)) ||
2479 drm_atomic_state_init(dev
, &atom
->state
) < 0) {
2483 INIT_LIST_HEAD(&atom
->outp
);
2484 return &atom
->state
;
2487 static const struct drm_mode_config_funcs
2489 .fb_create
= nouveau_user_framebuffer_create
,
2490 .output_poll_changed
= drm_fb_helper_output_poll_changed
,
2491 .atomic_check
= nv50_disp_atomic_check
,
2492 .atomic_commit
= nv50_disp_atomic_commit
,
2493 .atomic_state_alloc
= nv50_disp_atomic_state_alloc
,
2494 .atomic_state_clear
= nv50_disp_atomic_state_clear
,
2495 .atomic_state_free
= nv50_disp_atomic_state_free
,
2498 static const struct drm_mode_config_helper_funcs
2499 nv50_disp_helper_func
= {
2500 .atomic_commit_setup
= drm_dp_mst_atomic_setup_commit
,
2503 /******************************************************************************
2505 *****************************************************************************/
2508 nv50_display_fini(struct drm_device
*dev
, bool runtime
, bool suspend
)
2510 struct nouveau_drm
*drm
= nouveau_drm(dev
);
2511 struct drm_encoder
*encoder
;
2513 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
, head
) {
2514 if (encoder
->encoder_type
!= DRM_MODE_ENCODER_DPMST
)
2515 nv50_mstm_fini(nouveau_encoder(encoder
));
2519 cancel_work_sync(&drm
->hpd_work
);
2523 nv50_display_init(struct drm_device
*dev
, bool resume
, bool runtime
)
2525 struct nv50_core
*core
= nv50_disp(dev
)->core
;
2526 struct drm_encoder
*encoder
;
2528 if (resume
|| runtime
)
2529 core
->func
->init(core
);
2531 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
, head
) {
2532 if (encoder
->encoder_type
!= DRM_MODE_ENCODER_DPMST
) {
2533 struct nouveau_encoder
*nv_encoder
=
2534 nouveau_encoder(encoder
);
2535 nv50_mstm_init(nv_encoder
, runtime
);
2543 nv50_display_destroy(struct drm_device
*dev
)
2545 struct nv50_disp
*disp
= nv50_disp(dev
);
2547 nv50_audio_component_fini(nouveau_drm(dev
));
2549 nvif_object_unmap(&disp
->caps
);
2550 nvif_object_dtor(&disp
->caps
);
2551 nv50_core_del(&disp
->core
);
2553 nouveau_bo_unmap(disp
->sync
);
2555 nouveau_bo_unpin(disp
->sync
);
2556 nouveau_bo_ref(NULL
, &disp
->sync
);
2558 nouveau_display(dev
)->priv
= NULL
;
2563 nv50_display_create(struct drm_device
*dev
)
2565 struct nvif_device
*device
= &nouveau_drm(dev
)->client
.device
;
2566 struct nouveau_drm
*drm
= nouveau_drm(dev
);
2567 struct dcb_table
*dcb
= &drm
->vbios
.dcb
;
2568 struct drm_connector
*connector
, *tmp
;
2569 struct nv50_disp
*disp
;
2570 struct dcb_output
*dcbe
;
2572 bool has_mst
= nv50_has_mst(drm
);
2574 disp
= kzalloc(sizeof(*disp
), GFP_KERNEL
);
2578 mutex_init(&disp
->mutex
);
2580 nouveau_display(dev
)->priv
= disp
;
2581 nouveau_display(dev
)->dtor
= nv50_display_destroy
;
2582 nouveau_display(dev
)->init
= nv50_display_init
;
2583 nouveau_display(dev
)->fini
= nv50_display_fini
;
2584 disp
->disp
= &nouveau_display(dev
)->disp
;
2585 dev
->mode_config
.funcs
= &nv50_disp_func
;
2586 dev
->mode_config
.helper_private
= &nv50_disp_helper_func
;
2587 dev
->mode_config
.quirk_addfb_prefer_xbgr_30bpp
= true;
2588 dev
->mode_config
.normalize_zpos
= true;
2590 /* small shared memory area we use for notifiers and semaphores */
2591 ret
= nouveau_bo_new(&drm
->client
, 4096, 0x1000,
2592 NOUVEAU_GEM_DOMAIN_VRAM
,
2593 0, 0x0000, NULL
, NULL
, &disp
->sync
);
2595 ret
= nouveau_bo_pin(disp
->sync
, NOUVEAU_GEM_DOMAIN_VRAM
, true);
2597 ret
= nouveau_bo_map(disp
->sync
);
2599 nouveau_bo_unpin(disp
->sync
);
2602 nouveau_bo_ref(NULL
, &disp
->sync
);
2608 /* allocate master evo channel */
2609 ret
= nv50_core_new(drm
, &disp
->core
);
2613 disp
->core
->func
->init(disp
->core
);
2614 if (disp
->core
->func
->caps_init
) {
2615 ret
= disp
->core
->func
->caps_init(drm
, disp
);
2620 /* Assign the correct format modifiers */
2621 if (disp
->disp
->object
.oclass
>= TU102_DISP
)
2622 nouveau_display(dev
)->format_modifiers
= wndwc57e_modifiers
;
2624 if (drm
->client
.device
.info
.family
>= NV_DEVICE_INFO_V0_FERMI
)
2625 nouveau_display(dev
)->format_modifiers
= disp90xx_modifiers
;
2627 nouveau_display(dev
)->format_modifiers
= disp50xx_modifiers
;
2629 /* FIXME: 256x256 cursors are supported on Kepler, however unlike Maxwell and later
2630 * generations Kepler requires that we use small pages (4K) for cursor scanout surfaces. The
2631 * proper fix for this is to teach nouveau to migrate fbs being used for the cursor plane to
2632 * small page allocations in prepare_fb(). When this is implemented, we should also force
2633 * large pages (128K) for ovly fbs in order to fix Kepler ovlys.
2634 * But until then, just limit cursors to 128x128 - which is small enough to avoid ever using
2637 if (disp
->disp
->object
.oclass
>= GM107_DISP
) {
2638 dev
->mode_config
.cursor_width
= 256;
2639 dev
->mode_config
.cursor_height
= 256;
2640 } else if (disp
->disp
->object
.oclass
>= GK104_DISP
) {
2641 dev
->mode_config
.cursor_width
= 128;
2642 dev
->mode_config
.cursor_height
= 128;
2644 dev
->mode_config
.cursor_width
= 64;
2645 dev
->mode_config
.cursor_height
= 64;
2648 /* create crtc objects to represent the hw heads */
2649 if (disp
->disp
->object
.oclass
>= GV100_DISP
)
2650 crtcs
= nvif_rd32(&device
->object
, 0x610060) & 0xff;
2652 if (disp
->disp
->object
.oclass
>= GF110_DISP
)
2653 crtcs
= nvif_rd32(&device
->object
, 0x612004) & 0xf;
2657 for (i
= 0; i
< fls(crtcs
); i
++) {
2658 struct nv50_head
*head
;
2660 if (!(crtcs
& (1 << i
)))
2663 head
= nv50_head_create(dev
, i
);
2665 ret
= PTR_ERR(head
);
2670 head
->msto
= nv50_msto_new(dev
, head
, i
);
2671 if (IS_ERR(head
->msto
)) {
2672 ret
= PTR_ERR(head
->msto
);
2678 * FIXME: This is a hack to workaround the following
2681 * https://gitlab.gnome.org/GNOME/mutter/issues/759
2682 * https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277
2684 * Once these issues are closed, this should be
2687 head
->msto
->encoder
.possible_crtcs
= crtcs
;
2691 /* create encoder/connector objects based on VBIOS DCB table */
2692 for (i
= 0, dcbe
= &dcb
->entry
[0]; i
< dcb
->entries
; i
++, dcbe
++) {
2693 connector
= nouveau_connector_create(dev
, dcbe
);
2694 if (IS_ERR(connector
))
2697 if (dcbe
->location
== DCB_LOC_ON_CHIP
) {
2698 switch (dcbe
->type
) {
2699 case DCB_OUTPUT_TMDS
:
2700 case DCB_OUTPUT_LVDS
:
2702 ret
= nv50_sor_create(connector
, dcbe
);
2704 case DCB_OUTPUT_ANALOG
:
2705 ret
= nv50_dac_create(connector
, dcbe
);
2712 ret
= nv50_pior_create(connector
, dcbe
);
2716 NV_WARN(drm
, "failed to create encoder %d/%d/%d: %d\n",
2717 dcbe
->location
, dcbe
->type
,
2718 ffs(dcbe
->or) - 1, ret
);
2723 /* cull any connectors we created that don't have an encoder */
2724 list_for_each_entry_safe(connector
, tmp
, &dev
->mode_config
.connector_list
, head
) {
2725 if (connector
->possible_encoders
)
2728 NV_WARN(drm
, "%s has no encoders, removing\n",
2730 connector
->funcs
->destroy(connector
);
2733 /* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
2734 dev
->vblank_disable_immediate
= true;
2736 nv50_audio_component_init(drm
);
2740 nv50_display_destroy(dev
);
2744 /******************************************************************************
2746 *****************************************************************************/
2748 /****************************************************************
2749 * Log2(block height) ----------------------------+ *
2750 * Page Kind ----------------------------------+ | *
2751 * Gob Height/Page Kind Generation ------+ | | *
2752 * Sector layout -------+ | | | *
2753 * Compression ------+ | | | | */
2754 const u64 disp50xx_modifiers
[] = { /* | | | | | */
2755 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 0),
2756 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 1),
2757 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 2),
2758 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 3),
2759 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 4),
2760 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 5),
2761 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 0),
2762 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 1),
2763 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 2),
2764 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 3),
2765 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 4),
2766 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 5),
2767 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 0),
2768 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 1),
2769 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 2),
2770 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 3),
2771 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 4),
2772 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 5),
2773 DRM_FORMAT_MOD_LINEAR
,
2774 DRM_FORMAT_MOD_INVALID
2777 /****************************************************************
2778 * Log2(block height) ----------------------------+ *
2779 * Page Kind ----------------------------------+ | *
2780 * Gob Height/Page Kind Generation ------+ | | *
2781 * Sector layout -------+ | | | *
2782 * Compression ------+ | | | | */
2783 const u64 disp90xx_modifiers
[] = { /* | | | | | */
2784 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 0),
2785 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 1),
2786 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 2),
2787 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 3),
2788 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 4),
2789 DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 5),
2790 DRM_FORMAT_MOD_LINEAR
,
2791 DRM_FORMAT_MOD_INVALID