/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <linux/dma-mapping.h>
#include <linux/hdmi.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_edid.h>

#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/cl5070.h>
#include <nvif/cl507a.h>
#include <nvif/cl507b.h>
#include <nvif/cl507c.h>
#include <nvif/cl507d.h>
#include <nvif/cl507e.h>
#include <nvif/event.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_fence.h"
#include "nouveau_fbcon.h"
#include "nv50_display.h"
/* EVO channel indices */
#define EVO_MASTER	(0x00)
#define EVO_FLIP(c)	(0x01 + (c))
#define EVO_OVLY(c)	(0x05 + (c))
#define EVO_OIMM(c)	(0x09 + (c))
#define EVO_CURS(c)	(0x0d + (c))

/* offsets in shared sync bo of various structures */
#define EVO_SYNC(c, o)	  ((c) * 0x0100 + (o))
#define EVO_MAST_NTFY	  EVO_SYNC(      0, 0x00)
#define EVO_FLIP_SEM0(c)  EVO_SYNC((c) + 1, 0x00)
#define EVO_FLIP_SEM1(c)  EVO_SYNC((c) + 1, 0x10)
#define EVO_FLIP_NTFY0(c) EVO_SYNC((c) + 1, 0x20)
#define EVO_FLIP_NTFY1(c) EVO_SYNC((c) + 1, 0x30)
/******************************************************************************
 * Atomic state
 *****************************************************************************/
#define nv50_atom(p) container_of((p), struct nv50_atom, state)

struct nv50_atom {
	struct drm_atomic_state state;

	struct list_head outp;
	bool lock_core;
};

struct nv50_outp_atom {
	struct list_head head;

	struct drm_encoder *encoder;
};

#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)

struct nv50_head_atom {
	struct drm_crtc_state state;

	struct nv50_head_mode {
		bool interlace;
		u32 clock;
		struct { u16 active, synce, blanke, blanks; } h;
		struct { u16 active, synce, blanke, blanks,
			     blank2s, blank2e, blankus; } v;
	} mode;

	/* view/lut/core/curs/base/ovly/dither/procamp shadow state and the
	 * set/clr flag masks used by the head flush code below. */
};
static inline struct nv50_head_atom *
nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(statec))
		return (void *)statec;
	return nv50_head_atom(statec);
}
#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)

struct nv50_wndw_atom {
	struct drm_plane_state state;

	/* sema/ntfy/image/lut/point shadow state and the set/clr flag
	 * masks used by the window flush code below. */
};
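/* Naming convention used by the atomic code below: "armw"/"armh" refer to
 * window/head state currently armed in hardware, while "asyw"/"asyh" refer
 * to the new state being assembled and checked for the pending flush.
 */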
/******************************************************************************
 * EVO channel
 *****************************************************************************/

struct nv50_chan {
	struct nvif_object user;
	struct nvif_device *device;
};
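/* nv50_chan_create() walks the caller's "oclass" list (ordered newest to
 * oldest display class) and instantiates the first class supported by the
 * display object, mapping the channel's user registers on success.
 */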
static int
nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size,
		 struct nv50_chan *chan)
{
	struct nvif_sclass *sclass;
	int ret, i, n;

	chan->device = device;

	ret = n = nvif_object_sclass_get(disp, &sclass);
	if (ret < 0)
		return ret;

	while (oclass[0]) {
		for (i = 0; i < n; i++) {
			if (sclass[i].oclass == oclass[0]) {
				ret = nvif_object_init(disp, 0, oclass[0],
						       data, size, &chan->user);
				if (ret == 0)
					nvif_object_map(&chan->user, NULL, 0);
				nvif_object_sclass_put(&sclass);
				return ret;
			}
		}
		oclass++;
	}

	nvif_object_sclass_put(&sclass);
	return -ENOSYS;
}
static void
nv50_chan_destroy(struct nv50_chan *chan)
{
	nvif_object_fini(&chan->user);
}
/******************************************************************************
 * PIO EVO channel
 *****************************************************************************/

struct nv50_pioc {
	struct nv50_chan base;
};

static void
nv50_pioc_destroy(struct nv50_pioc *pioc)
{
	nv50_chan_destroy(&pioc->base);
}

static int
nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size,
		 struct nv50_pioc *pioc)
{
	return nv50_chan_create(device, disp, oclass, head, data, size,
				&pioc->base);
}
/******************************************************************************
 * Overlay Immediate
 *****************************************************************************/

struct nv50_oimm {
	struct nv50_pioc base;
};

static int
nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
		 int head, struct nv50_oimm *oimm)
{
	struct nv50_disp_cursor_v0 args = {
		.head = head,
	};
	static const s32 oclass[] = {
		GK104_DISP_OVERLAY,
		GF110_DISP_OVERLAY,
		GT214_DISP_OVERLAY,
		G82_DISP_OVERLAY,
		NV50_DISP_OVERLAY,
		0
	};

	return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
				&oimm->base);
}
/******************************************************************************
 * DMA EVO channel
 *****************************************************************************/

struct nv50_dmac_ctxdma {
	struct list_head head;
	struct nvif_object object;
};

struct nv50_dmac {
	struct nv50_chan base;

	struct nvif_mem push;
	u32 *ptr;

	struct nvif_object sync;
	struct nvif_object vram;
	struct list_head ctxdma;

	/* Protects against concurrent pushbuf access to this channel, lock is
	 * grabbed by evo_wait (if the pushbuf reservation is successful) and
	 * dropped again by evo_kick. */
	struct mutex lock;
};
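/* DMA objects (ctxdmas) describing framebuffers to the display channel are
 * created lazily and cached on the channel's ctxdma list, keyed by a handle
 * derived from the buffer's kind (0xfb000000 | kind), so flips between
 * buffers of the same format can reuse the same object.
 */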
static void
nv50_dmac_ctxdma_del(struct nv50_dmac_ctxdma *ctxdma)
{
	nvif_object_fini(&ctxdma->object);
	list_del(&ctxdma->head);
	kfree(ctxdma);
}
static struct nv50_dmac_ctxdma *
nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
{
	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
	struct nv50_dmac_ctxdma *ctxdma;
	const u8    kind = fb->nvbo->kind;
	const u32 handle = 0xfb000000 | kind;
	struct {
		struct nv_dma_v0 base;
		union {
			struct nv50_dma_v0 nv50;
			struct gf100_dma_v0 gf100;
			struct gf119_dma_v0 gf119;
		};
	} args = {};
	u32 argc = sizeof(args.base);
	int ret;

	list_for_each_entry(ctxdma, &dmac->ctxdma, head) {
		if (ctxdma->object.handle == handle)
			return ctxdma;
	}

	if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);
	list_add(&ctxdma->head, &dmac->ctxdma);

	args.base.target = NV_DMA_V0_TARGET_VRAM;
	args.base.access = NV_DMA_V0_ACCESS_RDWR;
	args.base.start  = 0;
	args.base.limit  = drm->client.device.info.ram_user - 1;

	if (drm->client.device.info.chipset < 0x80) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xc0) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		args.nv50.kind = kind;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xd0) {
		args.gf100.kind = kind;
		argc += sizeof(args.gf100);
	} else {
		args.gf119.page = GF119_DMA_V0_PAGE_LP;
		args.gf119.kind = kind;
		argc += sizeof(args.gf119);
	}

	ret = nvif_object_init(&dmac->base.user, handle, NV_DMA_IN_MEMORY,
			       &args, argc, &ctxdma->object);
	if (ret) {
		nv50_dmac_ctxdma_del(ctxdma);
		return ERR_PTR(ret);
	}

	return ctxdma;
}
static void
nv50_dmac_destroy(struct nv50_dmac *dmac)
{
	struct nv50_dmac_ctxdma *ctxdma, *ctxtmp;

	list_for_each_entry_safe(ctxdma, ctxtmp, &dmac->ctxdma, head) {
		nv50_dmac_ctxdma_del(ctxdma);
	}

	nvif_object_fini(&dmac->vram);
	nvif_object_fini(&dmac->sync);

	nv50_chan_destroy(&dmac->base);
	nvif_mem_fini(&dmac->push);
}
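/* A DMA EVO channel owns a page-sized coherent pushbuf and two fixed DMA
 * objects: handle 0xf0000000 windows the 4KiB sync area of the shared
 * semaphore/notifier buffer, and 0xf0000001 spans the whole of VRAM.
 */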
static int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
		 struct nv50_dmac *dmac)
{
	struct nouveau_cli *cli = (void *)device->object.client;
	struct nv50_disp_core_channel_dma_v0 *args = data;
	int ret;

	mutex_init(&dmac->lock);
	INIT_LIST_HEAD(&dmac->ctxdma);

	ret = nvif_mem_init_map(&cli->mmu, NVIF_MEM_COHERENT, 0x1000,
				&dmac->push);
	if (ret)
		return ret;

	dmac->ptr = dmac->push.object.map.ptr;

	args->pushbuf = nvif_handle(&dmac->push.object);

	ret = nv50_chan_create(device, disp, oclass, head, data, size,
			       &dmac->base);
	if (ret)
		return ret;

	ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = syncbuf + 0x0000,
					.limit = syncbuf + 0x0fff,
			       }, sizeof(struct nv_dma_v0),
			       &dmac->sync);
	if (ret)
		return ret;

	ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = 0,
					.limit = device->info.ram_user - 1,
			       }, sizeof(struct nv_dma_v0),
			       &dmac->vram);
	if (ret)
		return ret;

	return ret;
}
/******************************************************************************
 * Core
 *****************************************************************************/

struct nv50_mast {
	struct nv50_dmac base;
};

static int
nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
		 u64 syncbuf, struct nv50_mast *core)
{
	struct nv50_disp_core_channel_dma_v0 args = {};
	static const s32 oclass[] = {
		GP102_DISP_CORE_CHANNEL_DMA,
		GP100_DISP_CORE_CHANNEL_DMA,
		GM200_DISP_CORE_CHANNEL_DMA,
		GM107_DISP_CORE_CHANNEL_DMA,
		GK110_DISP_CORE_CHANNEL_DMA,
		GK104_DISP_CORE_CHANNEL_DMA,
		GF110_DISP_CORE_CHANNEL_DMA,
		GT214_DISP_CORE_CHANNEL_DMA,
		GT206_DISP_CORE_CHANNEL_DMA,
		GT200_DISP_CORE_CHANNEL_DMA,
		G82_DISP_CORE_CHANNEL_DMA,
		NV50_DISP_CORE_CHANNEL_DMA,
		0
	};

	return nv50_dmac_create(device, disp, oclass, 0, &args, sizeof(args),
				syncbuf, &core->base);
}
/******************************************************************************
 * Base
 *****************************************************************************/

struct nv50_sync {
	struct nv50_dmac base;
	u32 addr;
	u32 data;
};

static int
nv50_base_create(struct nvif_device *device, struct nvif_object *disp,
		 int head, u64 syncbuf, struct nv50_sync *base)
{
	struct nv50_disp_base_channel_dma_v0 args = {
		.head = head,
	};
	static const s32 oclass[] = {
		GK110_DISP_BASE_CHANNEL_DMA,
		GK104_DISP_BASE_CHANNEL_DMA,
		GF110_DISP_BASE_CHANNEL_DMA,
		GT214_DISP_BASE_CHANNEL_DMA,
		GT200_DISP_BASE_CHANNEL_DMA,
		G82_DISP_BASE_CHANNEL_DMA,
		NV50_DISP_BASE_CHANNEL_DMA,
		0
	};

	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
				syncbuf, &base->base);
}
/******************************************************************************
 * Overlay
 *****************************************************************************/

struct nv50_ovly {
	struct nv50_dmac base;
};

static int
nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
		 int head, u64 syncbuf, struct nv50_ovly *ovly)
{
	struct nv50_disp_overlay_channel_dma_v0 args = {
		.head = head,
	};
	static const s32 oclass[] = {
		GK104_DISP_OVERLAY_CONTROL_DMA,
		GF110_DISP_OVERLAY_CONTROL_DMA,
		GT214_DISP_OVERLAY_CHANNEL_DMA,
		GT200_DISP_OVERLAY_CHANNEL_DMA,
		G82_DISP_OVERLAY_CHANNEL_DMA,
		NV50_DISP_OVERLAY_CHANNEL_DMA,
		0
	};

	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
				syncbuf, &ovly->base);
}
struct nv50_head {
	struct nouveau_crtc base;
	struct {
		struct nouveau_bo *nvbo[2];
		int next;
	} lut;
	struct nv50_ovly ovly;
	struct nv50_oimm oimm;
};

#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
#define nv50_ovly(c) (&nv50_head(c)->ovly)
#define nv50_oimm(c) (&nv50_head(c)->oimm)
#define nv50_chan(c) (&(c)->base.base)
#define nv50_vers(c) nv50_chan(c)->user.oclass
struct nv50_disp {
	struct nvif_disp *disp;
	struct nv50_mast mast;

	struct nouveau_bo *sync;
};

static struct nv50_disp *
nv50_disp(struct drm_device *dev)
{
	return nouveau_display(dev)->priv;
}

#define nv50_mast(d) (&nv50_disp(d)->mast)
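/* Methods are written into a channel's pushbuf between evo_wait() and
 * evo_kick().  evo_wait() reserves "nr" dwords, treating offset 0x0000 of
 * the channel's user area as the PUT pointer and 0x0004 as GET: when a
 * reservation would run off the end of the page it emits a jump-to-start
 * command (0x20000000), resets PUT and waits for GET to follow.  evo_kick()
 * publishes the new PUT pointer and drops the lock taken by evo_wait().
 */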
/******************************************************************************
 * EVO channel helpers
 *****************************************************************************/
static u32 *
evo_wait(void *evoc, int nr)
{
	struct nv50_dmac *dmac = evoc;
	struct nvif_device *device = dmac->base.device;
	u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;

	mutex_lock(&dmac->lock);
	if (put + nr >= (PAGE_SIZE / 4) - 8) {
		dmac->ptr[put] = 0x20000000;

		nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
		if (nvif_msec(device, 2000,
			if (!nvif_rd32(&dmac->base.user, 0x0004))
				break;
		) < 0) {
			mutex_unlock(&dmac->lock);
			pr_err("nouveau: evo channel stalled\n");
			return NULL;
		}

		put = 0;
	}

	return dmac->ptr + put;
}
static void
evo_kick(u32 *push, void *evoc)
{
	struct nv50_dmac *dmac = evoc;
	nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
	mutex_unlock(&dmac->lock);
}
#define evo_mthd(p, m, s) do {						\
	const u32 _m = (m), _s = (s);					\
	if (drm_debug & DRM_UT_KMS)					\
		pr_err("%04x %d %s\n", _m, _s, __func__);		\
	*((p)++) = ((_s << 18) | _m);					\
} while(0)

#define evo_data(p, d) do {						\
	const u32 _d = (d);						\
	if (drm_debug & DRM_UT_KMS)					\
		pr_err("\t%08x\n", _d);					\
	*((p)++) = _d;							\
} while(0)
/******************************************************************************
 * Plane
 *****************************************************************************/
#define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)

struct nv50_wndw {
	const struct nv50_wndw_func *func;
	struct nv50_dmac *dmac;

	struct drm_plane plane;

	struct nvif_notify notify;
	u16 ntfy;
	u16 sema;
	u32 data;
};
struct nv50_wndw_func {
	void *(*dtor)(struct nv50_wndw *);
	int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
		       struct nv50_head_atom *asyh);
	void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
			struct nv50_head_atom *asyh);
	void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
			struct nv50_wndw_atom *asyw);

	void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
	void (*sema_clr)(struct nv50_wndw *);
	void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
	void (*ntfy_clr)(struct nv50_wndw *);
	int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *);
	void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
	void (*image_clr)(struct nv50_wndw *);
	void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
	void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);

	u32 (*update)(struct nv50_wndw *, u32 interlock);
};
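/* The hooks above are driven in two phases by the flush helpers below:
 * nv50_wndw_flush_set() programs pending sema/ntfy/image/lut/point state,
 * nv50_wndw_flush_clr() tears state down on disable (or when a full flush
 * is forced), and both finish with update(), whose return value is the
 * interlock mask that other channels synchronise against.
 */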
static int
nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	if (asyw->set.ntfy)
		return wndw->func->ntfy_wait_begun(wndw, asyw);
	return 0;
}
static u32
nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
		    struct nv50_wndw_atom *asyw)
{
	if (asyw->clr.sema && (!asyw->set.sema || flush))
		wndw->func->sema_clr(wndw);
	if (asyw->clr.ntfy && (!asyw->set.ntfy || flush))
		wndw->func->ntfy_clr(wndw);
	if (asyw->clr.image && (!asyw->set.image || flush))
		wndw->func->image_clr(wndw);

	return flush ? wndw->func->update(wndw, interlock) : 0;
}
static u32
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
		    struct nv50_wndw_atom *asyw)
{
	if (interlock) {
		asyw->image.mode = 0;
		asyw->image.interval = 1;
	}

	if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
	if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
	if (asyw->set.image) wndw->func->image_set(wndw, asyw);
	if (asyw->set.lut  ) wndw->func->lut      (wndw, asyw);
	if (asyw->set.point) wndw->func->point    (wndw, asyw);

	return wndw->func->update(wndw, interlock);
}
static void
nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
	wndw->func->release(wndw, asyw, asyh);
	asyw->ntfy.handle = 0;
	asyw->sema.handle = 0;
}
static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	int ret;

	NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);

	asyw->image.w = fb->base.width;
	asyw->image.h = fb->base.height;
	asyw->image.kind = fb->nvbo->kind;

	if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
		asyw->interval = 0;
	else
		asyw->interval = 1;

	if (asyw->image.kind) {
		asyw->image.layout = 0;
		if (drm->client.device.info.chipset >= 0xc0)
			asyw->image.block = fb->nvbo->mode >> 4;
		else
			asyw->image.block = fb->nvbo->mode;
		asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
	} else {
		asyw->image.layout = 1;
		asyw->image.block  = 0;
		asyw->image.pitch  = fb->base.pitches[0];
	}

	ret = wndw->func->acquire(wndw, asyw, asyh);
	if (ret)
		return ret;

	if (asyw->set.image) {
		if (!(asyw->image.mode = asyw->interval ? 0 : 1))
			asyw->image.interval = asyw->interval;
		else
			asyw->image.interval = 0;
	}

	return 0;
}
static int
nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *harm = NULL, *asyh = NULL;
	bool varm = false, asyv = false, asym = false;
	int ret;

	NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
	if (asyw->state.crtc) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);
		asym = drm_atomic_crtc_needs_modeset(&asyh->state);
		asyv = asyh->state.active;
	}

	if (armw->state.crtc) {
		harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
		if (IS_ERR(harm))
			return PTR_ERR(harm);
		varm = harm->state.crtc->state->active;
	}

	if (asyv) {
		asyw->point.x = asyw->state.crtc_x;
		asyw->point.y = asyw->state.crtc_y;
		if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
			asyw->set.point = true;

		ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
		if (ret)
			return ret;
	} else
	if (varm) {
		nv50_wndw_atomic_check_release(wndw, asyw, harm);
	} else {
		return 0;
	}

	if (!asyv || asym) {
		asyw->clr.ntfy = armw->ntfy.handle != 0;
		asyw->clr.sema = armw->sema.handle != 0;
		if (wndw->func->image_clr)
			asyw->clr.image = armw->image.handle != 0;
		asyw->set.lut = wndw->func->lut && asyv;
	}

	return 0;
}
static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);

	NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
	if (!old_state->fb)
		return;

	nouveau_bo_unpin(fb->nvbo);
}
static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *asyh;
	struct nv50_dmac_ctxdma *ctxdma;
	int ret;

	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
	if (!asyw->state.fb)
		return 0;

	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
	if (ret)
		return ret;

	ctxdma = nv50_dmac_ctxdma_new(wndw->dmac, fb);
	if (IS_ERR(ctxdma)) {
		nouveau_bo_unpin(fb->nvbo);
		return PTR_ERR(ctxdma);
	}

	asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
	asyw->image.handle = ctxdma->object.handle;
	asyw->image.offset = fb->nvbo->bo.offset;

	if (wndw->func->prepare) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);

		wndw->func->prepare(wndw, asyh, asyw);
	}

	return 0;
}
static const struct drm_plane_helper_funcs
nv50_wndw_helper = {
	.prepare_fb = nv50_wndw_prepare_fb,
	.cleanup_fb = nv50_wndw_cleanup_fb,
	.atomic_check = nv50_wndw_atomic_check,
};
static void
nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
			       struct drm_plane_state *state)
{
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	__drm_atomic_helper_plane_destroy_state(&asyw->state);
	kfree(asyw);
}
static struct drm_plane_state *
nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
{
	struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
	struct nv50_wndw_atom *asyw;
	if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
		return NULL;
	__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
	asyw->sema = armw->sema;
	asyw->ntfy = armw->ntfy;
	asyw->image = armw->image;
	asyw->point = armw->point;
	asyw->lut = armw->lut;
	asyw->clr.mask = 0;
	asyw->set.mask = 0;
	return &asyw->state;
}
static void
nv50_wndw_reset(struct drm_plane *plane)
{
	struct nv50_wndw_atom *asyw;

	if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
		return;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);
	plane->state = &asyw->state;
	plane->state->plane = plane;
	plane->state->rotation = DRM_MODE_ROTATE_0;
}
static void
nv50_wndw_destroy(struct drm_plane *plane)
{
	struct nv50_wndw *wndw = nv50_wndw(plane);
	void *data;
	nvif_notify_fini(&wndw->notify);
	data = wndw->func->dtor(wndw);
	drm_plane_cleanup(&wndw->plane);
	kfree(data);
}
static const struct drm_plane_funcs
nv50_wndw = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = nv50_wndw_destroy,
	.reset = nv50_wndw_reset,
	.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
	.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
};
static void
nv50_wndw_fini(struct nv50_wndw *wndw)
{
	nvif_notify_put(&wndw->notify);
}

static void
nv50_wndw_init(struct nv50_wndw *wndw)
{
	nvif_notify_get(&wndw->notify);
}
static int
nv50_wndw_ctor(const struct nv50_wndw_func *func, struct drm_device *dev,
	       enum drm_plane_type type, const char *name, int index,
	       struct nv50_dmac *dmac, const u32 *format, int nformat,
	       struct nv50_wndw *wndw)
{
	int ret;

	wndw->func = func;
	wndw->dmac = dmac;

	ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw,
				       format, nformat, NULL,
				       type, "%s-%d", name, index);
	if (ret)
		return ret;

	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
	return 0;
}
/******************************************************************************
 * Cursor plane
 *****************************************************************************/
#define nv50_curs(p) container_of((p), struct nv50_curs, wndw)

struct nv50_curs {
	struct nv50_wndw wndw;
	struct nvif_object chan;
};
static u32
nv50_curs_update(struct nv50_wndw *wndw, u32 interlock)
{
	struct nv50_curs *curs = nv50_curs(wndw);
	nvif_wr32(&curs->chan, 0x0080, 0x00000000);
	return 0;
}
static void
nv50_curs_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_curs *curs = nv50_curs(wndw);
	nvif_wr32(&curs->chan, 0x0084, (asyw->point.y << 16) | asyw->point.x);
}
static void
nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
		  struct nv50_wndw_atom *asyw)
{
	u32 handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
	u32 offset = asyw->image.offset;
	if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
		asyh->curs.handle = handle;
		asyh->curs.offset = offset;
		asyh->set.curs = asyh->curs.visible;
	}
}
static void
nv50_curs_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	asyh->curs.visible = false;
}
static int
nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	int ret;

	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	asyh->curs.visible = asyw->state.visible;
	if (ret || !asyh->curs.visible)
		return ret;

	switch (asyw->state.fb->width) {
	case 32: asyh->curs.layout = 0; break;
	case 64: asyh->curs.layout = 1; break;
	default:
		return -EINVAL;
	}

	if (asyw->state.fb->width != asyw->state.fb->height)
		return -EINVAL;

	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
static void *
nv50_curs_dtor(struct nv50_wndw *wndw)
{
	struct nv50_curs *curs = nv50_curs(wndw);
	nvif_object_fini(&curs->chan);
	return curs;
}

static const u32
nv50_curs_format[] = {
	DRM_FORMAT_ARGB8888,
};
static const struct nv50_wndw_func
nv50_curs = {
	.dtor = nv50_curs_dtor,
	.acquire = nv50_curs_acquire,
	.release = nv50_curs_release,
	.prepare = nv50_curs_prepare,
	.point = nv50_curs_point,
	.update = nv50_curs_update,
};
static int
nv50_curs_new(struct nouveau_drm *drm, struct nv50_head *head,
	      struct nv50_curs **pcurs)
{
	static const struct nvif_mclass curses[] = {
		{ GK104_DISP_CURSOR, 0 },
		{ GF110_DISP_CURSOR, 0 },
		{ GT214_DISP_CURSOR, 0 },
		{ G82_DISP_CURSOR, 0 },
		{ NV50_DISP_CURSOR, 0 },
		{}
	};
	struct nv50_disp_cursor_v0 args = {
		.head = head->base.index,
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_curs *curs;
	int cid, ret;

	cid = nvif_mclass(&disp->disp->object, curses);
	if (cid < 0) {
		NV_ERROR(drm, "No supported cursor immediate class\n");
		return cid;
	}

	if (!(curs = *pcurs = kzalloc(sizeof(*curs), GFP_KERNEL)))
		return -ENOMEM;

	ret = nv50_wndw_ctor(&nv50_curs, drm->dev, DRM_PLANE_TYPE_CURSOR,
			     "curs", head->base.index, &disp->mast.base,
			     nv50_curs_format, ARRAY_SIZE(nv50_curs_format),
			     &curs->wndw);
	if (ret) {
		kfree(curs);
		return ret;
	}

	ret = nvif_object_init(&disp->disp->object, 0, curses[cid].oclass,
			       &args, sizeof(args), &curs->chan);
	if (ret) {
		NV_ERROR(drm, "curs%04x allocation failed: %d\n",
			 curses[cid].oclass, ret);
		return ret;
	}

	return 0;
}
/******************************************************************************
 * Primary plane
 *****************************************************************************/
#define nv50_base(p) container_of((p), struct nv50_base, wndw)

struct nv50_base {
	struct nv50_wndw wndw;
	struct nv50_sync chan;
	int id;
};
static int
nv50_base_notify(struct nvif_notify *notify)
{
	return NVIF_NOTIFY_KEEP;
}
static void
nv50_base_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 2))) {
		evo_mthd(push, 0x00e0, 1);
		evo_data(push, asyw->lut.enable << 30);
		evo_kick(push, &base->chan);
	}
}
static void
nv50_base_image_clr(struct nv50_wndw *wndw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 4))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x00c0, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &base->chan);
	}
}
static void
nv50_base_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_base *base = nv50_base(wndw);
	const s32 oclass = base->chan.base.base.user.oclass;
	u32 *push;
	if ((push = evo_wait(&base->chan, 10))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, (asyw->image.mode << 8) |
			       (asyw->image.interval << 4));
		evo_mthd(push, 0x00c0, 1);
		evo_data(push, asyw->image.handle);
		if (oclass < G82_DISP_BASE_CHANNEL_DMA) {
			evo_mthd(push, 0x0800, 5);
			evo_data(push, asyw->image.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
			evo_data(push, (asyw->image.layout << 20) |
					asyw->image.pitch |
					asyw->image.block);
			evo_data(push, (asyw->image.kind << 16) |
				       (asyw->image.format << 8));
		} else
		if (oclass < GF110_DISP_BASE_CHANNEL_DMA) {
			evo_mthd(push, 0x0800, 5);
			evo_data(push, asyw->image.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
			evo_data(push, (asyw->image.layout << 20) |
					asyw->image.pitch |
					asyw->image.block);
			evo_data(push, asyw->image.format << 8);
		} else {
			evo_mthd(push, 0x0400, 5);
			evo_data(push, asyw->image.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
			evo_data(push, (asyw->image.layout << 24) |
					asyw->image.pitch |
					asyw->image.block);
			evo_data(push, asyw->image.format << 8);
		}
		evo_kick(push, &base->chan);
	}
}
static void
nv50_base_ntfy_clr(struct nv50_wndw *wndw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 2))) {
		evo_mthd(push, 0x00a4, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &base->chan);
	}
}
*wndw
, struct nv50_wndw_atom
*asyw
)
1337 struct nv50_base
*base
= nv50_base(wndw
);
1339 if ((push
= evo_wait(&base
->chan
, 3))) {
1340 evo_mthd(push
, 0x00a0, 2);
1341 evo_data(push
, (asyw
->ntfy
.awaken
<< 30) | asyw
->ntfy
.offset
);
1342 evo_data(push
, asyw
->ntfy
.handle
);
1343 evo_kick(push
, &base
->chan
);
static void
nv50_base_sema_clr(struct nv50_wndw *wndw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 2))) {
		evo_mthd(push, 0x0094, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &base->chan);
	}
}
static void
nv50_base_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 5))) {
		evo_mthd(push, 0x0088, 4);
		evo_data(push, asyw->sema.offset);
		evo_data(push, asyw->sema.acquire);
		evo_data(push, asyw->sema.release);
		evo_data(push, asyw->sema.handle);
		evo_kick(push, &base->chan);
	}
}
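/* Kick the base channel and return the interlock bit the core channel must
 * wait on; the bit position differs between pre-GF110 classes (8 bits per
 * head) and GF110 and later (4 bits per head).
 */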
static u32
nv50_base_update(struct nv50_wndw *wndw, u32 interlock)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;

	if (!(push = evo_wait(&base->chan, 2)))
		return 0;
	evo_mthd(push, 0x0080, 1);
	evo_data(push, interlock);
	evo_kick(push, &base->chan);

	if (base->chan.base.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
		return interlock ? 2 << (base->id * 8) : 0;
	return interlock ? 2 << (base->id * 4) : 0;
}
static int
nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	if (nvif_msec(&drm->client.device, 2000ULL,
		u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
		if ((data & 0xc0000000) == 0x40000000)
			break;
		usleep_range(1, 2);
	) < 0)
		return -ETIMEDOUT;
	return 0;
}
*wndw
, struct nv50_wndw_atom
*asyw
,
1408 struct nv50_head_atom
*asyh
)
static int
nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	const struct drm_framebuffer *fb = asyw->state.fb;
	int ret;

	if (!fb->format->depth)
		return -EINVAL;

	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);
	if (ret)
		return ret;

	asyh->base.depth = fb->format->depth;
	asyh->base.cpp = fb->format->cpp[0];
	asyh->base.x = asyw->state.src.x1 >> 16;
	asyh->base.y = asyw->state.src.y1 >> 16;
	asyh->base.w = asyw->state.fb->width;
	asyh->base.h = asyw->state.fb->height;

	switch (fb->format->format) {
	case DRM_FORMAT_C8         : asyw->image.format = 0x1e; break;
	case DRM_FORMAT_RGB565     : asyw->image.format = 0xe8; break;
	case DRM_FORMAT_XRGB1555   :
	case DRM_FORMAT_ARGB1555   : asyw->image.format = 0xe9; break;
	case DRM_FORMAT_XRGB8888   :
	case DRM_FORMAT_ARGB8888   : asyw->image.format = 0xcf; break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
	case DRM_FORMAT_XBGR8888   :
	case DRM_FORMAT_ABGR8888   : asyw->image.format = 0xd5; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	asyw->lut.enable = 1;
	asyw->set.image = true;
	return 0;
}
static void *
nv50_base_dtor(struct nv50_wndw *wndw)
{
	struct nv50_base *base = nv50_base(wndw);
	nv50_dmac_destroy(&base->chan.base);
	return base;
}

static const u32
nv50_base_format[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};
static const struct nv50_wndw_func
nv50_base = {
	.dtor = nv50_base_dtor,
	.acquire = nv50_base_acquire,
	.release = nv50_base_release,
	.sema_set = nv50_base_sema_set,
	.sema_clr = nv50_base_sema_clr,
	.ntfy_set = nv50_base_ntfy_set,
	.ntfy_clr = nv50_base_ntfy_clr,
	.ntfy_wait_begun = nv50_base_ntfy_wait_begun,
	.image_set = nv50_base_image_set,
	.image_clr = nv50_base_image_clr,
	.lut = nv50_base_lut,
	.update = nv50_base_update,
};
static int
nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
	      struct nv50_base **pbase)
{
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_base *base;
	int ret;

	if (!(base = *pbase = kzalloc(sizeof(*base), GFP_KERNEL)))
		return -ENOMEM;
	base->id = head->base.index;
	base->wndw.ntfy = EVO_FLIP_NTFY0(base->id);
	base->wndw.sema = EVO_FLIP_SEM0(base->id);
	base->wndw.data = 0x00000000;

	ret = nv50_wndw_ctor(&nv50_base, drm->dev, DRM_PLANE_TYPE_PRIMARY,
			     "base", base->id, &base->chan.base,
			     nv50_base_format, ARRAY_SIZE(nv50_base_format),
			     &base->wndw);
	if (ret) {
		kfree(base);
		return ret;
	}

	ret = nv50_base_create(&drm->client.device, &disp->disp->object,
			       base->id, disp->sync->bo.offset, &base->chan);
	if (ret)
		return ret;

	return nvif_notify_init(&base->chan.base.base.user, nv50_base_notify,
				false,
				NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
				&(struct nvif_notify_uevent_req) {},
				sizeof(struct nvif_notify_uevent_req),
				sizeof(struct nvif_notify_uevent_rep),
				&base->wndw.notify);
}
/******************************************************************************
 * Head
 *****************************************************************************/
static void
nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
		else
			evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
		evo_data(push, (asyh->procamp.sat.sin << 20) |
			       (asyh->procamp.sat.cos << 8));
		evo_kick(push, core);
	}
}
static void
nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
		else
		if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
		else
			evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
		evo_data(push, (asyh->dither.mode << 3) |
			       (asyh->dither.bits << 1) |
			        asyh->dither.enable);
		evo_kick(push, core);
	}
}
static void
nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 bounds = 0;
	u32 *push;

	if (asyh->base.cpp) {
		switch (asyh->base.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;
	}

	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}
static void
nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 bounds = 0;
	u32 *push;

	if (asyh->base.cpp) {
		switch (asyh->base.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		case 1: bounds |= 0x00000000; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;
	}

	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}
static void
nv50_head_curs_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
			evo_data(push, 0x05000000);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
			evo_data(push, 0x05000000);
			evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
			evo_data(push, 0x05000000);
			evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
			evo_data(push, 0x00000000);
		}
		evo_kick(push, core);
	}
}
static void
nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 5))) {
		if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
						    (asyh->curs.format << 24));
			evo_data(push, asyh->curs.offset >> 8);
		} else
		if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
						    (asyh->curs.format << 24));
			evo_data(push, asyh->curs.offset >> 8);
			evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
			evo_data(push, asyh->curs.handle);
		} else {
			evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
						    (asyh->curs.format << 24));
			evo_data(push, asyh->curs.offset >> 8);
			evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
			evo_data(push, asyh->curs.handle);
		}
		evo_kick(push, core);
	}
}
static void
nv50_head_core_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
static void
nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 9))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 20 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.kind << 16 |
				       asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
			/* EVO will complain with INVALID_STATE if we have an
			 * active cursor and (re)specify HeadSetContextDmaIso
			 * without also updating HeadSetOffsetCursor.
			 */
			asyh->set.curs = asyh->curs.visible;
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 20 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
		} else {
			evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 24 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
		}
		evo_kick(push, core);
	}
}
static void
nv50_head_lut_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
			evo_data(push, 0x40000000);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
			evo_data(push, 0x40000000);
			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
			evo_data(push, 0x03000000);
			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
			evo_data(push, 0x00000000);
		}
		evo_kick(push, core);
	}
}
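/* Load a 256-entry gamma blob into one of the head's LUT buffers.  Modes 0
 * (LORES) and 1 (HIRES) are the legacy input formats; mode 7
 * (INTERPOLATE_257_UNITY_RANGE) is used on newer hardware and needs a 257th
 * entry to interpolate with, hence the replicated final entry below.
 */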
static void
nv50_head_lut_load(struct drm_property_blob *blob, int mode,
		   struct nouveau_bo *nvbo)
{
	struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
	void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo);
	const int size = blob->length / sizeof(*in);
	int bits, shift, i;
	u16 zero, r, g, b;

	/* This can't happen.. But it shuts the compiler up. */
	if (WARN_ON(size != 256))
		return;

	switch (mode) {
	case 0: /* LORES. */
	case 1: /* HIRES. */
		bits = 11;
		shift = 3;
		zero = 0x0000;
		break;
	case 7: /* INTERPOLATE_257_UNITY_RANGE. */
		bits = 14;
		shift = 0;
		zero = 0x6000;
		break;
	default:
		WARN_ON(1);
		return;
	}

	for (i = 0; i < size; i++) {
		r = (drm_color_lut_extract(in[i].  red, bits) + zero) << shift;
		g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
		b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
		writew(r, lut + (i * 0x08) + 0);
		writew(g, lut + (i * 0x08) + 2);
		writew(b, lut + (i * 0x08) + 4);
	}

	/* INTERPOLATE modes require a "next" entry to interpolate with,
	 * so we replicate the last entry to deal with this for now.
	 */
	writew(r, lut + (i * 0x08) + 0);
	writew(g, lut + (i * 0x08) + 2);
	writew(b, lut + (i * 0x08) + 4);
}
static void
nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 7))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
			evo_data(push, 0x80000000 | asyh->lut.mode << 30);
			evo_data(push, asyh->lut.offset >> 8);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
			evo_data(push, 0x80000000 | asyh->lut.mode << 30);
			evo_data(push, asyh->lut.offset >> 8);
			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
			evo_data(push, asyh->lut.handle);
		} else {
			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
			evo_data(push, 0x80000000 | asyh->lut.mode << 24);
			evo_data(push, asyh->lut.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
			evo_data(push, asyh->lut.handle);
		}
		evo_kick(push, core);
	}
}
static void
nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	struct nv50_head_mode *m = &asyh->mode;
	u32 *push;
	if ((push = evo_wait(core, 14))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
			evo_data(push, 0x00800000 | m->clock);
			evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
			evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
			evo_data(push, 0x00000000);
			evo_data(push, (m->v.active  << 16) | m->h.active );
			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
			evo_data(push, asyh->mode.v.blankus);
			evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
			evo_data(push, 0x00000000);
			evo_data(push, (m->v.active  << 16) | m->h.active );
			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
			evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
			evo_data(push, 0x00000000); /* ??? */
			evo_data(push, 0xffffff00);
			evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
			evo_data(push, m->clock * 1000);
			evo_data(push, 0x00200000); /* ??? */
			evo_data(push, m->clock * 1000);
		}
		evo_kick(push, core);
	}
}
static void
nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 10))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
			evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
		} else {
			evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
			evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
		}
		evo_kick(push, core);
	}
}
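/* Head state is flushed in two passes, mirroring the window code: the clr
 * callbacks run first ("y" forces them when the head is being disabled),
 * then the set callbacks program new state, with the input LUT
 * double-buffered via head->lut.next.
 */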
static void
nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
{
	if (asyh->clr.ilut && (!asyh->set.ilut || y))
		nv50_head_lut_clr(head);
	if (asyh->clr.core && (!asyh->set.core || y))
		nv50_head_core_clr(head);
	if (asyh->clr.curs && (!asyh->set.curs || y))
		nv50_head_curs_clr(head);
}
static void
nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	if (asyh->set.view   ) nv50_head_view    (head, asyh);
	if (asyh->set.mode   ) nv50_head_mode    (head, asyh);
	if (asyh->set.ilut   ) {
		struct nouveau_bo *nvbo = head->lut.nvbo[head->lut.next];
		struct drm_property_blob *blob = asyh->state.gamma_lut;
		if (blob)
			nv50_head_lut_load(blob, asyh->lut.mode, nvbo);
		asyh->lut.offset = nvbo->bo.offset;
		head->lut.next ^= 1;
		nv50_head_lut_set(head, asyh);
	}
	if (asyh->set.core   ) nv50_head_core_set(head, asyh);
	if (asyh->set.curs   ) nv50_head_curs_set(head, asyh);
	if (asyh->set.base   ) nv50_head_base    (head, asyh);
	if (asyh->set.ovly   ) nv50_head_ovly    (head, asyh);
	if (asyh->set.dither ) nv50_head_dither  (head, asyh);
	if (asyh->set.procamp) nv50_head_procamp (head, asyh);
}
static void
nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
			       struct nv50_head_atom *asyh,
			       struct nouveau_conn_atom *asyc)
{
	const int vib = asyc->procamp.color_vibrance - 100;
	const int hue = asyc->procamp.vibrant_hue - 90;
	const int adj = (vib > 0) ? 50 : 0;
	asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
	asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
	asyh->set.procamp = true;
}
static void
nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
			      struct nv50_head_atom *asyh,
			      struct nouveau_conn_atom *asyc)
{
	struct drm_connector *connector = asyc->state.connector;
	u32 mode = 0x00;

	if (asyc->dither.mode == DITHERING_MODE_AUTO) {
		if (asyh->base.depth > connector->display_info.bpc * 3)
			mode = DITHERING_MODE_DYNAMIC2X2;
	} else {
		mode = asyc->dither.mode;
	}

	if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
		if (connector->display_info.bpc >= 8)
			mode |= DITHERING_DEPTH_8BPC;
	} else {
		mode |= asyc->dither.depth;
	}

	asyh->dither.enable = mode;
	asyh->dither.bits = mode >> 1;
	asyh->dither.mode = mode >> 3;
	asyh->set.dither = true;
}
static void
nv50_head_atomic_check_view(struct nv50_head_atom *armh,
			    struct nv50_head_atom *asyh,
			    struct nouveau_conn_atom *asyc)
{
	struct drm_connector *connector = asyc->state.connector;
	struct drm_display_mode *omode = &asyh->state.adjusted_mode;
	struct drm_display_mode *umode = &asyh->state.mode;
	int mode = asyc->scaler.mode;
	struct edid *edid;
	int umode_vdisplay, omode_hdisplay, omode_vdisplay;

	if (connector->edid_blob_ptr)
		edid = (struct edid *)connector->edid_blob_ptr->data;
	else
		edid = NULL;

	if (!asyc->scaler.full) {
		if (mode == DRM_MODE_SCALE_NONE)
			omode = umode;
	} else {
		/* Non-EDID LVDS/eDP mode. */
		mode = DRM_MODE_SCALE_FULLSCREEN;
	}

	/* For the user-specified mode, we must ignore doublescan and
	 * the like, but honor frame packing.
	 */
	umode_vdisplay = umode->vdisplay;
	if ((umode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
		umode_vdisplay += umode->vtotal;
	asyh->view.iW = umode->hdisplay;
	asyh->view.iH = umode_vdisplay;
	/* For the output mode, we can just use the stock helper. */
	drm_mode_get_hv_timing(omode, &omode_hdisplay, &omode_vdisplay);
	asyh->view.oW = omode_hdisplay;
	asyh->view.oH = omode_vdisplay;

	/* Add overscan compensation if necessary, will keep the aspect
	 * ratio the same as the backend mode unless overridden by the
	 * user setting both hborder and vborder properties.
	 */
	if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
	    (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
	     drm_detect_hdmi_monitor(edid)))) {
		u32 bX = asyc->scaler.underscan.hborder;
		u32 bY = asyc->scaler.underscan.vborder;
		u32 r = (asyh->view.oH << 19) / asyh->view.oW;

		if (bX) {
			asyh->view.oW -= (bX * 2);
			if (bY) asyh->view.oH -= (bY * 2);
			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
		} else {
			asyh->view.oW -= (asyh->view.oW >> 4) + 32;
			if (bY) asyh->view.oH -= (bY * 2);
			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
		}
	}

	/* Handle CENTER/ASPECT scaling, taking into account the areas
	 * removed already for overscan compensation.
	 */
	switch (mode) {
	case DRM_MODE_SCALE_CENTER:
		asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
		asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH);
		/* fall-through */
	case DRM_MODE_SCALE_ASPECT:
		if (asyh->view.oH < asyh->view.oW) {
			u32 r = (asyh->view.iW << 19) / asyh->view.iH;
			asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
		} else {
			u32 r = (asyh->view.iH << 19) / asyh->view.iW;
			asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
		}
		break;
	default:
		break;
	}

	asyh->set.view = true;
}
static void
nv50_head_atomic_check_lut(struct nv50_head *head,
			   struct nv50_head_atom *armh,
			   struct nv50_head_atom *asyh)
{
	struct nv50_disp *disp = nv50_disp(head->base.base.dev);

	/* An I8 surface without an input LUT makes no sense, and
	 * EVO will throw an error if you try.
	 *
	 * Legacy clients actually cause this due to the order in
	 * which they call ioctls, so we will enable the LUT with
	 * whatever contents the buffer already contains to avoid
	 * triggering the error check.
	 */
	if (!asyh->state.gamma_lut && asyh->base.cpp != 1) {
		asyh->lut.handle = 0;
		asyh->clr.ilut = armh->lut.visible;
		return;
	}

	if (disp->disp->object.oclass < GF110_DISP) {
		asyh->lut.mode = (asyh->base.cpp == 1) ? 0 : 1;
		asyh->set.ilut = true;
	} else {
		asyh->lut.mode = 7;
		asyh->set.ilut = asyh->state.color_mgmt_changed;
	}
	asyh->lut.handle = disp->mast.base.vram.handle;
}
static void
nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
	struct nv50_head_mode *m = &asyh->mode;
	u32 blankus;

	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V | CRTC_STEREO_DOUBLE);

	/*
	 * DRM modes are defined in terms of a repeating interval
	 * starting with the active display area.  The hardware modes
	 * are defined in terms of a repeating interval starting one
	 * unit (pixel or line) into the sync pulse.  So, add bias.
	 */

	m->h.active = mode->crtc_htotal;
	m->h.synce  = mode->crtc_hsync_end - mode->crtc_hsync_start - 1;
	m->h.blanke = mode->crtc_hblank_end - mode->crtc_hsync_start - 1;
	m->h.blanks = m->h.blanke + mode->crtc_hdisplay;

	m->v.active = mode->crtc_vtotal;
	m->v.synce  = mode->crtc_vsync_end - mode->crtc_vsync_start - 1;
	m->v.blanke = mode->crtc_vblank_end - mode->crtc_vsync_start - 1;
	m->v.blanks = m->v.blanke + mode->crtc_vdisplay;

	/*XXX: Safe underestimate, even "0" works */
	blankus = (m->v.active - mode->crtc_vdisplay - 2) * m->h.active;
	blankus *= 1000;
	blankus /= mode->crtc_clock;
	m->v.blankus = blankus;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		m->v.blank2e =  m->v.active + m->v.blanke;
		m->v.blank2s =  m->v.blank2e + mode->crtc_vdisplay;
		m->v.active  = (m->v.active * 2) + 1;
		m->interlace = true;
	} else {
		m->v.blank2e = 0;
		m->v.blank2s = 1;
		m->interlace = false;
	}
	m->clock = mode->crtc_clock;

	asyh->set.mode = true;
}
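/* atomic_check for a head: find the connector state feeding this CRTC,
 * recompute mode/LUT/view/dither/procamp state as required, derive the core
 * channel's surface from the base layer (or a full-screen placeholder when
 * only the cursor/LUT are visible), and finally diff armed against pending
 * state into the set/clr masks consumed by the flush code.
 */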
static int
nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
	struct nv50_disp *disp = nv50_disp(crtc->dev);
	struct nv50_head *head = nv50_head(crtc);
	struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
	struct nv50_head_atom *asyh = nv50_head_atom(state);
	struct nouveau_conn_atom *asyc = NULL;
	struct drm_connector_state *conns;
	struct drm_connector *conn;
	int i;

	NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
	if (asyh->state.active) {
		for_each_new_connector_in_state(asyh->state.state, conn, conns, i) {
			if (conns->crtc == crtc) {
				asyc = nouveau_conn_atom(conns);
				break;
			}
		}

		if (armh->state.active) {
			if (asyc) {
				if (asyh->state.mode_changed)
					asyc->set.scaler = true;
				if (armh->base.depth != asyh->base.depth)
					asyc->set.dither = true;
			}
		} else {
			if (asyc)
				asyc->set.mask = ~0;
			asyh->set.mask = ~0;
		}

		if (asyh->state.mode_changed)
			nv50_head_atomic_check_mode(head, asyh);

		if (asyh->state.color_mgmt_changed ||
		    asyh->base.cpp != armh->base.cpp)
			nv50_head_atomic_check_lut(head, armh, asyh);
		asyh->lut.visible = asyh->lut.handle != 0;

		if (asyc) {
			if (asyc->set.scaler)
				nv50_head_atomic_check_view(armh, asyh, asyc);
			if (asyc->set.dither)
				nv50_head_atomic_check_dither(armh, asyh, asyc);
			if (asyc->set.procamp)
				nv50_head_atomic_check_procamp(armh, asyh, asyc);
		}

		if ((asyh->core.visible = (asyh->base.cpp != 0))) {
			asyh->core.x = asyh->base.x;
			asyh->core.y = asyh->base.y;
			asyh->core.w = asyh->base.w;
			asyh->core.h = asyh->base.h;
		} else
		if ((asyh->core.visible = asyh->curs.visible) ||
		    (asyh->core.visible = asyh->lut.visible)) {
			/*XXX: We need to either find some way of having the
			 *     primary base layer appear black, while still
			 *     being able to display the other layers, or we
			 *     need to allocate a dummy black surface here.
			 */
			asyh->core.x = 0;
			asyh->core.y = 0;
			asyh->core.w = asyh->state.mode.hdisplay;
			asyh->core.h = asyh->state.mode.vdisplay;
		}
		asyh->core.handle = disp->mast.base.vram.handle;
		asyh->core.offset = 0;
		asyh->core.format = 0xcf;
		asyh->core.kind = 0;
		asyh->core.layout = 1;
		asyh->core.block = 0;
		asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
		asyh->set.base = armh->base.cpp != asyh->base.cpp;
		asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
	} else {
		asyh->lut.visible = false;
		asyh->core.visible = false;
		asyh->curs.visible = false;
		asyh->base.cpp = 0;
		asyh->ovly.cpp = 0;
	}

	if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
		if (asyh->core.visible) {
			if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
				asyh->set.core = true;
		} else
		if (armh->core.visible) {
			asyh->clr.core = true;
		}

		if (asyh->curs.visible) {
			if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
				asyh->set.curs = true;
		} else
		if (armh->curs.visible) {
			asyh->clr.curs = true;
		}
	} else {
		asyh->clr.ilut = armh->lut.visible;
		asyh->clr.core = armh->core.visible;
		asyh->clr.curs = armh->curs.visible;
		asyh->set.ilut = asyh->lut.visible;
		asyh->set.core = asyh->core.visible;
		asyh->set.curs = asyh->curs.visible;
	}

	if (asyh->clr.mask || asyh->set.mask)
		nv50_atom(asyh->state.state)->lock_core = true;
	return 0;
}
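
/* Note on the naming used throughout this file: "armh" is the head state
 * currently armed in hardware (crtc->state), "asyh" the new state being
 * checked and assembled.  The set/clr bitmasks filled in above are what
 * the commit path later consumes to decide which channel methods need to
 * be written or torn down.
 */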
static const struct drm_crtc_helper_funcs
nv50_head_help = {
	.atomic_check = nv50_head_atomic_check,
};

static void
nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
			       struct drm_crtc_state *state)
{
	struct nv50_head_atom *asyh = nv50_head_atom(state);
	__drm_atomic_helper_crtc_destroy_state(&asyh->state);
	kfree(asyh);
}

static struct drm_crtc_state *
nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
{
	struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
	struct nv50_head_atom *asyh;
	if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
		return NULL;
	__drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
	asyh->view = armh->view;
	asyh->mode = armh->mode;
	asyh->lut = armh->lut;
	asyh->core = armh->core;
	asyh->curs = armh->curs;
	asyh->base = armh->base;
	asyh->ovly = armh->ovly;
	asyh->dither = armh->dither;
	asyh->procamp = armh->procamp;
	asyh->clr.mask = 0;
	asyh->set.mask = 0;
	return &asyh->state;
}

static void
__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
			       struct drm_crtc_state *state)
{
	if (crtc->state)
		crtc->funcs->atomic_destroy_state(crtc, crtc->state);
	crtc->state = state;
	crtc->state->crtc = crtc;
}

static void
nv50_head_reset(struct drm_crtc *crtc)
{
	struct nv50_head_atom *asyh;

	if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &asyh->state);
}

static void
nv50_head_destroy(struct drm_crtc *crtc)
{
	struct nv50_head *head = nv50_head(crtc);
	int i;

	nv50_dmac_destroy(&head->ovly.base);
	nv50_pioc_destroy(&head->oimm.base);

	for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++)
		nouveau_bo_unmap_unpin_unref(&head->lut.nvbo[i]);

	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static const struct drm_crtc_funcs
nv50_head_func = {
	.reset = nv50_head_reset,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.destroy = nv50_head_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = nv50_head_atomic_duplicate_state,
	.atomic_destroy_state = nv50_head_atomic_destroy_state,
};
static int
nv50_head_create(struct drm_device *dev, int index)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->client.device;
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_head *head;
	struct nv50_base *base;
	struct nv50_curs *curs;
	struct drm_crtc *crtc;
	int ret, i;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOMEM;

	head->base.index = index;
	ret = nv50_base_new(drm, head, &base);
	if (ret == 0)
		ret = nv50_curs_new(drm, head, &curs);
	if (ret) {
		kfree(head);
		return ret;
	}

	crtc = &head->base.base;
	drm_crtc_init_with_planes(dev, crtc, &base->wndw.plane,
				  &curs->wndw.plane, &nv50_head_func,
				  "head-%d", head->base.index);
	drm_crtc_helper_add(crtc, &nv50_head_help);
	drm_mode_crtc_set_gamma_size(crtc, 256);

	for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++) {
		ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100,
					     TTM_PL_FLAG_VRAM,
					     &head->lut.nvbo[i]);
		if (ret)
			goto out;
	}

	/* allocate overlay resources */
	ret = nv50_oimm_create(device, &disp->disp->object, index, &head->oimm);
	if (ret)
		goto out;

	ret = nv50_ovly_create(device, &disp->disp->object, index,
			       disp->sync->bo.offset, &head->ovly);
	if (ret)
		goto out;

out:
	if (ret)
		nv50_head_destroy(crtc);
	return ret;
}

/******************************************************************************
 * Output path helpers
 *****************************************************************************/
static void
nv50_outp_release(struct nouveau_encoder *nv_encoder)
{
	struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
	struct {
		struct nv50_disp_mthd_v1 base;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_RELEASE,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = nv_encoder->dcb->hashm,
	};

	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
	nv_encoder->or = -1;
	nv_encoder->link = 0;
}

static int
nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
{
	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_acquire_v0 info;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_ACQUIRE,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = nv_encoder->dcb->hashm,
	};
	int ret;

	ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
	if (ret) {
		NV_ERROR(drm, "error acquiring output path: %d\n", ret);
		return ret;
	}

	nv_encoder->or = args.info.or;
	nv_encoder->link = args.info.link;
	return 0;
}
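
/* nv50_outp_acquire()/nv50_outp_release() bracket an encoder's use of an
 * OR (output resource): acquire asks NVKM to assign an OR and link for
 * this output path, release hands it back.  Callers appear to rely on
 * nv_encoder->or/link only being meaningful between the two calls, which
 * is why release poisons them.
 */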
static int
nv50_outp_atomic_check_view(struct drm_encoder *encoder,
			    struct drm_crtc_state *crtc_state,
			    struct drm_connector_state *conn_state,
			    struct drm_display_mode *native_mode)
{
	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_display_mode *mode = &crtc_state->mode;
	struct drm_connector *connector = conn_state->connector;
	struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);

	NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
	asyc->scaler.full = false;
	if (!native_mode)
		return 0;

	if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
		switch (connector->connector_type) {
		case DRM_MODE_CONNECTOR_LVDS:
		case DRM_MODE_CONNECTOR_eDP:
			/* Force use of scaler for non-EDID modes. */
			if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
				break;
			mode = native_mode;
			asyc->scaler.full = true;
			break;
		default:
			break;
		}
	} else {
		mode = native_mode;
	}

	if (!drm_mode_equal(adjusted_mode, mode)) {
		drm_mode_copy(adjusted_mode, mode);
		crtc_state->mode_changed = true;
	}

	return 0;
}

static int
nv50_outp_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	struct nouveau_connector *nv_connector =
		nouveau_connector(conn_state->connector);
	return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
					   nv_connector->native_mode);
}

/******************************************************************************
 * DAC
 *****************************************************************************/
static void
nv50_dac_disable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	const int or = nv_encoder->or;
	u32 *push;

	if (nv_encoder->crtc) {
		push = evo_wait(mast, 4);
		if (push) {
			if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
				evo_mthd(push, 0x0400 + (or * 0x080), 1);
				evo_data(push, 0x00000000);
			} else {
				evo_mthd(push, 0x0180 + (or * 0x020), 1);
				evo_data(push, 0x00000000);
			}
			evo_kick(push, mast);
		}
	}

	nv_encoder->crtc = NULL;
	nv50_outp_release(nv_encoder);
}

static void
nv50_dac_enable(struct drm_encoder *encoder)
{
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
	u32 *push;

	nv50_outp_acquire(nv_encoder);

	push = evo_wait(mast, 8);
	if (push) {
		if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
			u32 syncs = 0x00000000;

			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
				syncs |= 0x00000001;
			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
				syncs |= 0x00000002;

			evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
			evo_data(push, 1 << nv_crtc->index);
			evo_data(push, syncs);
		} else {
			u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
			u32 syncs = 0x00000001;

			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
				syncs |= 0x00000008;
			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
				syncs |= 0x00000010;

			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
				magic |= 0x00000001;

			evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
			evo_data(push, syncs);
			evo_data(push, magic);
			evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
			evo_data(push, 1 << nv_crtc->index);
		}

		evo_kick(push, mast);
	}

	nv_encoder->crtc = encoder->crtc;
}

static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_dac_load_v0 load;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = nv_encoder->dcb->hashm,
	};
	int ret;

	args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
	if (args.load.data == 0)
		args.load.data = 340;

	ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
	if (ret || !args.load.load)
		return connector_status_disconnected;

	return connector_status_connected;
}

static const struct drm_encoder_helper_funcs
nv50_dac_help = {
	.atomic_check = nv50_outp_atomic_check,
	.enable = nv50_dac_enable,
	.disable = nv50_dac_disable,
	.detect = nv50_dac_detect
};

static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs
nv50_dac_func = {
	.destroy = nv50_dac_destroy,
};

static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nvkm_i2c_bus *bus;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type = DRM_MODE_ENCODER_DAC;

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;

	bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
	if (bus)
		nv_encoder->i2c = &bus->i2c;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
			 "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_dac_help);

	drm_mode_connector_attach_encoder(connector, encoder);
	return 0;
}
/******************************************************************************
 * Audio
 *****************************************************************************/
static void
nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hda_eld_v0 eld;
	} args = {
		.base.version = 1,
		.base.method  = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.hasht   = nv_encoder->dcb->hasht,
		.base.hashm   = (0xf0ff & nv_encoder->dcb->hashm) |
				(0x0100 << nv_crtc->index),
	};

	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
}

static void
nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_connector *nv_connector;
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct __packed {
		struct {
			struct nv50_disp_mthd_v1 mthd;
			struct nv50_disp_sor_hda_eld_v0 eld;
		} base;
		u8 data[sizeof(nv_connector->base.eld)];
	} args = {
		.base.mthd.version = 1,
		.base.mthd.method  = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.mthd.hasht   = nv_encoder->dcb->hasht,
		.base.mthd.hashm   = (0xf0ff & nv_encoder->dcb->hashm) |
				     (0x0100 << nv_crtc->index),
	};

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	if (!drm_detect_monitor_audio(nv_connector->edid))
		return;

	memcpy(args.data, nv_connector->base.eld, sizeof(args.data));

	nvif_mthd(&disp->disp->object, 0, &args,
		  sizeof(args.base) + drm_eld_size(args.data));
}
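
/* Note that only the valid portion of the ELD is sent: the method length
 * is sizeof(args.base) plus drm_eld_size(), rather than the full
 * sizeof(args), so trailing unused ELD bytes are not pushed to NVKM.
 */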
/******************************************************************************
 * HDMI
 *****************************************************************************/
static void
nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = (0xf0ff & nv_encoder->dcb->hashm) |
			       (0x0100 << nv_crtc->index),
	};

	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
}

static void
nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
		u8 infoframes[2 * 17]; /* two frames, up to 17 bytes each */
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = (0xf0ff & nv_encoder->dcb->hashm) |
			       (0x0100 << nv_crtc->index),
		.pwr.state = 1,
		.pwr.rekey = 56, /* binary driver, and tegra, constant */
	};
	struct nouveau_connector *nv_connector;
	u32 max_ac_packet;
	union hdmi_infoframe avi_frame;
	union hdmi_infoframe vendor_frame;
	int ret;
	int size;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	if (!drm_detect_hdmi_monitor(nv_connector->edid))
		return;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode,
						       false);
	if (!ret) {
		/* We have an AVI InfoFrame, populate it to the display */
		args.pwr.avi_infoframe_length
			= hdmi_infoframe_pack(&avi_frame, args.infoframes, 17);
	}

	ret = drm_hdmi_vendor_infoframe_from_display_mode(&vendor_frame.vendor.hdmi,
							  &nv_connector->base, mode);
	if (!ret) {
		/* We have a Vendor InfoFrame, populate it to the display */
		args.pwr.vendor_infoframe_length
			= hdmi_infoframe_pack(&vendor_frame,
					      args.infoframes
					      + args.pwr.avi_infoframe_length,
					      17);
	}

	max_ac_packet  = mode->htotal - mode->hdisplay;
	max_ac_packet -= args.pwr.rekey;
	max_ac_packet -= 18; /* constant from tegra */
	args.pwr.max_ac_packet = max_ac_packet / 32;

	size = sizeof(args.base)
	     + sizeof(args.pwr)
	     + args.pwr.avi_infoframe_length
	     + args.pwr.vendor_infoframe_length;
	nvif_mthd(&disp->disp->object, 0, &args, size);
	nv50_audio_enable(encoder, mode);
}
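
/* Rough arithmetic for max_ac_packet, again using 1080p60 as an example:
 * htotal - hdisplay = 2200 - 1920 = 280 pixels of blanking; subtracting
 * the 56-pixel rekey and the constant 18 leaves 206, and 206 / 32 = 6
 * audio/control packets per hblank.  Values are illustrative only,
 * derived from the computation above.
 */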
/******************************************************************************
 * MST
 *****************************************************************************/
#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)

struct nv50_mstm {
	struct nouveau_encoder *outp;

	struct drm_dp_mst_topology_mgr mgr;
	struct nv50_msto *msto[4];

	bool modified;
	bool disabled;
	int links;
};

struct nv50_mstc {
	struct nv50_mstm *mstm;
	struct drm_dp_mst_port *port;
	struct drm_connector connector;

	struct drm_display_mode *native;
	struct edid *edid;

	int pbn;
};

struct nv50_msto {
	struct drm_encoder encoder;

	struct nv50_head *head;
	struct nv50_mstc *mstc;
	bool disabled;
};

static struct drm_dp_payload *
nv50_msto_payload(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	int vcpi = mstc->port->vcpi.vcpi, i;

	NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
	for (i = 0; i < mstm->mgr.max_payloads; i++) {
		struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
		NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
			  mstm->outp->base.base.name, i, payload->vcpi,
			  payload->start_slot, payload->num_slots);
	}

	for (i = 0; i < mstm->mgr.max_payloads; i++) {
		struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
		if (payload->vcpi == vcpi)
			return payload;
	}

	return NULL;
}
static void
nv50_msto_cleanup(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;

	NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
	if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto))
		drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
	if (msto->disabled) {
		msto->mstc = NULL;
		msto->head = NULL;
		msto->disabled = false;
	}
}

static void
nv50_msto_prepare(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
		.base.hasht  = mstm->outp->dcb->hasht,
		.base.hashm  = (0xf0ff & mstm->outp->dcb->hashm) |
			       (0x0100 << msto->head->base.index),
	};

	NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
	if (mstc->port && mstc->port->vcpi.vcpi > 0) {
		struct drm_dp_payload *payload = nv50_msto_payload(msto);
		if (payload) {
			args.vcpi.start_slot = payload->start_slot;
			args.vcpi.num_slots = payload->num_slots;
			args.vcpi.pbn = mstc->port->vcpi.pbn;
			args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
		}
	}

	NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
		  msto->encoder.name, msto->head->base.base.name,
		  args.vcpi.start_slot, args.vcpi.num_slots,
		  args.vcpi.pbn, args.vcpi.aligned_pbn);
	nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
}
static int
nv50_msto_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	struct nv50_mstc *mstc = nv50_mstc(conn_state->connector);
	struct nv50_mstm *mstm = mstc->mstm;
	int bpp = conn_state->connector->display_info.bpc * 3;
	int slots;

	mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);

	slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
	if (slots < 0)
		return slots;

	return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
					   mstc->native);
}
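
/* drm_dp_calc_pbn_mode() converts pixel clock and bpp into PBN units
 * (54/64 MBps, with a small margin folded in for framing/spread-spectrum
 * overhead).  As a rough example, a 148500kHz mode at 24bpp works out to
 * on the order of 530 PBN; the vcpi-slot lookup above then rejects the
 * mode if the MST topology lacks the bandwidth.
 */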
static void
nv50_msto_enable(struct drm_encoder *encoder)
{
	struct nv50_head *head = nv50_head(encoder->crtc);
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = NULL;
	struct nv50_mstm *mstm = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 proto, depth;
	int slots;
	bool r;

	drm_connector_list_iter_begin(encoder->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->best_encoder == &msto->encoder) {
			mstc = nv50_mstc(connector);
			mstm = mstc->mstm;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (WARN_ON(!mstc))
		return;

	slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
	r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots);
	WARN_ON(!r);

	if (!mstm->links++)
		nv50_outp_acquire(mstm->outp);

	if (mstm->outp->link & 1)
		proto = 0x8;
	else
		proto = 0x9;

	switch (mstc->connector.display_info.bpc) {
	case  6: depth = 0x2; break;
	case  8: depth = 0x5; break;
	case 10:
	default: depth = 0x6; break;
	}

	mstm->outp->update(mstm->outp, head->base.index,
			   &head->base.base.state->adjusted_mode, proto, depth);

	msto->head = head;
	msto->mstc = mstc;
	mstm->modified = true;
}

static void
nv50_msto_disable(struct drm_encoder *encoder)
{
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;

	if (mstc->port)
		drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);

	mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
	mstm->modified = true;
	if (!--mstm->links)
		mstm->disabled = true;
	msto->disabled = true;
}

static const struct drm_encoder_helper_funcs
nv50_msto_help = {
	.disable = nv50_msto_disable,
	.enable = nv50_msto_enable,
	.atomic_check = nv50_msto_atomic_check,
};

static void
nv50_msto_destroy(struct drm_encoder *encoder)
{
	struct nv50_msto *msto = nv50_msto(encoder);
	drm_encoder_cleanup(&msto->encoder);
	kfree(msto);
}

static const struct drm_encoder_funcs
nv50_msto = {
	.destroy = nv50_msto_destroy,
};

static int
nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
	      struct nv50_msto **pmsto)
{
	struct nv50_msto *msto;
	int ret;

	if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
		return -ENOMEM;

	ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
			       DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
	if (ret) {
		kfree(*pmsto);
		*pmsto = NULL;
		return ret;
	}

	drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
	msto->encoder.possible_crtcs = heads;
	return 0;
}
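
/* One nv50_msto (fake DPMST encoder) is pre-created per head for each MST
 * master, since any head may end up driving a stream on this SOR; the
 * connector code below then picks the msto matching the head in use.
 */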
static struct drm_encoder *
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
			      struct drm_connector_state *connector_state)
{
	struct nv50_head *head = nv50_head(connector_state->crtc);
	struct nv50_mstc *mstc = nv50_mstc(connector);
	if (mstc->port) {
		struct nv50_mstm *mstm = mstc->mstm;
		return &mstm->msto[head->base.index]->encoder;
	}
	return NULL;
}

static struct drm_encoder *
nv50_mstc_best_encoder(struct drm_connector *connector)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	if (mstc->port) {
		struct nv50_mstm *mstm = mstc->mstm;
		return &mstm->msto[0]->encoder;
	}
	return NULL;
}

static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
		     struct drm_display_mode *mode)
{
	return MODE_OK;
}

static int
nv50_mstc_get_modes(struct drm_connector *connector)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	int ret = 0;

	mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
	drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
	if (mstc->edid)
		ret = drm_add_edid_modes(&mstc->connector, mstc->edid);

	if (!mstc->connector.display_info.bpc)
		mstc->connector.display_info.bpc = 8;

	if (mstc->native)
		drm_mode_destroy(mstc->connector.dev, mstc->native);
	mstc->native = nouveau_conn_native_mode(&mstc->connector);
	return ret;
}

static const struct drm_connector_helper_funcs
nv50_mstc_help = {
	.get_modes = nv50_mstc_get_modes,
	.mode_valid = nv50_mstc_mode_valid,
	.best_encoder = nv50_mstc_best_encoder,
	.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
};

static enum drm_connector_status
nv50_mstc_detect(struct drm_connector *connector, bool force)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	if (!mstc->port)
		return connector_status_disconnected;
	return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
}

static void
nv50_mstc_destroy(struct drm_connector *connector)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	drm_connector_cleanup(&mstc->connector);
	kfree(mstc);
}

static const struct drm_connector_funcs
nv50_mstc = {
	.reset = nouveau_conn_reset,
	.detect = nv50_mstc_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = nv50_mstc_destroy,
	.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
	.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
	.atomic_set_property = nouveau_conn_atomic_set_property,
	.atomic_get_property = nouveau_conn_atomic_get_property,
};

static int
nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
	      const char *path, struct nv50_mstc **pmstc)
{
	struct drm_device *dev = mstm->outp->base.base.dev;
	struct nv50_mstc *mstc;
	int ret, i;

	if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
		return -ENOMEM;
	mstc->mstm = mstm;
	mstc->port = port;

	ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		kfree(*pmstc);
		*pmstc = NULL;
		return ret;
	}

	drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);

	mstc->connector.funcs->reset(&mstc->connector);
	nouveau_conn_attach_properties(&mstc->connector);

	for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
		drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);

	drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
	drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
	drm_mode_connector_set_path_property(&mstc->connector, path);
	return 0;
}
static void
nv50_mstm_cleanup(struct nv50_mstm *mstm)
{
	struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
	struct drm_encoder *encoder;
	int ret;

	NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
	ret = drm_dp_check_act_status(&mstm->mgr);

	ret = drm_dp_update_payload_part2(&mstm->mgr);

	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;
			if (mstc && mstc->mstm == mstm)
				nv50_msto_cleanup(msto);
		}
	}

	mstm->modified = false;
}

static void
nv50_mstm_prepare(struct nv50_mstm *mstm)
{
	struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
	struct drm_encoder *encoder;
	int ret;

	NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
	ret = drm_dp_update_payload_part1(&mstm->mgr);

	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;
			if (mstc && mstc->mstm == mstm)
				nv50_msto_prepare(msto);
		}
	}

	if (mstm->disabled) {
		if (!mstm->links)
			nv50_outp_release(mstm->outp);
		mstm->disabled = false;
	}
}

static void
nv50_mstm_hotplug(struct drm_dp_mst_topology_mgr *mgr)
{
	struct nv50_mstm *mstm = nv50_mstm(mgr);
	drm_kms_helper_hotplug_event(mstm->outp->base.base.dev);
}

static void
nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_connector *connector)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nv50_mstc *mstc = nv50_mstc(connector);

	drm_connector_unregister(&mstc->connector);

	drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);

	drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
	mstc->port = NULL;
	drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);

	drm_connector_unreference(&mstc->connector);
}

static void
nv50_mstm_register_connector(struct drm_connector *connector)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);

	drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);

	drm_connector_register(connector);
}

static struct drm_connector *
nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port, const char *path)
{
	struct nv50_mstm *mstm = nv50_mstm(mgr);
	struct nv50_mstc *mstc;
	int ret;

	ret = nv50_mstc_new(mstm, port, path, &mstc);
	if (ret) {
		if (mstc)
			mstc->connector.funcs->destroy(&mstc->connector);
		return NULL;
	}

	return &mstc->connector;
}

static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
	.add_connector = nv50_mstm_add_connector,
	.register_connector = nv50_mstm_register_connector,
	.destroy_connector = nv50_mstm_destroy_connector,
	.hotplug = nv50_mstm_hotplug,
};

void
nv50_mstm_service(struct nv50_mstm *mstm)
{
	struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
	bool handled = true;
	int ret;
	u8 esi[8] = {};

	if (!aux)
		return;

	while (handled) {
		ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
		if (ret != 8) {
			drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
			return;
		}

		drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
		if (!handled)
			break;

		drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
	}
}
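
/* The ESI loop above follows the usual DP-MST IRQ servicing pattern:
 * read the sink-count/ESI block, let the topology manager process it,
 * ack the serviced events, and repeat until nothing is left.  A short
 * read is treated as the sink having gone away, so MST gets torn down.
 */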
void
nv50_mstm_remove(struct nv50_mstm *mstm)
{
	if (mstm)
		drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
}

static int
nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
{
	struct nouveau_encoder *outp = mstm->outp;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_link_v0 mst;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
		.base.hasht = outp->dcb->hasht,
		.base.hashm = outp->dcb->hashm,
		.mst.state = state,
	};
	struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
	struct nvif_object *disp = &drm->display->disp.object;
	int ret;

	if (dpcd >= 0x12) {
		ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
		if (ret < 0)
			return ret;

		dpcd &= ~DP_MST_EN;
		if (state)
			dpcd |= DP_MST_EN;

		ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
		if (ret < 0)
			return ret;
	}

	return nvif_mthd(disp, 0, &args, sizeof(args));
}

int
nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
{
	int ret, state = 0;

	if (!mstm)
		return 0;

	if (dpcd[0] >= 0x12) {
		ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
		if (ret < 0)
			return ret;

		if (!(dpcd[1] & DP_MST_CAP))
			dpcd[0] = 0x11;
		else
			state = allow;
	}

	ret = nv50_mstm_enable(mstm, dpcd[0], state);
	if (ret)
		return ret;

	ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
	if (ret)
		return nv50_mstm_enable(mstm, dpcd[0], 0);

	return mstm->mgr.mst_state;
}

static void
nv50_mstm_fini(struct nv50_mstm *mstm)
{
	if (mstm && mstm->mgr.mst_state)
		drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
}

static void
nv50_mstm_init(struct nv50_mstm *mstm)
{
	if (mstm && mstm->mgr.mst_state)
		drm_dp_mst_topology_mgr_resume(&mstm->mgr);
}

static void
nv50_mstm_del(struct nv50_mstm **pmstm)
{
	struct nv50_mstm *mstm = *pmstm;
	if (mstm) {
		kfree(*pmstm);
		*pmstm = NULL;
	}
}

static int
nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
	      int conn_base_id, struct nv50_mstm **pmstm)
{
	const int max_payloads = hweight8(outp->dcb->heads);
	struct drm_device *dev = outp->base.base.dev;
	struct nv50_mstm *mstm;
	int ret, i;
	u8 dpcd;

	/* This is a workaround for some monitors not functioning
	 * correctly in MST mode on initial module load.  I think
	 * some bad interaction with the VBIOS may be responsible.
	 *
	 * A good ol' off and on again seems to work here ;)
	 */
	ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
	if (ret >= 0 && dpcd >= 0x12)
		drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);

	if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
		return -ENOMEM;
	mstm->outp = outp;
	mstm->mgr.cbs = &nv50_mstm;

	ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
					   max_payloads, conn_base_id);
	if (ret)
		return ret;

	for (i = 0; i < max_payloads; i++) {
		ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
				    i, &mstm->msto[i]);
		if (ret)
			return ret;
	}

	return 0;
}
/******************************************************************************
 * SOR
 *****************************************************************************/
static void
nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
		struct drm_display_mode *mode, u8 proto, u8 depth)
{
	struct nv50_dmac *core = &nv50_mast(nv_encoder->base.base.dev)->base;
	u32 *push;

	if (!mode) {
		nv_encoder->ctrl &= ~BIT(head);
		if (!(nv_encoder->ctrl & 0x0000000f))
			nv_encoder->ctrl = 0;
	} else {
		nv_encoder->ctrl |= proto << 8;
		nv_encoder->ctrl |= BIT(head);
	}

	if ((push = evo_wait(core, 6))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			if (mode) {
				if (mode->flags & DRM_MODE_FLAG_NHSYNC)
					nv_encoder->ctrl |= 0x00001000;
				if (mode->flags & DRM_MODE_FLAG_NVSYNC)
					nv_encoder->ctrl |= 0x00002000;
				nv_encoder->ctrl |= depth << 16;
			}
			evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
		} else {
			if (mode) {
				u32 magic = 0x31ec6000 | (head << 25);
				u32 syncs = 0x00000001;
				if (mode->flags & DRM_MODE_FLAG_NHSYNC)
					syncs |= 0x00000008;
				if (mode->flags & DRM_MODE_FLAG_NVSYNC)
					syncs |= 0x00000010;
				if (mode->flags & DRM_MODE_FLAG_INTERLACE)
					magic |= 0x00000001;

				evo_mthd(push, 0x0404 + (head * 0x300), 2);
				evo_data(push, syncs | (depth << 6));
				evo_data(push, magic);
			}
			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
		}
		evo_data(push, nv_encoder->ctrl);
		evo_kick(push, core);
	}
}
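
/* nv_encoder->ctrl mirrors the hardware SOR control method: the low
 * nibble appears to be a mask of heads driving this OR, with the
 * protocol in bits 8+ (and, pre-GF110, sync polarity and depth or'd in
 * directly).  Caching the word lets heads be added and removed without
 * recomputing it from scratch.
 */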
static void
nv50_sor_disable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);

	nv_encoder->crtc = NULL;

	if (nv_crtc) {
		struct nvkm_i2c_aux *aux = nv_encoder->aux;
		u8 pwr;

		if (aux) {
			int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
			if (ret == 0) {
				pwr &= ~DP_SET_POWER_MASK;
				pwr |=  DP_SET_POWER_D3;
				nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
			}
		}

		nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
		nv50_audio_disable(encoder, nv_crtc);
		nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
		nv50_outp_release(nv_encoder);
	}
}

static void
nv50_sor_enable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_lvds_script_v0 lvds;
	} lvds = {
		.base.version = 1,
		.base.method  = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
		.base.hasht   = nv_encoder->dcb->hasht,
		.base.hashm   = nv_encoder->dcb->hashm,
	};
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct drm_device *dev = encoder->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_connector *nv_connector;
	struct nvbios *bios = &drm->vbios;
	u8 proto = 0xf;
	u8 depth = 0x0;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	nv_encoder->crtc = encoder->crtc;
	nv50_outp_acquire(nv_encoder);

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
		if (nv_encoder->link & 1) {
			proto = 0x1;
			/* Only enable dual-link if:
			 *  - Need to (i.e. rate > 165MHz)
			 *  - DCB says we can
			 *  - Not an HDMI monitor, since there's no dual-link
			 *    on HDMI.
			 */
			if (mode->clock >= 165000 &&
			    nv_encoder->dcb->duallink_possible &&
			    !drm_detect_hdmi_monitor(nv_connector->edid))
				proto |= 0x4;
		} else {
			proto = 0x2;
		}

		nv50_hdmi_enable(&nv_encoder->base.base, mode);
		break;
	case DCB_OUTPUT_LVDS:
		proto = 0x0;

		if (bios->fp_no_ddc) {
			if (bios->fp.dual_link)
				lvds.lvds.script |= 0x0100;
			if (bios->fp.if_is_24bit)
				lvds.lvds.script |= 0x0200;
		} else {
			if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
				if (((u8 *)nv_connector->edid)[121] == 2)
					lvds.lvds.script |= 0x0100;
			} else
			if (mode->clock >= bios->fp.duallink_transition_clk) {
				lvds.lvds.script |= 0x0100;
			}

			if (lvds.lvds.script & 0x0100) {
				if (bios->fp.strapless_is_24bit & 2)
					lvds.lvds.script |= 0x0200;
			} else {
				if (bios->fp.strapless_is_24bit & 1)
					lvds.lvds.script |= 0x0200;
			}

			if (nv_connector->base.display_info.bpc == 8)
				lvds.lvds.script |= 0x0200;
		}

		nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds));
		break;
	case DCB_OUTPUT_DP:
		if (nv_connector->base.display_info.bpc == 6)
			depth = 0x2;
		else
		if (nv_connector->base.display_info.bpc == 8)
			depth = 0x5;
		else
			depth = 0x6;

		if (nv_encoder->link & 1)
			proto = 0x8;
		else
			proto = 0x9;

		nv50_audio_enable(encoder, mode);
		break;
	default:
		BUG();
		break;
	}

	nv_encoder->update(nv_encoder, nv_crtc->index, mode, proto, depth);
}
static const struct drm_encoder_helper_funcs
nv50_sor_help = {
	.atomic_check = nv50_outp_atomic_check,
	.enable = nv50_sor_enable,
	.disable = nv50_sor_disable,
};

static void
nv50_sor_destroy(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	nv50_mstm_del(&nv_encoder->dp.mstm);
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs
nv50_sor_func = {
	.destroy = nv50_sor_destroy,
};

static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type, ret;

	switch (dcbe->type) {
	case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
	case DCB_OUTPUT_TMDS:
	case DCB_OUTPUT_DP:
	default:
		type = DRM_MODE_ENCODER_TMDS;
		break;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->update = nv50_sor_update;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
			 "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_sor_help);

	drm_mode_connector_attach_encoder(connector, encoder);

	if (dcbe->type == DCB_OUTPUT_DP) {
		struct nv50_disp *disp = nv50_disp(encoder->dev);
		struct nvkm_i2c_aux *aux =
			nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
		if (aux) {
			if (disp->disp->object.oclass < GF110_DISP) {
				/* HW has no support for address-only
				 * transactions, so we're required to
				 * use custom I2C-over-AUX code.
				 */
				nv_encoder->i2c = &aux->i2c;
			} else {
				nv_encoder->i2c = &nv_connector->aux.ddc;
			}
			nv_encoder->aux = aux;
		}

		/*TODO: Use DP Info Table to check for support. */
		if (disp->disp->object.oclass >= GF110_DISP) {
			ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
					    nv_connector->base.base.id,
					    &nv_encoder->dp.mstm);
			if (ret)
				return ret;
		}
	} else {
		struct nvkm_i2c_bus *bus =
			nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
		if (bus)
			nv_encoder->i2c = &bus->i2c;
	}

	return 0;
}
/******************************************************************************
 * PIOR
 *****************************************************************************/
static int
nv50_pior_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
	if (ret)
		return ret;
	crtc_state->adjusted_mode.clock *= 2;
	return 0;
}

static void
nv50_pior_disable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	const int or = nv_encoder->or;
	u32 *push;

	if (nv_encoder->crtc) {
		push = evo_wait(mast, 4);
		if (push) {
			if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
				evo_mthd(push, 0x0700 + (or * 0x040), 1);
				evo_data(push, 0x00000000);
			}
			evo_kick(push, mast);
		}
	}

	nv_encoder->crtc = NULL;
	nv50_outp_release(nv_encoder);
}

static void
nv50_pior_enable(struct drm_encoder *encoder)
{
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_connector *nv_connector;
	struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
	u8 owner = 1 << nv_crtc->index;
	u8 proto, depth;
	u32 *push;

	nv50_outp_acquire(nv_encoder);

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	switch (nv_connector->base.display_info.bpc) {
	case 10: depth = 0x6; break;
	case  8: depth = 0x5; break;
	case  6: depth = 0x2; break;
	default: depth = 0x0; break;
	}

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
	case DCB_OUTPUT_DP:
		proto = 0x0;
		break;
	default:
		BUG();
		break;
	}

	push = evo_wait(mast, 8);
	if (push) {
		if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
			u32 ctrl = (depth << 16) | (proto << 8) | owner;
			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
				ctrl |= 0x00001000;
			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
				ctrl |= 0x00002000;
			evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1);
			evo_data(push, ctrl);
		}

		evo_kick(push, mast);
	}

	nv_encoder->crtc = encoder->crtc;
}

static const struct drm_encoder_helper_funcs
nv50_pior_help = {
	.atomic_check = nv50_pior_atomic_check,
	.enable = nv50_pior_enable,
	.disable = nv50_pior_disable,
};

static void
nv50_pior_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs
nv50_pior_func = {
	.destroy = nv50_pior_destroy,
};

static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nvkm_i2c_bus *bus = NULL;
	struct nvkm_i2c_aux *aux = NULL;
	struct i2c_adapter *ddc;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type;

	switch (dcbe->type) {
	case DCB_OUTPUT_TMDS:
		bus  = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
		ddc  = bus ? &bus->i2c : NULL;
		type = DRM_MODE_ENCODER_TMDS;
		break;
	case DCB_OUTPUT_DP:
		aux  = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
		ddc  = aux ? &nv_connector->aux.ddc : NULL;
		type = DRM_MODE_ENCODER_TMDS;
		break;
	default:
		return -ENODEV;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->i2c = ddc;
	nv_encoder->aux = aux;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
			 "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_pior_help);

	drm_mode_connector_attach_encoder(connector, encoder);
	return 0;
}
/******************************************************************************
 * Atomic
 *****************************************************************************/

static void
nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
{
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_dmac *core = &disp->mast.base;
	struct nv50_mstm *mstm;
	struct drm_encoder *encoder;
	u32 *push;

	NV_ATOMIC(drm, "commit core %08x\n", interlock);

	drm_for_each_encoder(encoder, drm->dev) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			mstm = nouveau_encoder(encoder)->dp.mstm;
			if (mstm && mstm->modified)
				nv50_mstm_prepare(mstm);
		}
	}

	if ((push = evo_wait(core, 5))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, 0x80000000);
		evo_mthd(push, 0x0080, 2);
		evo_data(push, interlock);
		evo_data(push, 0x00000000);
		nouveau_bo_wr32(disp->sync, 0, 0x00000000);
		evo_kick(push, core);
		if (nvif_msec(&drm->client.device, 2000ULL,
			if (nouveau_bo_rd32(disp->sync, 0))
				break;
			usleep_range(1, 2);
		) < 0)
			NV_ERROR(drm, "EVO timeout\n");
	}

	drm_for_each_encoder(encoder, drm->dev) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			mstm = nouveau_encoder(encoder)->dp.mstm;
			if (mstm && mstm->modified)
				nv50_mstm_cleanup(mstm);
		}
	}
}
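
/* The push sequence above is the core-channel completion handshake:
 * method 0x0084 appears to arm the completion notifier, 0x0080 submits
 * UPDATE with the interlock mask, and the driver then polls the shared
 * sync bo (written back by the EVO notifier) for up to two seconds
 * before declaring an EVO timeout.
 */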
static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct drm_crtc *crtc;
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_atom *atom = nv50_atom(state);
	struct nv50_outp_atom *outp, *outt;
	u32 interlock_core = 0;
	u32 interlock_chan = 0;
	int i;

	NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
	drm_atomic_helper_wait_for_fences(dev, state, false);
	drm_atomic_helper_wait_for_dependencies(state);
	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	if (atom->lock_core)
		mutex_lock(&disp->mutex);

	/* Disable head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
			  asyh->clr.mask, asyh->set.mask);
		if (old_crtc_state->active && !new_crtc_state->active)
			drm_crtc_vblank_off(crtc);

		if (asyh->clr.mask) {
			nv50_head_flush_clr(head, asyh, atom->flush_disable);
			interlock_core |= 1;
		}
	}

	/* Disable plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
			  asyw->clr.mask, asyw->set.mask);
		if (!asyw->clr.mask)
			continue;

		interlock_chan |= nv50_wndw_flush_clr(wndw, interlock_core,
						      atom->flush_disable,
						      asyw);
	}

	/* Disable output path(s). */
	list_for_each_entry(outp, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
			  outp->clr.mask, outp->set.mask);

		if (outp->clr.mask) {
			help->disable(encoder);
			interlock_core |= 1;
			if (outp->flush_disable) {
				nv50_disp_atomic_commit_core(drm, interlock_chan);
				interlock_core = 0;
				interlock_chan = 0;
			}
		}
	}

	/* Flush disable. */
	if (interlock_core) {
		if (atom->flush_disable) {
			nv50_disp_atomic_commit_core(drm, interlock_chan);
			interlock_core = 0;
			interlock_chan = 0;
		}
	}

	/* Update output path(s). */
	list_for_each_entry_safe(outp, outt, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
			  outp->set.mask, outp->clr.mask);

		if (outp->set.mask) {
			help->enable(encoder);
			interlock_core = 1;
		}

		list_del(&outp->head);
		kfree(outp);
	}

	/* Update head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
			  asyh->set.mask, asyh->clr.mask);

		if (asyh->set.mask) {
			nv50_head_flush_set(head, asyh);
			interlock_core = 1;
		}

		if (new_crtc_state->active) {
			if (!old_crtc_state->active)
				drm_crtc_vblank_on(crtc);
			if (new_crtc_state->event)
				drm_crtc_vblank_get(crtc);
		}
	}

	/* Update plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
			  asyw->set.mask, asyw->clr.mask);
		if ( !asyw->set.mask &&
		    (!asyw->clr.mask || atom->flush_disable))
			continue;

		interlock_chan |= nv50_wndw_flush_set(wndw, interlock_core, asyw);
	}

	/* Flush update. */
	if (interlock_core) {
		if (!interlock_chan && atom->state.legacy_cursor_update) {
			u32 *push = evo_wait(&disp->mast, 2);
			if (push) {
				evo_mthd(push, 0x0080, 1);
				evo_data(push, 0x00000000);
				evo_kick(push, &disp->mast);
			}
		} else {
			nv50_disp_atomic_commit_core(drm, interlock_chan);
		}
	}

	if (atom->lock_core)
		mutex_unlock(&disp->mutex);

	/* Wait for HW to signal completion. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);
		int ret = nv50_wndw_wait_armed(wndw, asyw);
		if (ret)
			NV_ERROR(drm, "%s: timeout\n", plane->name);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->event) {
			unsigned long flags;
			/* Get correct count/ts if racing with vblank irq */
			if (new_crtc_state->active)
				drm_crtc_accurate_vblank_count(crtc);
			spin_lock_irqsave(&crtc->dev->event_lock, flags);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

			new_crtc_state->event = NULL;
			if (new_crtc_state->active)
				drm_crtc_vblank_put(crtc);
		}
	}

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_helper_commit_cleanup_done(state);
	drm_atomic_state_put(state);
}
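
/* Overall ordering enforced by the commit tail above: disable heads,
 * planes and output paths first (flushing with a core update where the
 * interlocks require it), then enable in the opposite order, and only
 * then wait for notifier completion and deliver vblank events.
 */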
static void
nv50_disp_atomic_commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, typeof(*state), commit_work);
	nv50_disp_atomic_commit_tail(state);
}

static int
nv50_disp_atomic_commit(struct drm_device *dev,
			struct drm_atomic_state *state, bool nonblock)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_disp *disp = nv50_disp(dev);
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	bool active = false;
	int ret, i;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0 && ret != -EACCES)
		return ret;

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		goto done;

	INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		goto done;

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret)
			goto err_cleanup;
	}

	ret = drm_atomic_helper_swap_state(state, true);
	if (ret)
		goto err_cleanup;

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		if (asyw->set.image) {
			asyw->ntfy.handle = wndw->dmac->sync.handle;
			asyw->ntfy.offset = wndw->ntfy;
			asyw->ntfy.awaken = false;
			asyw->set.ntfy = true;
			nouveau_bo_wr32(disp->sync, wndw->ntfy / 4, 0x00000000);
			wndw->ntfy ^= 0x10;
		}
	}

	drm_atomic_state_get(state);

	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		nv50_disp_atomic_commit_tail(state);

	drm_for_each_crtc(crtc, dev) {
		if (crtc->state->enable) {
			if (!drm->have_disp_power_ref) {
				drm->have_disp_power_ref = true;
				return 0;
			}
			active = true;
			break;
		}
	}

	if (!active && drm->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		drm->have_disp_power_ref = false;
	}

err_cleanup:
	if (ret)
		drm_atomic_helper_cleanup_planes(dev, state);
done:
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}
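
/* The drm_for_each_crtc() dance above keeps a runtime-PM reference held
 * for as long as any head remains enabled (have_disp_power_ref): the
 * early "return 0" deliberately skips the put, so the GPU isn't
 * runtime-suspended while scanning out.
 */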
static struct nv50_outp_atom *
nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
{
	struct nv50_outp_atom *outp;

	list_for_each_entry(outp, &atom->outp, head) {
		if (outp->encoder == encoder)
			return outp;
	}

	outp = kzalloc(sizeof(*outp), GFP_KERNEL);
	if (!outp)
		return ERR_PTR(-ENOMEM);

	list_add(&outp->head, &atom->outp);
	outp->encoder = encoder;
	return outp;
}

static int
nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
				struct drm_connector_state *old_connector_state)
{
	struct drm_encoder *encoder = old_connector_state->best_encoder;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	struct nv50_outp_atom *outp;

	if (!(crtc = old_connector_state->crtc))
		return 0;

	old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
	new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
	if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		outp = nv50_disp_outp_atomic_add(atom, encoder);
		if (IS_ERR(outp))
			return PTR_ERR(outp);

		if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			outp->flush_disable = true;
			atom->flush_disable = true;
		}
		outp->clr.ctrl = true;
		atom->lock_core = true;
	}

	return 0;
}

static int
nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
				struct drm_connector_state *connector_state)
{
	struct drm_encoder *encoder = connector_state->best_encoder;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	struct nv50_outp_atom *outp;

	if (!(crtc = connector_state->crtc))
		return 0;

	new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
	if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		outp = nv50_disp_outp_atomic_add(atom, encoder);
		if (IS_ERR(outp))
			return PTR_ERR(outp);

		outp->set.ctrl = true;
		atom->lock_core = true;
	}

	return 0;
}

static int
nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);
	struct drm_connector_state *old_connector_state, *new_connector_state;
	struct drm_connector *connector;
	int ret, i;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
		if (ret)
			return ret;

		ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
		if (ret)
			return ret;
	}

	return 0;
}

static void
nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);
	struct nv50_outp_atom *outp, *outt;

	list_for_each_entry_safe(outp, outt, &atom->outp, head) {
		list_del(&outp->head);
		kfree(outp);
	}

	drm_atomic_state_default_clear(state);
}

static void
nv50_disp_atomic_state_free(struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);
	drm_atomic_state_default_release(&atom->state);
	kfree(atom);
}

static struct drm_atomic_state *
nv50_disp_atomic_state_alloc(struct drm_device *dev)
{
	struct nv50_atom *atom;
	if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
	    drm_atomic_state_init(dev, &atom->state) < 0) {
		kfree(atom);
		return NULL;
	}
	INIT_LIST_HEAD(&atom->outp);
	return &atom->state;
}

static const struct drm_mode_config_funcs
nv50_disp_func = {
	.fb_create = nouveau_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = nv50_disp_atomic_check,
	.atomic_commit = nv50_disp_atomic_commit,
	.atomic_state_alloc = nv50_disp_atomic_state_alloc,
	.atomic_state_clear = nv50_disp_atomic_state_clear,
	.atomic_state_free = nv50_disp_atomic_state_free,
};
/******************************************************************************
 * Init
 *****************************************************************************/

void
nv50_display_fini(struct drm_device *dev)
{
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	struct drm_plane *plane;

	drm_for_each_plane(plane, dev) {
		struct nv50_wndw *wndw = nv50_wndw(plane);
		if (plane->funcs != &nv50_wndw)
			continue;
		nv50_wndw_fini(wndw);
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			nv_encoder = nouveau_encoder(encoder);
			nv50_mstm_fini(nv_encoder->dp.mstm);
		}
	}
}

int
nv50_display_init(struct drm_device *dev)
{
	struct drm_encoder *encoder;
	struct drm_plane *plane;
	u32 *push;

	push = evo_wait(nv50_mast(dev), 32);
	if (!push)
		return -EBUSY;

	evo_mthd(push, 0x0088, 1);
	evo_data(push, nv50_mast(dev)->base.sync.handle);
	evo_kick(push, nv50_mast(dev));

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			struct nouveau_encoder *nv_encoder =
				nouveau_encoder(encoder);
			nv50_mstm_init(nv_encoder->dp.mstm);
		}
	}

	drm_for_each_plane(plane, dev) {
		struct nv50_wndw *wndw = nv50_wndw(plane);
		if (plane->funcs != &nv50_wndw)
			continue;
		nv50_wndw_init(wndw);
	}

	return 0;
}

void
nv50_display_destroy(struct drm_device *dev)
{
	struct nv50_disp *disp = nv50_disp(dev);

	nv50_dmac_destroy(&disp->mast.base);

	nouveau_bo_unmap(disp->sync);
	if (disp->sync)
		nouveau_bo_unpin(disp->sync);
	nouveau_bo_ref(NULL, &disp->sync);

	nouveau_display(dev)->priv = NULL;
	kfree(disp);
}

MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
static int nouveau_atomic = 0;
module_param_named(atomic, nouveau_atomic, int, 0400);
int
nv50_display_create(struct drm_device *dev)
{
	struct nvif_device *device = &nouveau_drm(dev)->client.device;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct dcb_table *dcb = &drm->vbios.dcb;
	struct drm_connector *connector, *tmp;
	struct nv50_disp *disp;
	struct dcb_output *dcbe;
	int crtcs, ret, i;

	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;

	mutex_init(&disp->mutex);

	nouveau_display(dev)->priv = disp;
	nouveau_display(dev)->dtor = nv50_display_destroy;
	nouveau_display(dev)->init = nv50_display_init;
	nouveau_display(dev)->fini = nv50_display_fini;
	disp->disp = &nouveau_display(dev)->disp;
	dev->mode_config.funcs = &nv50_disp_func;
	dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
	if (nouveau_atomic)
		dev->driver->driver_features |= DRIVER_ATOMIC;

	/* small shared memory area we use for notifiers and semaphores */
	ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
			     0, 0x0000, NULL, NULL, &disp->sync);
	if (!ret) {
		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
		if (!ret) {
			ret = nouveau_bo_map(disp->sync);
			if (ret)
				nouveau_bo_unpin(disp->sync);
		}
		if (ret)
			nouveau_bo_ref(NULL, &disp->sync);
	}

	if (ret)
		goto out;

	/* allocate master evo channel */
	ret = nv50_core_create(device, &disp->disp->object,
			       disp->sync->bo.offset, &disp->mast);
	if (ret)
		goto out;

	/* create crtc objects to represent the hw heads */
	if (disp->disp->object.oclass >= GF110_DISP)
		crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
	else
		crtcs = 0x3;

	for (i = 0; i < fls(crtcs); i++) {
		if (!(crtcs & (1 << i)))
			continue;
		ret = nv50_head_create(dev, i);
		if (ret)
			goto out;
	}

	/* create encoder/connector objects based on VBIOS DCB table */
	for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
		connector = nouveau_connector_create(dev, dcbe->connector);
		if (IS_ERR(connector))
			continue;

		if (dcbe->location == DCB_LOC_ON_CHIP) {
			switch (dcbe->type) {
			case DCB_OUTPUT_TMDS:
			case DCB_OUTPUT_LVDS:
			case DCB_OUTPUT_DP:
				ret = nv50_sor_create(connector, dcbe);
				break;
			case DCB_OUTPUT_ANALOG:
				ret = nv50_dac_create(connector, dcbe);
				break;
			default:
				ret = -ENODEV;
				break;
			}
		} else {
			ret = nv50_pior_create(connector, dcbe);
		}

		if (ret) {
			NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
				dcbe->location, dcbe->type,
				ffs(dcbe->or) - 1, ret);
			ret = 0;
		}
	}

	/* cull any connectors we created that don't have an encoder */
	list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
		if (connector->encoder_ids[0])
			continue;

		NV_WARN(drm, "%s has no encoders, removing\n",
			connector->name);
		connector->funcs->destroy(connector);
	}

out:
	if (ret)
		nv50_display_destroy(dev);
	return ret;
}