/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <linux/dma-mapping.h>
#include <linux/hdmi.h>

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_edid.h>

#include <nvif/mem.h>

#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/cl5070.h>
#include <nvif/cl507a.h>
#include <nvif/cl507b.h>
#include <nvif/cl507c.h>
#include <nvif/cl507d.h>
#include <nvif/cl507e.h>
#include <nvif/event.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_fence.h"
#include "nouveau_fbcon.h"
#include "nv50_display.h"

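/* EVO channel indices: channel 0 is the core (master) DMA channel, and
 * each head c then gets a base (flip) and an overlay DMA channel, for
 * EVO_DMA_NR DMA channels in total.  The overlay-immediate and cursor
 * channels that follow are PIO channels rather than DMA.
 */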
#define EVO_DMA_NR 9

#define EVO_MASTER  (0x00)
#define EVO_FLIP(c)  (0x01 + (c))
#define EVO_OVLY(c)  (0x05 + (c))
#define EVO_OIMM(c)  (0x09 + (c))
#define EVO_CURS(c)  (0x0d + (c))

/* offsets in shared sync bo of various structures */
#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
#define EVO_MAST_NTFY     EVO_SYNC(      0, 0x00)
#define EVO_FLIP_SEM0(c)  EVO_SYNC((c) + 1, 0x00)
#define EVO_FLIP_SEM1(c)  EVO_SYNC((c) + 1, 0x10)
#define EVO_FLIP_NTFY0(c) EVO_SYNC((c) + 1, 0x20)
#define EVO_FLIP_NTFY1(c) EVO_SYNC((c) + 1, 0x30)

/******************************************************************************
 * Atomic state
 *****************************************************************************/
#define nv50_atom(p) container_of((p), struct nv50_atom, state)

struct nv50_atom {
        struct drm_atomic_state state;

        struct list_head outp;
        bool lock_core;
        bool flush_disable;
};

struct nv50_outp_atom {
        struct list_head head;

        struct drm_encoder *encoder;
        bool flush_disable;

        union {
                struct {
                        bool ctrl:1;
                };
                u8 mask;
        } clr;

        union {
                struct {
                        bool ctrl:1;
                };
                u8 mask;
        } set;
};

#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)

struct nv50_head_atom {
        struct drm_crtc_state state;

        struct {
                u16 iW;
                u16 iH;
                u16 oW;
                u16 oH;
        } view;

        struct nv50_head_mode {
                bool interlace;
                u32 clock;
                struct {
                        u16 active;
                        u16 synce;
                        u16 blanke;
                        u16 blanks;
                } h;
                struct {
                        u32 active;
                        u16 synce;
                        u16 blanke;
                        u16 blanks;
                        u16 blank2s;
                        u16 blank2e;
                        u16 blankus;
                } v;
        } mode;

        struct {
                bool visible;
                u32 handle;
                u64 offset:40;
                u8 mode:4;
        } lut;

        struct {
                bool visible;
                u32 handle;
                u64 offset:40;
                u8  format;
                u8  kind:7;
                u8  layout:1;
                u8  block:4;
                u32 pitch:20;
                u16 x;
                u16 y;
                u16 w;
                u16 h;
        } core;

        struct {
                bool visible;
                u32 handle;
                u64 offset:40;
                u8  layout:1;
                u8  format:1;
        } curs;

        struct {
                u8  depth;
                u8  cpp;
                u16 x;
                u16 y;
                u16 w;
                u16 h;
        } base;

        struct {
                u8 cpp;
        } ovly;

        struct {
                bool enable:1;
                u8 bits:2;
                u8 mode:4;
        } dither;

        struct {
                struct {
                        u16 cos:12;
                        u16 sin:12;
                } sat;
        } procamp;

        union {
                struct {
                        bool ilut:1;
                        bool core:1;
                        bool curs:1;
                };
                u8 mask;
        } clr;

        union {
                struct {
                        bool ilut:1;
                        bool core:1;
                        bool curs:1;
                        bool view:1;
                        bool mode:1;
                        bool base:1;
                        bool ovly:1;
                        bool dither:1;
                        bool procamp:1;
                };
                u16 mask;
        } set;
};

static inline struct nv50_head_atom *
nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
        struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
        if (IS_ERR(statec))
                return (void *)statec;
        return nv50_head_atom(statec);
}

#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)

struct nv50_wndw_atom {
        struct drm_plane_state state;
        u8 interval;

        struct {
                u32  handle;
                u16  offset:12;
                bool awaken:1;
        } ntfy;

        struct {
                u32 handle;
                u16 offset:12;
                u32 acquire;
                u32 release;
        } sema;

        struct {
                u8 enable:2;
        } lut;

        struct {
                u8  mode:2;
                u8  interval:4;

                u8  format;
                u8  kind:7;
                u8  layout:1;
                u8  block:4;
                u32 pitch:20;
                u16 w;
                u16 h;

                u32 handle;
                u64 offset;
        } image;

        struct {
                u16 x;
                u16 y;
        } point;

        union {
                struct {
                        bool ntfy:1;
                        bool sema:1;
                        bool image:1;
                };
                u8 mask;
        } clr;

        union {
                struct {
                        bool ntfy:1;
                        bool sema:1;
                        bool image:1;
                        bool lut:1;
                        bool point:1;
                };
                u8 mask;
        } set;
};

/******************************************************************************
 * EVO channel
 *****************************************************************************/

struct nv50_chan {
        struct nvif_object user;
        struct nvif_device *device;
};

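/* Display channel classes are listed newest-first in the oclass array;
 * probe the classes exposed by the display object and create the first
 * one we both want and have.
 */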
static int
nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
                 const s32 *oclass, u8 head, void *data, u32 size,
                 struct nv50_chan *chan)
{
        struct nvif_sclass *sclass;
        int ret, i, n;

        chan->device = device;

        ret = n = nvif_object_sclass_get(disp, &sclass);
        if (ret < 0)
                return ret;

        while (oclass[0]) {
                for (i = 0; i < n; i++) {
                        if (sclass[i].oclass == oclass[0]) {
                                ret = nvif_object_init(disp, 0, oclass[0],
                                                       data, size, &chan->user);
                                if (ret == 0)
                                        nvif_object_map(&chan->user, NULL, 0);
                                nvif_object_sclass_put(&sclass);
                                return ret;
                        }
                }
                oclass++;
        }

        nvif_object_sclass_put(&sclass);
        return -ENOSYS;
}

static void
nv50_chan_destroy(struct nv50_chan *chan)
{
        nvif_object_fini(&chan->user);
}

/******************************************************************************
 * PIO EVO channel
 *****************************************************************************/

struct nv50_pioc {
        struct nv50_chan base;
};

static void
nv50_pioc_destroy(struct nv50_pioc *pioc)
{
        nv50_chan_destroy(&pioc->base);
}

static int
nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
                 const s32 *oclass, u8 head, void *data, u32 size,
                 struct nv50_pioc *pioc)
{
        return nv50_chan_create(device, disp, oclass, head, data, size,
                                &pioc->base);
}

/******************************************************************************
 * Overlay Immediate
 *****************************************************************************/

struct nv50_oimm {
        struct nv50_pioc base;
};

static int
nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
                 int head, struct nv50_oimm *oimm)
{
        struct nv50_disp_cursor_v0 args = {
                .head = head,
        };
        static const s32 oclass[] = {
                GK104_DISP_OVERLAY,
                GF110_DISP_OVERLAY,
                GT214_DISP_OVERLAY,
                G82_DISP_OVERLAY,
                NV50_DISP_OVERLAY,
                0
        };

        return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
                                &oimm->base);
}

/******************************************************************************
 * DMA EVO channel
 *****************************************************************************/

struct nv50_dmac_ctxdma {
        struct list_head head;
        struct nvif_object object;
};

struct nv50_dmac {
        struct nv50_chan base;

        struct nvif_mem push;
        u32 *ptr;

        struct nvif_object sync;
        struct nvif_object vram;
        struct list_head ctxdma;

        /* Protects against concurrent pushbuf access to this channel; the
         * lock is grabbed by evo_wait (if the pushbuf reservation is
         * successful) and dropped again by evo_kick. */
        struct mutex lock;
};

static void
nv50_dmac_ctxdma_del(struct nv50_dmac_ctxdma *ctxdma)
{
        nvif_object_fini(&ctxdma->object);
        list_del(&ctxdma->head);
        kfree(ctxdma);
}

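/* ctxdmas (DMA objects) are cached per memory kind: the handle encodes
 * the kind, so framebuffers that share a kind reuse the same object
 * rather than allocating one per framebuffer.
 */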
static struct nv50_dmac_ctxdma *
nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
{
        struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
        struct nv50_dmac_ctxdma *ctxdma;
        const u8    kind = fb->nvbo->kind;
        const u32 handle = 0xfb000000 | kind;
        struct {
                struct nv_dma_v0 base;
                union {
                        struct nv50_dma_v0 nv50;
                        struct gf100_dma_v0 gf100;
                        struct gf119_dma_v0 gf119;
                };
        } args = {};
        u32 argc = sizeof(args.base);
        int ret;

        list_for_each_entry(ctxdma, &dmac->ctxdma, head) {
                if (ctxdma->object.handle == handle)
                        return ctxdma;
        }

        if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
                return ERR_PTR(-ENOMEM);
        list_add(&ctxdma->head, &dmac->ctxdma);

        args.base.target = NV_DMA_V0_TARGET_VRAM;
        args.base.access = NV_DMA_V0_ACCESS_RDWR;
        args.base.start  = 0;
        args.base.limit  = drm->client.device.info.ram_user - 1;

        if (drm->client.device.info.chipset < 0x80) {
                args.nv50.part = NV50_DMA_V0_PART_256;
                argc += sizeof(args.nv50);
        } else
        if (drm->client.device.info.chipset < 0xc0) {
                args.nv50.part = NV50_DMA_V0_PART_256;
                args.nv50.kind = kind;
                argc += sizeof(args.nv50);
        } else
        if (drm->client.device.info.chipset < 0xd0) {
                args.gf100.kind = kind;
                argc += sizeof(args.gf100);
        } else {
                args.gf119.page = GF119_DMA_V0_PAGE_LP;
                args.gf119.kind = kind;
                argc += sizeof(args.gf119);
        }

        ret = nvif_object_init(&dmac->base.user, handle, NV_DMA_IN_MEMORY,
                               &args, argc, &ctxdma->object);
        if (ret) {
                nv50_dmac_ctxdma_del(ctxdma);
                return ERR_PTR(ret);
        }

        return ctxdma;
}

static void
nv50_dmac_destroy(struct nv50_dmac *dmac)
{
        struct nv50_dmac_ctxdma *ctxdma, *ctxtmp;

        list_for_each_entry_safe(ctxdma, ctxtmp, &dmac->ctxdma, head) {
                nv50_dmac_ctxdma_del(ctxdma);
        }

        nvif_object_fini(&dmac->vram);
        nvif_object_fini(&dmac->sync);

        nv50_chan_destroy(&dmac->base);

        nvif_mem_fini(&dmac->push);
}

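/* As well as the channel itself, a DMA EVO channel gets a 4KiB coherent
 * pushbuf mapping and two DMA objects: one covering the 4KiB window of
 * the shared sync buffer (handle 0xf0000000), and one spanning the whole
 * of VRAM (handle 0xf0000001).
 */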
static int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
                 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
                 struct nv50_dmac *dmac)
{
        struct nouveau_cli *cli = (void *)device->object.client;
        struct nv50_disp_core_channel_dma_v0 *args = data;
        int ret;

        mutex_init(&dmac->lock);
        INIT_LIST_HEAD(&dmac->ctxdma);

        ret = nvif_mem_init_map(&cli->mmu, NVIF_MEM_COHERENT, 0x1000,
                                &dmac->push);
        if (ret)
                return ret;

        dmac->ptr = dmac->push.object.map.ptr;

        args->pushbuf = nvif_handle(&dmac->push.object);

        ret = nv50_chan_create(device, disp, oclass, head, data, size,
                               &dmac->base);
        if (ret)
                return ret;

        ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
                               &(struct nv_dma_v0) {
                                        .target = NV_DMA_V0_TARGET_VRAM,
                                        .access = NV_DMA_V0_ACCESS_RDWR,
                                        .start = syncbuf + 0x0000,
                                        .limit = syncbuf + 0x0fff,
                               }, sizeof(struct nv_dma_v0),
                               &dmac->sync);
        if (ret)
                return ret;

        ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
                               &(struct nv_dma_v0) {
                                        .target = NV_DMA_V0_TARGET_VRAM,
                                        .access = NV_DMA_V0_ACCESS_RDWR,
                                        .start = 0,
                                        .limit = device->info.ram_user - 1,
                               }, sizeof(struct nv_dma_v0),
                               &dmac->vram);
        if (ret)
                return ret;

        return ret;
}

/******************************************************************************
 * Core
 *****************************************************************************/

struct nv50_mast {
        struct nv50_dmac base;
};

static int
nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
                 u64 syncbuf, struct nv50_mast *core)
{
        struct nv50_disp_core_channel_dma_v0 args = {};
        static const s32 oclass[] = {
                GP102_DISP_CORE_CHANNEL_DMA,
                GP100_DISP_CORE_CHANNEL_DMA,
                GM200_DISP_CORE_CHANNEL_DMA,
                GM107_DISP_CORE_CHANNEL_DMA,
                GK110_DISP_CORE_CHANNEL_DMA,
                GK104_DISP_CORE_CHANNEL_DMA,
                GF110_DISP_CORE_CHANNEL_DMA,
                GT214_DISP_CORE_CHANNEL_DMA,
                GT206_DISP_CORE_CHANNEL_DMA,
                GT200_DISP_CORE_CHANNEL_DMA,
                G82_DISP_CORE_CHANNEL_DMA,
                NV50_DISP_CORE_CHANNEL_DMA,
                0
        };

        return nv50_dmac_create(device, disp, oclass, 0, &args, sizeof(args),
                                syncbuf, &core->base);
}

/******************************************************************************
 * Base
 *****************************************************************************/

struct nv50_sync {
        struct nv50_dmac base;
        u32 addr;
        u32 data;
};

static int
nv50_base_create(struct nvif_device *device, struct nvif_object *disp,
                 int head, u64 syncbuf, struct nv50_sync *base)
{
        struct nv50_disp_base_channel_dma_v0 args = {
                .head = head,
        };
        static const s32 oclass[] = {
                GK110_DISP_BASE_CHANNEL_DMA,
                GK104_DISP_BASE_CHANNEL_DMA,
                GF110_DISP_BASE_CHANNEL_DMA,
                GT214_DISP_BASE_CHANNEL_DMA,
                GT200_DISP_BASE_CHANNEL_DMA,
                G82_DISP_BASE_CHANNEL_DMA,
                NV50_DISP_BASE_CHANNEL_DMA,
                0
        };

        return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
                                syncbuf, &base->base);
}

/******************************************************************************
 * Overlay
 *****************************************************************************/

struct nv50_ovly {
        struct nv50_dmac base;
};

static int
nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
                 int head, u64 syncbuf, struct nv50_ovly *ovly)
{
        struct nv50_disp_overlay_channel_dma_v0 args = {
                .head = head,
        };
        static const s32 oclass[] = {
                GK104_DISP_OVERLAY_CONTROL_DMA,
                GF110_DISP_OVERLAY_CONTROL_DMA,
                GT214_DISP_OVERLAY_CHANNEL_DMA,
                GT200_DISP_OVERLAY_CHANNEL_DMA,
                G82_DISP_OVERLAY_CHANNEL_DMA,
                NV50_DISP_OVERLAY_CHANNEL_DMA,
                0
        };

        return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
                                syncbuf, &ovly->base);
}

struct nv50_head {
        struct nouveau_crtc base;
        struct {
                struct nouveau_bo *nvbo[2];
                int next;
        } lut;
        struct nv50_ovly ovly;
        struct nv50_oimm oimm;
};

#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
#define nv50_ovly(c) (&nv50_head(c)->ovly)
#define nv50_oimm(c) (&nv50_head(c)->oimm)
#define nv50_chan(c) (&(c)->base.base)
#define nv50_vers(c) nv50_chan(c)->user.oclass

struct nv50_disp {
        struct nvif_disp *disp;
        struct nv50_mast mast;

        struct nouveau_bo *sync;

        struct mutex mutex;
};

static struct nv50_disp *
nv50_disp(struct drm_device *dev)
{
        return nouveau_display(dev)->priv;
}

#define nv50_mast(d) (&nv50_disp(d)->mast)

/******************************************************************************
 * EVO channel helpers
 *****************************************************************************/
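/* evo_wait() reserves space for "nr" dwords in a channel's 4KiB pushbuf,
 * taking the channel lock on success.  PUT (0x0000) and GET (0x0004) in
 * the channel's user area are byte offsets; when the buffer is nearly
 * full we write a 0x20000000 command (jump back to offset 0), reset PUT,
 * and wait for GET to follow before reusing the buffer from the start.
 */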
static u32 *
evo_wait(void *evoc, int nr)
{
        struct nv50_dmac *dmac = evoc;
        struct nvif_device *device = dmac->base.device;
        u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;

        mutex_lock(&dmac->lock);
        if (put + nr >= (PAGE_SIZE / 4) - 8) {
                dmac->ptr[put] = 0x20000000;

                nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
                if (nvif_msec(device, 2000,
                        if (!nvif_rd32(&dmac->base.user, 0x0004))
                                break;
                ) < 0) {
                        mutex_unlock(&dmac->lock);
                        pr_err("nouveau: evo channel stalled\n");
                        return NULL;
                }

                put = 0;
        }

        return dmac->ptr + put;
}

static void
evo_kick(u32 *push, void *evoc)
{
        struct nv50_dmac *dmac = evoc;
        nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
        mutex_unlock(&dmac->lock);
}

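/* An EVO method header encodes the data count in bits 18 and up, and the
 * method offset in the low bits; evo_data() pushes the arguments that
 * follow it.
 */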
#define evo_mthd(p, m, s) do {                                          \
        const u32 _m = (m), _s = (s);                                   \
        if (drm_debug & DRM_UT_KMS)                                     \
                pr_err("%04x %d %s\n", _m, _s, __func__);               \
        *((p)++) = ((_s << 18) | _m);                                   \
} while(0)

#define evo_data(p, d) do {                                             \
        const u32 _d = (d);                                             \
        if (drm_debug & DRM_UT_KMS)                                     \
                pr_err("\t%08x\n", _d);                                 \
        *((p)++) = _d;                                                  \
} while(0)

/******************************************************************************
 * Plane
 *****************************************************************************/
#define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)

struct nv50_wndw {
        const struct nv50_wndw_func *func;
        struct nv50_dmac *dmac;

        struct drm_plane plane;

        struct nvif_notify notify;
        u16 ntfy;
        u16 sema;
        u32 data;
};

struct nv50_wndw_func {
        void *(*dtor)(struct nv50_wndw *);
        int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
                       struct nv50_head_atom *asyh);
        void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
                        struct nv50_head_atom *asyh);
        void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
                        struct nv50_wndw_atom *asyw);

        void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
        void (*sema_clr)(struct nv50_wndw *);
        void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
        void (*ntfy_clr)(struct nv50_wndw *);
        int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *);
        void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
        void (*image_clr)(struct nv50_wndw *);
        void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
        void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);

        u32 (*update)(struct nv50_wndw *, u32 interlock);
};

static int
nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
        if (asyw->set.ntfy)
                return wndw->func->ntfy_wait_begun(wndw, asyw);
        return 0;
}

static u32
nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
                    struct nv50_wndw_atom *asyw)
{
        if (asyw->clr.sema && (!asyw->set.sema || flush))
                wndw->func->sema_clr(wndw);
        if (asyw->clr.ntfy && (!asyw->set.ntfy || flush))
                wndw->func->ntfy_clr(wndw);
        if (asyw->clr.image && (!asyw->set.image || flush))
                wndw->func->image_clr(wndw);

        return flush ? wndw->func->update(wndw, interlock) : 0;
}

static u32
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
                    struct nv50_wndw_atom *asyw)
{
        if (interlock) {
                asyw->image.mode = 0;
                asyw->image.interval = 1;
        }

        if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
        if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
        if (asyw->set.image) wndw->func->image_set(wndw, asyw);
        if (asyw->set.lut  ) wndw->func->lut      (wndw, asyw);
        if (asyw->set.point) wndw->func->point    (wndw, asyw);

        return wndw->func->update(wndw, interlock);
}

static void
nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
                               struct nv50_wndw_atom *asyw,
                               struct nv50_head_atom *asyh)
{
        struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
        NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
        wndw->func->release(wndw, asyw, asyh);
        asyw->ntfy.handle = 0;
        asyw->sema.handle = 0;
}

static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
                               struct nv50_wndw_atom *asyw,
                               struct nv50_head_atom *asyh)
{
        struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
        struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
        int ret;

        NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);

        asyw->image.w = fb->base.width;
        asyw->image.h = fb->base.height;
        asyw->image.kind = fb->nvbo->kind;

        if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
                asyw->interval = 0;
        else
                asyw->interval = 1;

        if (asyw->image.kind) {
                asyw->image.layout = 0;
                if (drm->client.device.info.chipset >= 0xc0)
                        asyw->image.block = fb->nvbo->mode >> 4;
                else
                        asyw->image.block = fb->nvbo->mode;
                asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
        } else {
                asyw->image.layout = 1;
                asyw->image.block  = 0;
                asyw->image.pitch  = fb->base.pitches[0];
        }

        ret = wndw->func->acquire(wndw, asyw, asyh);
        if (ret)
                return ret;

        if (asyw->set.image) {
                if (!(asyw->image.mode = asyw->interval ? 0 : 1))
                        asyw->image.interval = asyw->interval;
                else
                        asyw->image.interval = 0;
        }

        return 0;
}

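/* Convention: "arm" variables (armw/harm) refer to the state currently
 * armed in hardware; "asy" variables (asyw/asyh) refer to the new state
 * being checked for this update.
 */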
static int
nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
        struct nouveau_drm *drm = nouveau_drm(plane->dev);
        struct nv50_wndw *wndw = nv50_wndw(plane);
        struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
        struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
        struct nv50_head_atom *harm = NULL, *asyh = NULL;
        bool varm = false, asyv = false, asym = false;
        int ret;

        NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
        if (asyw->state.crtc) {
                asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
                if (IS_ERR(asyh))
                        return PTR_ERR(asyh);
                asym = drm_atomic_crtc_needs_modeset(&asyh->state);
                asyv = asyh->state.active;
        }

        if (armw->state.crtc) {
                harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
                if (IS_ERR(harm))
                        return PTR_ERR(harm);
                varm = harm->state.crtc->state->active;
        }

        if (asyv) {
                asyw->point.x = asyw->state.crtc_x;
                asyw->point.y = asyw->state.crtc_y;
                if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
                        asyw->set.point = true;

                ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
                if (ret)
                        return ret;
        } else
        if (varm) {
                nv50_wndw_atomic_check_release(wndw, asyw, harm);
        } else {
                return 0;
        }

        if (!asyv || asym) {
                asyw->clr.ntfy = armw->ntfy.handle != 0;
                asyw->clr.sema = armw->sema.handle != 0;
                if (wndw->func->image_clr)
                        asyw->clr.image = armw->image.handle != 0;
                asyw->set.lut = wndw->func->lut && asyv;
        }

        return 0;
}

static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
        struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
        struct nouveau_drm *drm = nouveau_drm(plane->dev);

        NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
        if (!old_state->fb)
                return;

        nouveau_bo_unpin(fb->nvbo);
}

static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
        struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
        struct nouveau_drm *drm = nouveau_drm(plane->dev);
        struct nv50_wndw *wndw = nv50_wndw(plane);
        struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
        struct nv50_head_atom *asyh;
        struct nv50_dmac_ctxdma *ctxdma;
        int ret;

        NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
        if (!asyw->state.fb)
                return 0;

        ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
        if (ret)
                return ret;

        ctxdma = nv50_dmac_ctxdma_new(wndw->dmac, fb);
        if (IS_ERR(ctxdma)) {
                nouveau_bo_unpin(fb->nvbo);
                return PTR_ERR(ctxdma);
        }

        asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
        asyw->image.handle = ctxdma->object.handle;
        asyw->image.offset = fb->nvbo->bo.offset;

        if (wndw->func->prepare) {
                asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
                if (IS_ERR(asyh))
                        return PTR_ERR(asyh);

                wndw->func->prepare(wndw, asyh, asyw);
        }

        return 0;
}

static const struct drm_plane_helper_funcs
nv50_wndw_helper = {
        .prepare_fb = nv50_wndw_prepare_fb,
        .cleanup_fb = nv50_wndw_cleanup_fb,
        .atomic_check = nv50_wndw_atomic_check,
};

static void
nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
                               struct drm_plane_state *state)
{
        struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
        __drm_atomic_helper_plane_destroy_state(&asyw->state);
        kfree(asyw);
}

static struct drm_plane_state *
nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
{
        struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
        struct nv50_wndw_atom *asyw;
        if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
                return NULL;
        __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
        asyw->interval = 1;
        asyw->sema = armw->sema;
        asyw->ntfy = armw->ntfy;
        asyw->image = armw->image;
        asyw->point = armw->point;
        asyw->lut = armw->lut;
        asyw->clr.mask = 0;
        asyw->set.mask = 0;
        return &asyw->state;
}

static void
nv50_wndw_reset(struct drm_plane *plane)
{
        struct nv50_wndw_atom *asyw;

        if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
                return;

        if (plane->state)
                plane->funcs->atomic_destroy_state(plane, plane->state);
        plane->state = &asyw->state;
        plane->state->plane = plane;
        plane->state->rotation = DRM_MODE_ROTATE_0;
}

static void
nv50_wndw_destroy(struct drm_plane *plane)
{
        struct nv50_wndw *wndw = nv50_wndw(plane);
        void *data;
        nvif_notify_fini(&wndw->notify);
        data = wndw->func->dtor(wndw);
        drm_plane_cleanup(&wndw->plane);
        kfree(data);
}

static const struct drm_plane_funcs
nv50_wndw = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = nv50_wndw_destroy,
        .reset = nv50_wndw_reset,
        .atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
        .atomic_destroy_state = nv50_wndw_atomic_destroy_state,
};

static void
nv50_wndw_fini(struct nv50_wndw *wndw)
{
        nvif_notify_put(&wndw->notify);
}

static void
nv50_wndw_init(struct nv50_wndw *wndw)
{
        nvif_notify_get(&wndw->notify);
}

static int
nv50_wndw_ctor(const struct nv50_wndw_func *func, struct drm_device *dev,
               enum drm_plane_type type, const char *name, int index,
               struct nv50_dmac *dmac, const u32 *format, int nformat,
               struct nv50_wndw *wndw)
{
        int ret;

        wndw->func = func;
        wndw->dmac = dmac;

        ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw,
                                       format, nformat, NULL,
                                       type, "%s-%d", name, index);
        if (ret)
                return ret;

        drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
        return 0;
}

/******************************************************************************
 * Cursor plane
 *****************************************************************************/
#define nv50_curs(p) container_of((p), struct nv50_curs, wndw)

struct nv50_curs {
        struct nv50_wndw wndw;
        struct nvif_object chan;
};

static u32
nv50_curs_update(struct nv50_wndw *wndw, u32 interlock)
{
        struct nv50_curs *curs = nv50_curs(wndw);
        nvif_wr32(&curs->chan, 0x0080, 0x00000000);
        return 0;
}

static void
nv50_curs_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
        struct nv50_curs *curs = nv50_curs(wndw);
        nvif_wr32(&curs->chan, 0x0084, (asyw->point.y << 16) | asyw->point.x);
}

static void
nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
                  struct nv50_wndw_atom *asyw)
{
        u32 handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
        u32 offset = asyw->image.offset;
        if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
                asyh->curs.handle = handle;
                asyh->curs.offset = offset;
                asyh->set.curs = asyh->curs.visible;
        }
}

static void
nv50_curs_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
                  struct nv50_head_atom *asyh)
{
        asyh->curs.visible = false;
}

static int
nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
                  struct nv50_head_atom *asyh)
{
        int ret;

        ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  true, true);
        asyh->curs.visible = asyw->state.visible;
        if (ret || !asyh->curs.visible)
                return ret;

        switch (asyw->state.fb->width) {
        case 32: asyh->curs.layout = 0; break;
        case 64: asyh->curs.layout = 1; break;
        default:
                return -EINVAL;
        }

        if (asyw->state.fb->width != asyw->state.fb->height)
                return -EINVAL;

        switch (asyw->state.fb->format->format) {
        case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break;
        default:
                WARN_ON(1);
                return -EINVAL;
        }

        return 0;
}

static void *
nv50_curs_dtor(struct nv50_wndw *wndw)
{
        struct nv50_curs *curs = nv50_curs(wndw);
        nvif_object_fini(&curs->chan);
        return curs;
}

static const u32
nv50_curs_format[] = {
        DRM_FORMAT_ARGB8888,
};

static const struct nv50_wndw_func
nv50_curs = {
        .dtor = nv50_curs_dtor,
        .acquire = nv50_curs_acquire,
        .release = nv50_curs_release,
        .prepare = nv50_curs_prepare,
        .point = nv50_curs_point,
        .update = nv50_curs_update,
};

static int
nv50_curs_new(struct nouveau_drm *drm, struct nv50_head *head,
              struct nv50_curs **pcurs)
{
        static const struct nvif_mclass curses[] = {
                { GK104_DISP_CURSOR, 0 },
                { GF110_DISP_CURSOR, 0 },
                { GT214_DISP_CURSOR, 0 },
                { G82_DISP_CURSOR, 0 },
                { NV50_DISP_CURSOR, 0 },
                {}
        };
        struct nv50_disp_cursor_v0 args = {
                .head = head->base.index,
        };
        struct nv50_disp *disp = nv50_disp(drm->dev);
        struct nv50_curs *curs;
        int cid, ret;

        cid = nvif_mclass(&disp->disp->object, curses);
        if (cid < 0) {
                NV_ERROR(drm, "No supported cursor immediate class\n");
                return cid;
        }

        if (!(curs = *pcurs = kzalloc(sizeof(*curs), GFP_KERNEL)))
                return -ENOMEM;

        ret = nv50_wndw_ctor(&nv50_curs, drm->dev, DRM_PLANE_TYPE_CURSOR,
                             "curs", head->base.index, &disp->mast.base,
                             nv50_curs_format, ARRAY_SIZE(nv50_curs_format),
                             &curs->wndw);
        if (ret) {
                kfree(curs);
                return ret;
        }

        ret = nvif_object_init(&disp->disp->object, 0, curses[cid].oclass,
                               &args, sizeof(args), &curs->chan);
        if (ret) {
                NV_ERROR(drm, "curs%04x allocation failed: %d\n",
                         curses[cid].oclass, ret);
                return ret;
        }

        return 0;
}

/******************************************************************************
 * Primary plane
 *****************************************************************************/
#define nv50_base(p) container_of((p), struct nv50_base, wndw)

struct nv50_base {
        struct nv50_wndw wndw;
        struct nv50_sync chan;
        int id;
};

static int
nv50_base_notify(struct nvif_notify *notify)
{
        return NVIF_NOTIFY_KEEP;
}

static void
nv50_base_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
        struct nv50_base *base = nv50_base(wndw);
        u32 *push;
        if ((push = evo_wait(&base->chan, 2))) {
                evo_mthd(push, 0x00e0, 1);
                evo_data(push, asyw->lut.enable << 30);
                evo_kick(push, &base->chan);
        }
}

static void
nv50_base_image_clr(struct nv50_wndw *wndw)
{
        struct nv50_base *base = nv50_base(wndw);
        u32 *push;
        if ((push = evo_wait(&base->chan, 4))) {
                evo_mthd(push, 0x0084, 1);
                evo_data(push, 0x00000000);
                evo_mthd(push, 0x00c0, 1);
                evo_data(push, 0x00000000);
                evo_kick(push, &base->chan);
        }
}

static void
nv50_base_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
        struct nv50_base *base = nv50_base(wndw);
        const s32 oclass = base->chan.base.base.user.oclass;
        u32 *push;
        if ((push = evo_wait(&base->chan, 10))) {
                evo_mthd(push, 0x0084, 1);
                evo_data(push, (asyw->image.mode << 8) |
                               (asyw->image.interval << 4));
                evo_mthd(push, 0x00c0, 1);
                evo_data(push, asyw->image.handle);
                if (oclass < G82_DISP_BASE_CHANNEL_DMA) {
                        evo_mthd(push, 0x0800, 5);
                        evo_data(push, asyw->image.offset >> 8);
                        evo_data(push, 0x00000000);
                        evo_data(push, (asyw->image.h << 16) | asyw->image.w);
                        evo_data(push, (asyw->image.layout << 20) |
                                        asyw->image.pitch |
                                        asyw->image.block);
                        evo_data(push, (asyw->image.kind << 16) |
                                       (asyw->image.format << 8));
                } else
                if (oclass < GF110_DISP_BASE_CHANNEL_DMA) {
                        evo_mthd(push, 0x0800, 5);
                        evo_data(push, asyw->image.offset >> 8);
                        evo_data(push, 0x00000000);
                        evo_data(push, (asyw->image.h << 16) | asyw->image.w);
                        evo_data(push, (asyw->image.layout << 20) |
                                        asyw->image.pitch |
                                        asyw->image.block);
                        evo_data(push, asyw->image.format << 8);
                } else {
                        evo_mthd(push, 0x0400, 5);
                        evo_data(push, asyw->image.offset >> 8);
                        evo_data(push, 0x00000000);
                        evo_data(push, (asyw->image.h << 16) | asyw->image.w);
                        evo_data(push, (asyw->image.layout << 24) |
                                        asyw->image.pitch |
                                        asyw->image.block);
                        evo_data(push, asyw->image.format << 8);
                }
                evo_kick(push, &base->chan);
        }
}

static void
nv50_base_ntfy_clr(struct nv50_wndw *wndw)
{
        struct nv50_base *base = nv50_base(wndw);
        u32 *push;
        if ((push = evo_wait(&base->chan, 2))) {
                evo_mthd(push, 0x00a4, 1);
                evo_data(push, 0x00000000);
                evo_kick(push, &base->chan);
        }
}

static void
nv50_base_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
        struct nv50_base *base = nv50_base(wndw);
        u32 *push;
        if ((push = evo_wait(&base->chan, 3))) {
                evo_mthd(push, 0x00a0, 2);
                evo_data(push, (asyw->ntfy.awaken << 30) | asyw->ntfy.offset);
                evo_data(push, asyw->ntfy.handle);
                evo_kick(push, &base->chan);
        }
}

static void
nv50_base_sema_clr(struct nv50_wndw *wndw)
{
        struct nv50_base *base = nv50_base(wndw);
        u32 *push;
        if ((push = evo_wait(&base->chan, 2))) {
                evo_mthd(push, 0x0094, 1);
                evo_data(push, 0x00000000);
                evo_kick(push, &base->chan);
        }
}

static void
nv50_base_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
        struct nv50_base *base = nv50_base(wndw);
        u32 *push;
        if ((push = evo_wait(&base->chan, 5))) {
                evo_mthd(push, 0x0088, 4);
                evo_data(push, asyw->sema.offset);
                evo_data(push, asyw->sema.acquire);
                evo_data(push, asyw->sema.release);
                evo_data(push, asyw->sema.handle);
                evo_kick(push, &base->chan);
        }
}

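/* update() submits the pending methods and returns this channel's
 * interlock bit(s) for the caller to use when synchronising against the
 * core channel; the per-channel bit spacing differs before and after
 * GF110-class hardware.
 */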
static u32
nv50_base_update(struct nv50_wndw *wndw, u32 interlock)
{
        struct nv50_base *base = nv50_base(wndw);
        u32 *push;

        if (!(push = evo_wait(&base->chan, 2)))
                return 0;
        evo_mthd(push, 0x0080, 1);
        evo_data(push, interlock);
        evo_kick(push, &base->chan);

        if (base->chan.base.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
                return interlock ? 2 << (base->id * 8) : 0;
        return interlock ? 2 << (base->id * 4) : 0;
}

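/* The top two bits of the notifier word hold its status; 0x40000000
 * indicates the flip has begun, so poll the shared sync buffer for up to
 * two seconds waiting for that state to appear.
 */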
static int
nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
        struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
        struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
        if (nvif_msec(&drm->client.device, 2000ULL,
                u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
                if ((data & 0xc0000000) == 0x40000000)
                        break;
                usleep_range(1, 2);
        ) < 0)
                return -ETIMEDOUT;
        return 0;
}

static void
nv50_base_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
                  struct nv50_head_atom *asyh)
{
        asyh->base.cpp = 0;
}

static int
nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
                  struct nv50_head_atom *asyh)
{
        const struct drm_framebuffer *fb = asyw->state.fb;
        int ret;

        if (!fb->format->depth)
                return -EINVAL;

        ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  false, true);
        if (ret)
                return ret;

        asyh->base.depth = fb->format->depth;
        asyh->base.cpp = fb->format->cpp[0];
        asyh->base.x = asyw->state.src.x1 >> 16;
        asyh->base.y = asyw->state.src.y1 >> 16;
        asyh->base.w = asyw->state.fb->width;
        asyh->base.h = asyw->state.fb->height;

        switch (fb->format->format) {
        case DRM_FORMAT_C8         : asyw->image.format = 0x1e; break;
        case DRM_FORMAT_RGB565     : asyw->image.format = 0xe8; break;
        case DRM_FORMAT_XRGB1555   :
        case DRM_FORMAT_ARGB1555   : asyw->image.format = 0xe9; break;
        case DRM_FORMAT_XRGB8888   :
        case DRM_FORMAT_ARGB8888   : asyw->image.format = 0xcf; break;
        case DRM_FORMAT_XBGR2101010:
        case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
        case DRM_FORMAT_XBGR8888   :
        case DRM_FORMAT_ABGR8888   : asyw->image.format = 0xd5; break;
        default:
                WARN_ON(1);
                return -EINVAL;
        }

        asyw->lut.enable = 1;
        asyw->set.image = true;
        return 0;
}

static void *
nv50_base_dtor(struct nv50_wndw *wndw)
{
        struct nv50_base *base = nv50_base(wndw);
        nv50_dmac_destroy(&base->chan.base);
        return base;
}

static const u32
nv50_base_format[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB1555,
        DRM_FORMAT_ARGB1555,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_ABGR2101010,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ABGR8888,
};

static const struct nv50_wndw_func
nv50_base = {
        .dtor = nv50_base_dtor,
        .acquire = nv50_base_acquire,
        .release = nv50_base_release,
        .sema_set = nv50_base_sema_set,
        .sema_clr = nv50_base_sema_clr,
        .ntfy_set = nv50_base_ntfy_set,
        .ntfy_clr = nv50_base_ntfy_clr,
        .ntfy_wait_begun = nv50_base_ntfy_wait_begun,
        .image_set = nv50_base_image_set,
        .image_clr = nv50_base_image_clr,
        .lut = nv50_base_lut,
        .update = nv50_base_update,
};

static int
nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
              struct nv50_base **pbase)
{
        struct nv50_disp *disp = nv50_disp(drm->dev);
        struct nv50_base *base;
        int ret;

        if (!(base = *pbase = kzalloc(sizeof(*base), GFP_KERNEL)))
                return -ENOMEM;
        base->id = head->base.index;
        base->wndw.ntfy = EVO_FLIP_NTFY0(base->id);
        base->wndw.sema = EVO_FLIP_SEM0(base->id);
        base->wndw.data = 0x00000000;

        ret = nv50_wndw_ctor(&nv50_base, drm->dev, DRM_PLANE_TYPE_PRIMARY,
                             "base", base->id, &base->chan.base,
                             nv50_base_format, ARRAY_SIZE(nv50_base_format),
                             &base->wndw);
        if (ret) {
                kfree(base);
                return ret;
        }

        ret = nv50_base_create(&drm->client.device, &disp->disp->object,
                               base->id, disp->sync->bo.offset, &base->chan);
        if (ret)
                return ret;

        return nvif_notify_init(&base->chan.base.base.user, nv50_base_notify,
                                false,
                                NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
                                &(struct nvif_notify_uevent_req) {},
                                sizeof(struct nvif_notify_uevent_req),
                                sizeof(struct nvif_notify_uevent_rep),
                                &base->wndw.notify);
}

/******************************************************************************
 * Head
 *****************************************************************************/
static void
nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
        u32 *push;
        if ((push = evo_wait(core, 2))) {
                if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
                        evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
                else
                        evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
                evo_data(push, (asyh->procamp.sat.sin << 20) |
                               (asyh->procamp.sat.cos << 8));
                evo_kick(push, core);
        }
}

static void
nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
        u32 *push;
        if ((push = evo_wait(core, 2))) {
                if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
                        evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
                else
                if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA)
                        evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
                else
                        evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
                evo_data(push, (asyh->dither.mode << 3) |
                               (asyh->dither.bits << 1) |
                                asyh->dither.enable);
                evo_kick(push, core);
        }
}

static void
nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
        u32 bounds = 0;
        u32 *push;

        if (asyh->base.cpp) {
                switch (asyh->base.cpp) {
                case 8: bounds |= 0x00000500; break;
                case 4: bounds |= 0x00000300; break;
                case 2: bounds |= 0x00000100; break;
                default:
                        WARN_ON(1);
                        break;
                }
                bounds |= 0x00000001;
        }

        if ((push = evo_wait(core, 2))) {
                if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
                        evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
                else
                        evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
                evo_data(push, bounds);
                evo_kick(push, core);
        }
}

static void
nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
        u32 bounds = 0;
        u32 *push;

        if (asyh->base.cpp) {
                switch (asyh->base.cpp) {
                case 8: bounds |= 0x00000500; break;
                case 4: bounds |= 0x00000300; break;
                case 2: bounds |= 0x00000100; break;
                case 1: bounds |= 0x00000000; break;
                default:
                        WARN_ON(1);
                        break;
                }
                bounds |= 0x00000001;
        }

        if ((push = evo_wait(core, 2))) {
                if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
                        evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
                else
                        evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
                evo_data(push, bounds);
                evo_kick(push, core);
        }
}

static void
nv50_head_curs_clr(struct nv50_head *head)
{
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
        u32 *push;
        if ((push = evo_wait(core, 4))) {
                if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
                        evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
                        evo_data(push, 0x05000000);
                } else
                if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
                        evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
                        evo_data(push, 0x05000000);
                        evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
                        evo_data(push, 0x00000000);
                } else {
                        evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
                        evo_data(push, 0x05000000);
                        evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
                        evo_data(push, 0x00000000);
                }
                evo_kick(push, core);
        }
}

static void
nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
        u32 *push;
        if ((push = evo_wait(core, 5))) {
                if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
                        evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
                        evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
                                                    (asyh->curs.format << 24));
                        evo_data(push, asyh->curs.offset >> 8);
                } else
                if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) {
                        evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
                        evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
                                                    (asyh->curs.format << 24));
                        evo_data(push, asyh->curs.offset >> 8);
                        evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
                        evo_data(push, asyh->curs.handle);
                } else {
                        evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
                        evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
                                                    (asyh->curs.format << 24));
                        evo_data(push, asyh->curs.offset >> 8);
                        evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
                        evo_data(push, asyh->curs.handle);
                }
                evo_kick(push, core);
        }
}

static void
nv50_head_core_clr(struct nv50_head *head)
{
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
        u32 *push;
        if ((push = evo_wait(core, 2))) {
                if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
                        evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
                else
                        evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
                evo_data(push, 0x00000000);
                evo_kick(push, core);
        }
}

static void
nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
        u32 *push;
        if ((push = evo_wait(core, 9))) {
                if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
                        evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
                        evo_data(push, asyh->core.offset >> 8);
                        evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
                        evo_data(push, (asyh->core.h << 16) | asyh->core.w);
                        evo_data(push, asyh->core.layout << 20 |
                                       (asyh->core.pitch >> 8) << 8 |
                                       asyh->core.block);
                        evo_data(push, asyh->core.kind << 16 |
                                       asyh->core.format << 8);
                        evo_data(push, asyh->core.handle);
                        evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
                        evo_data(push, (asyh->core.y << 16) | asyh->core.x);
                        /* EVO will complain with INVALID_STATE if we have an
                         * active cursor and (re)specify HeadSetContextDmaIso
                         * without also updating HeadSetOffsetCursor.
                         */
                        asyh->set.curs = asyh->curs.visible;
                } else
                if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
                        evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
                        evo_data(push, asyh->core.offset >> 8);
                        evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
                        evo_data(push, (asyh->core.h << 16) | asyh->core.w);
                        evo_data(push, asyh->core.layout << 20 |
                                       (asyh->core.pitch >> 8) << 8 |
                                       asyh->core.block);
                        evo_data(push, asyh->core.format << 8);
                        evo_data(push, asyh->core.handle);
                        evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
                        evo_data(push, (asyh->core.y << 16) | asyh->core.x);
                } else {
                        evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
                        evo_data(push, asyh->core.offset >> 8);
                        evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
                        evo_data(push, (asyh->core.h << 16) | asyh->core.w);
                        evo_data(push, asyh->core.layout << 24 |
                                       (asyh->core.pitch >> 8) << 8 |
                                       asyh->core.block);
                        evo_data(push, asyh->core.format << 8);
                        evo_data(push, asyh->core.handle);
                        evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
                        evo_data(push, (asyh->core.y << 16) | asyh->core.x);
                }
                evo_kick(push, core);
        }
}

static void
nv50_head_lut_clr(struct nv50_head *head)
{
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
        u32 *push;
        if ((push = evo_wait(core, 4))) {
                if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
                        evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
                        evo_data(push, 0x40000000);
                } else
                if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
                        evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
                        evo_data(push, 0x40000000);
                        evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
                        evo_data(push, 0x00000000);
                } else {
                        evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
                        evo_data(push, 0x03000000);
                        evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
                        evo_data(push, 0x00000000);
                }
                evo_kick(push, core);
        }
}

static void
nv50_head_lut_load(struct drm_property_blob *blob, int mode,
                   struct nouveau_bo *nvbo)
{
        struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
        void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo);
        const int size = blob->length / sizeof(*in);
        int bits, shift, i;
        u16 zero, r, g, b;

        /* This can't happen, but it shuts the compiler up. */
        if (WARN_ON(size != 256))
                return;

        switch (mode) {
        case 0: /* LORES. */
        case 1: /* HIRES. */
                bits = 11;
                shift = 3;
                zero = 0x0000;
                break;
        case 7: /* INTERPOLATE_257_UNITY_RANGE. */
                bits = 14;
                shift = 0;
                zero = 0x6000;
                break;
        default:
                WARN_ON(1);
                return;
        }

        for (i = 0; i < size; i++) {
                r = (drm_color_lut_extract(in[i].  red, bits) + zero) << shift;
                g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
                b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
                writew(r, lut + (i * 0x08) + 0);
                writew(g, lut + (i * 0x08) + 2);
                writew(b, lut + (i * 0x08) + 4);
        }

        /* INTERPOLATE modes require a "next" entry to interpolate with,
         * so we replicate the last entry to deal with this for now.
         */
        writew(r, lut + (i * 0x08) + 0);
        writew(g, lut + (i * 0x08) + 2);
        writew(b, lut + (i * 0x08) + 4);
}

static void
nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
        u32 *push;
        if ((push = evo_wait(core, 7))) {
                if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
                        evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
                        evo_data(push, 0x80000000 | asyh->lut.mode << 30);
                        evo_data(push, asyh->lut.offset >> 8);
                } else
                if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
                        evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
                        evo_data(push, 0x80000000 | asyh->lut.mode << 30);
                        evo_data(push, asyh->lut.offset >> 8);
                        evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
                        evo_data(push, asyh->lut.handle);
                } else {
                        evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
                        evo_data(push, 0x80000000 | asyh->lut.mode << 24);
                        evo_data(push, asyh->lut.offset >> 8);
                        evo_data(push, 0x00000000);
                        evo_data(push, 0x00000000);
                        evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
                        evo_data(push, asyh->lut.handle);
                }
                evo_kick(push, core);
        }
}

static void
nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
        struct nv50_head_mode *m = &asyh->mode;
        u32 *push;
        if ((push = evo_wait(core, 14))) {
                if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
                        evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
                        evo_data(push, 0x00800000 | m->clock);
                        evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
                        evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
                        evo_data(push, 0x00000000);
                        evo_data(push, (m->v.active  << 16) | m->h.active );
                        evo_data(push, (m->v.synce   << 16) | m->h.synce  );
                        evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
                        evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
                        evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
                        evo_data(push, asyh->mode.v.blankus);
                        evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
                        evo_data(push, 0x00000000);
                } else {
                        evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
                        evo_data(push, 0x00000000);
                        evo_data(push, (m->v.active  << 16) | m->h.active );
                        evo_data(push, (m->v.synce   << 16) | m->h.synce  );
                        evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
                        evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
                        evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
                        evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
                        evo_data(push, 0x00000000); /* ??? */
                        evo_data(push, 0xffffff00);
                        evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
                        evo_data(push, m->clock * 1000);
                        evo_data(push, 0x00200000); /* ??? */
                        evo_data(push, m->clock * 1000);
                }
                evo_kick(push, core);
        }
}

static void
nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
        u32 *push;
        if ((push = evo_wait(core, 10))) {
                if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
                        evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
                        evo_data(push, 0x00000000);
                        evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
                        evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
                        evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
                        evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
                        evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
                } else {
                        evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
                        evo_data(push, 0x00000000);
                        evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
                        evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
                        evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
                        evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
                        evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
                        evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
                }
                evo_kick(push, core);
        }
}

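/* The "y" argument forces the clears even when a matching set is also
 * pending; the commit path presumably passes the atom's flush_disable
 * flag here when a full flush of the channel is required.
 */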
1929 static void
1930 nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
1931 {
1932 if (asyh->clr.ilut && (!asyh->set.ilut || y))
1933 nv50_head_lut_clr(head);
1934 if (asyh->clr.core && (!asyh->set.core || y))
1935 nv50_head_core_clr(head);
1936 if (asyh->clr.curs && (!asyh->set.curs || y))
1937 nv50_head_curs_clr(head);
1938 }
1939
1940 static void
1941 nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
1942 {
1943 if (asyh->set.view ) nv50_head_view (head, asyh);
1944 if (asyh->set.mode ) nv50_head_mode (head, asyh);
1945 if (asyh->set.ilut ) {
1946 struct nouveau_bo *nvbo = head->lut.nvbo[head->lut.next];
1947 struct drm_property_blob *blob = asyh->state.gamma_lut;
1948 if (blob)
1949 nv50_head_lut_load(blob, asyh->lut.mode, nvbo);
1950 asyh->lut.offset = nvbo->bo.offset;
1951 head->lut.next ^= 1;
1952 nv50_head_lut_set(head, asyh);
1953 }
1954 if (asyh->set.core ) nv50_head_core_set(head, asyh);
1955 if (asyh->set.curs ) nv50_head_curs_set(head, asyh);
1956 if (asyh->set.base ) nv50_head_base (head, asyh);
1957 if (asyh->set.ovly ) nv50_head_ovly (head, asyh);
1958 if (asyh->set.dither ) nv50_head_dither (head, asyh);
1959 if (asyh->set.procamp) nv50_head_procamp (head, asyh);
1960 }
1961
1962 static void
1963 nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
1964 struct nv50_head_atom *asyh,
1965 struct nouveau_conn_atom *asyc)
1966 {
1967 const int vib = asyc->procamp.color_vibrance - 100;
1968 const int hue = asyc->procamp.vibrant_hue - 90;
1969 const int adj = (vib > 0) ? 50 : 0;
1970 asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
1971 asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
1972 asyh->set.procamp = true;
1973 }
1974
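/* The DITHERING_* property values are encoded so the shifts below can
 * split them back into the hardware's enable, bit-depth and mode
 * fields -- a reading of this code rather than of any hardware
 * documentation; see the DITHERING_* definitions for the encoding.
 */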
1975 static void
1976 nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
1977 struct nv50_head_atom *asyh,
1978 struct nouveau_conn_atom *asyc)
1979 {
1980 struct drm_connector *connector = asyc->state.connector;
1981 u32 mode = 0x00;
1982
1983 if (asyc->dither.mode == DITHERING_MODE_AUTO) {
1984 if (asyh->base.depth > connector->display_info.bpc * 3)
1985 mode = DITHERING_MODE_DYNAMIC2X2;
1986 } else {
1987 mode = asyc->dither.mode;
1988 }
1989
1990 if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
1991 if (connector->display_info.bpc >= 8)
1992 mode |= DITHERING_DEPTH_8BPC;
1993 } else {
1994 mode |= asyc->dither.depth;
1995 }
1996
1997 asyh->dither.enable = mode;
1998 asyh->dither.bits = mode >> 1;
1999 asyh->dither.mode = mode >> 3;
2000 asyh->set.dither = true;
2001 }
2002
2003 static void
2004 nv50_head_atomic_check_view(struct nv50_head_atom *armh,
2005 struct nv50_head_atom *asyh,
2006 struct nouveau_conn_atom *asyc)
2007 {
2008 struct drm_connector *connector = asyc->state.connector;
2009 struct drm_display_mode *omode = &asyh->state.adjusted_mode;
2010 struct drm_display_mode *umode = &asyh->state.mode;
2011 int mode = asyc->scaler.mode;
2012 struct edid *edid;
2013 int umode_vdisplay, omode_hdisplay, omode_vdisplay;
2014
2015 if (connector->edid_blob_ptr)
2016 edid = (struct edid *)connector->edid_blob_ptr->data;
2017 else
2018 edid = NULL;
2019
2020 if (!asyc->scaler.full) {
2021 if (mode == DRM_MODE_SCALE_NONE)
2022 omode = umode;
2023 } else {
2024 /* Non-EDID LVDS/eDP mode. */
2025 mode = DRM_MODE_SCALE_FULLSCREEN;
2026 }
2027
2028 /* For the user-specified mode, we must ignore doublescan and
2029 * the like, but honor frame packing.
2030 */
2031 umode_vdisplay = umode->vdisplay;
2032 if ((umode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
2033 umode_vdisplay += umode->vtotal;
2034 asyh->view.iW = umode->hdisplay;
2035 asyh->view.iH = umode_vdisplay;
2036 /* For the output mode, we can just use the stock helper. */
2037 drm_mode_get_hv_timing(omode, &omode_hdisplay, &omode_vdisplay);
2038 asyh->view.oW = omode_hdisplay;
2039 asyh->view.oH = omode_vdisplay;
2040
2041 /* Add overscan compensation if necessary.  This keeps the aspect
2042 * ratio the same as the backend mode unless the user overrides it
2043 * by setting both the hborder and vborder properties.
2044 */
2045 if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
2046 (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
2047 drm_detect_hdmi_monitor(edid)))) {
2048 u32 bX = asyc->scaler.underscan.hborder;
2049 u32 bY = asyc->scaler.underscan.vborder;
2050 u32 r = (asyh->view.oH << 19) / asyh->view.oW;
2051
2052 if (bX) {
2053 asyh->view.oW -= (bX * 2);
2054 if (bY) asyh->view.oH -= (bY * 2);
2055 else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
2056 } else {
2057 asyh->view.oW -= (asyh->view.oW >> 4) + 32;
2058 if (bY) asyh->view.oH -= (bY * 2);
2059 else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
2060 }
2061 }
2062
2063 /* Handle CENTER/ASPECT scaling, taking into account the areas
2064 * removed already for overscan compensation.
2065 */
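/* e.g. a 1280x1024 (5:4) user mode on a 1920x1080 panel with ASPECT:
 * oH < oW, so r = (1280 << 19) / 1024 (1.25 in .19 fixed point) and
 * oW = (1080 * r + r / 2) >> 19 = 1350, preserving the 5:4 ratio.
 */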
2066 switch (mode) {
2067 case DRM_MODE_SCALE_CENTER:
2068 asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
2069 asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH);
2070 /* fall-through */
2071 case DRM_MODE_SCALE_ASPECT:
2072 if (asyh->view.oH < asyh->view.oW) {
2073 u32 r = (asyh->view.iW << 19) / asyh->view.iH;
2074 asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
2075 } else {
2076 u32 r = (asyh->view.iH << 19) / asyh->view.iW;
2077 asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
2078 }
2079 break;
2080 default:
2081 break;
2082 }
2083
2084 asyh->set.view = true;
2085 }
2086
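/* Select the input-LUT layout: modes 0/1 on the original EVO core
 * (depending on whether the fb is I8), mode 7 on GF110 and later.
 * Either way the LUT contents live in the 1025-entry,
 * 8-byte-per-entry buffers allocated in nv50_head_create().
 */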
2087 static void
2088 nv50_head_atomic_check_lut(struct nv50_head *head,
2089 struct nv50_head_atom *armh,
2090 struct nv50_head_atom *asyh)
2091 {
2092 struct nv50_disp *disp = nv50_disp(head->base.base.dev);
2093
2094 /* An I8 surface without an input LUT makes no sense, and
2095 * EVO will throw an error if you try.
2096 *
2097 * Legacy clients actually cause this due to the order in
2098 * which they call ioctls, so we will enable the LUT with
2099 * whatever contents the buffer already contains to avoid
2100 * triggering the error check.
2101 */
2102 if (!asyh->state.gamma_lut && asyh->base.cpp != 1) {
2103 asyh->lut.handle = 0;
2104 asyh->clr.ilut = armh->lut.visible;
2105 return;
2106 }
2107
2108 if (disp->disp->object.oclass < GF110_DISP) {
2109 asyh->lut.mode = (asyh->base.cpp == 1) ? 0 : 1;
2110 asyh->set.ilut = true;
2111 } else {
2112 asyh->lut.mode = 7;
2113 asyh->set.ilut = asyh->state.color_mgmt_changed;
2114 }
2115 asyh->lut.handle = disp->mast.base.vram.handle;
2116 }
2117
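/* Worked example for CEA-861 1080p60 (htotal 2200, hsync 2008->2052,
 * hblank 1920->2200, as derived by drm_mode_set_crtcinfo()):
 * h.active = 2200, h.synce = 2052 - 2008 - 1 = 43,
 * h.blanke = 2200 - 2008 - 1 = 191, h.blanks = 191 + 1920 = 2111.
 * v.blankus comes out in microseconds, since crtc_clock is in kHz.
 */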
2118 static void
2119 nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
2120 {
2121 struct drm_display_mode *mode = &asyh->state.adjusted_mode;
2122 struct nv50_head_mode *m = &asyh->mode;
2123 u32 blankus;
2124
2125 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V | CRTC_STEREO_DOUBLE);
2126
2127 /*
2128 * DRM modes are defined in terms of a repeating interval
2129 * starting with the active display area. The hardware modes
2130 * are defined in terms of a repeating interval starting one
2131 * unit (pixel or line) into the sync pulse. So, add bias.
2132 */
2133
2134 m->h.active = mode->crtc_htotal;
2135 m->h.synce = mode->crtc_hsync_end - mode->crtc_hsync_start - 1;
2136 m->h.blanke = mode->crtc_hblank_end - mode->crtc_hsync_start - 1;
2137 m->h.blanks = m->h.blanke + mode->crtc_hdisplay;
2138
2139 m->v.active = mode->crtc_vtotal;
2140 m->v.synce = mode->crtc_vsync_end - mode->crtc_vsync_start - 1;
2141 m->v.blanke = mode->crtc_vblank_end - mode->crtc_vsync_start - 1;
2142 m->v.blanks = m->v.blanke + mode->crtc_vdisplay;
2143
2144 /*XXX: Safe underestimate, even "0" works */
2145 blankus = (m->v.active - mode->crtc_vdisplay - 2) * m->h.active;
2146 blankus *= 1000;
2147 blankus /= mode->crtc_clock;
2148 m->v.blankus = blankus;
2149
2150 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
2151 m->v.blank2e = m->v.active + m->v.blanke;
2152 m->v.blank2s = m->v.blank2e + mode->crtc_vdisplay;
2153 m->v.active = (m->v.active * 2) + 1;
2154 m->interlace = true;
2155 } else {
2156 m->v.blank2e = 0;
2157 m->v.blank2s = 1;
2158 m->interlace = false;
2159 }
2160 m->clock = mode->crtc_clock;
2161
2162 asyh->set.mode = true;
2163 }
2164
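/* Naming convention: armh is the armed (currently-programmed) head
 * state, asyh the new state being assembled for this commit.
 */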
2165 static int
2166 nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
2167 {
2168 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
2169 struct nv50_disp *disp = nv50_disp(crtc->dev);
2170 struct nv50_head *head = nv50_head(crtc);
2171 struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
2172 struct nv50_head_atom *asyh = nv50_head_atom(state);
2173 struct nouveau_conn_atom *asyc = NULL;
2174 struct drm_connector_state *conns;
2175 struct drm_connector *conn;
2176 int i;
2177
2178 NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
2179 if (asyh->state.active) {
2180 for_each_new_connector_in_state(asyh->state.state, conn, conns, i) {
2181 if (conns->crtc == crtc) {
2182 asyc = nouveau_conn_atom(conns);
2183 break;
2184 }
2185 }
2186
2187 if (armh->state.active) {
2188 if (asyc) {
2189 if (asyh->state.mode_changed)
2190 asyc->set.scaler = true;
2191 if (armh->base.depth != asyh->base.depth)
2192 asyc->set.dither = true;
2193 }
2194 } else {
2195 if (asyc)
2196 asyc->set.mask = ~0;
2197 asyh->set.mask = ~0;
2198 }
2199
2200 if (asyh->state.mode_changed)
2201 nv50_head_atomic_check_mode(head, asyh);
2202
2203 if (asyh->state.color_mgmt_changed ||
2204 asyh->base.cpp != armh->base.cpp)
2205 nv50_head_atomic_check_lut(head, armh, asyh);
2206 asyh->lut.visible = asyh->lut.handle != 0;
2207
2208 if (asyc) {
2209 if (asyc->set.scaler)
2210 nv50_head_atomic_check_view(armh, asyh, asyc);
2211 if (asyc->set.dither)
2212 nv50_head_atomic_check_dither(armh, asyh, asyc);
2213 if (asyc->set.procamp)
2214 nv50_head_atomic_check_procamp(armh, asyh, asyc);
2215 }
2216
2217 if ((asyh->core.visible = (asyh->base.cpp != 0))) {
2218 asyh->core.x = asyh->base.x;
2219 asyh->core.y = asyh->base.y;
2220 asyh->core.w = asyh->base.w;
2221 asyh->core.h = asyh->base.h;
2222 } else
2223 if ((asyh->core.visible = asyh->curs.visible) ||
2224 (asyh->core.visible = asyh->lut.visible)) {
2225 /*XXX: We need to either find some way of having the
2226 * primary base layer appear black, while still
2227 * being able to display the other layers, or we
2228 * need to allocate a dummy black surface here.
2229 */
2230 asyh->core.x = 0;
2231 asyh->core.y = 0;
2232 asyh->core.w = asyh->state.mode.hdisplay;
2233 asyh->core.h = asyh->state.mode.vdisplay;
2234 }
2235 asyh->core.handle = disp->mast.base.vram.handle;
2236 asyh->core.offset = 0;
2237 asyh->core.format = 0xcf;
2238 asyh->core.kind = 0;
2239 asyh->core.layout = 1;
2240 asyh->core.block = 0;
2241 asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
2242 asyh->set.base = armh->base.cpp != asyh->base.cpp;
2243 asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
2244 } else {
2245 asyh->lut.visible = false;
2246 asyh->core.visible = false;
2247 asyh->curs.visible = false;
2248 asyh->base.cpp = 0;
2249 asyh->ovly.cpp = 0;
2250 }
2251
2252 if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
2253 if (asyh->core.visible) {
2254 if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
2255 asyh->set.core = true;
2256 } else
2257 if (armh->core.visible) {
2258 asyh->clr.core = true;
2259 }
2260
2261 if (asyh->curs.visible) {
2262 if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
2263 asyh->set.curs = true;
2264 } else
2265 if (armh->curs.visible) {
2266 asyh->clr.curs = true;
2267 }
2268 } else {
2269 asyh->clr.ilut = armh->lut.visible;
2270 asyh->clr.core = armh->core.visible;
2271 asyh->clr.curs = armh->curs.visible;
2272 asyh->set.ilut = asyh->lut.visible;
2273 asyh->set.core = asyh->core.visible;
2274 asyh->set.curs = asyh->curs.visible;
2275 }
2276
2277 if (asyh->clr.mask || asyh->set.mask)
2278 nv50_atom(asyh->state.state)->lock_core = true;
2279 return 0;
2280 }
2281
2282 static const struct drm_crtc_helper_funcs
2283 nv50_head_help = {
2284 .atomic_check = nv50_head_atomic_check,
2285 };
2286
2287 static void
2288 nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
2289 struct drm_crtc_state *state)
2290 {
2291 struct nv50_head_atom *asyh = nv50_head_atom(state);
2292 __drm_atomic_helper_crtc_destroy_state(&asyh->state);
2293 kfree(asyh);
2294 }
2295
2296 static struct drm_crtc_state *
2297 nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
2298 {
2299 struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
2300 struct nv50_head_atom *asyh;
2301 if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
2302 return NULL;
2303 __drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
2304 asyh->view = armh->view;
2305 asyh->mode = armh->mode;
2306 asyh->lut = armh->lut;
2307 asyh->core = armh->core;
2308 asyh->curs = armh->curs;
2309 asyh->base = armh->base;
2310 asyh->ovly = armh->ovly;
2311 asyh->dither = armh->dither;
2312 asyh->procamp = armh->procamp;
2313 asyh->clr.mask = 0;
2314 asyh->set.mask = 0;
2315 return &asyh->state;
2316 }
2317
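/* Local helper that frees any existing CRTC state and installs a
 * preallocated, zeroed one; used by nv50_head_reset() below.
 */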
2318 static void
2319 __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
2320 struct drm_crtc_state *state)
2321 {
2322 if (crtc->state)
2323 crtc->funcs->atomic_destroy_state(crtc, crtc->state);
2324 crtc->state = state;
2325 crtc->state->crtc = crtc;
2326 }
2327
2328 static void
2329 nv50_head_reset(struct drm_crtc *crtc)
2330 {
2331 struct nv50_head_atom *asyh;
2332
2333 if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
2334 return;
2335
2336 __drm_atomic_helper_crtc_reset(crtc, &asyh->state);
2337 }
2338
2339 static void
2340 nv50_head_destroy(struct drm_crtc *crtc)
2341 {
2342 struct nv50_head *head = nv50_head(crtc);
2343 int i;
2344
2345 nv50_dmac_destroy(&head->ovly.base);
2346 nv50_pioc_destroy(&head->oimm.base);
2347
2348 for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++)
2349 nouveau_bo_unmap_unpin_unref(&head->lut.nvbo[i]);
2350
2351 drm_crtc_cleanup(crtc);
2352 kfree(crtc);
2353 }
2354
2355 static const struct drm_crtc_funcs
2356 nv50_head_func = {
2357 .reset = nv50_head_reset,
2358 .gamma_set = drm_atomic_helper_legacy_gamma_set,
2359 .destroy = nv50_head_destroy,
2360 .set_config = drm_atomic_helper_set_config,
2361 .page_flip = drm_atomic_helper_page_flip,
2362 .atomic_duplicate_state = nv50_head_atomic_duplicate_state,
2363 .atomic_destroy_state = nv50_head_atomic_destroy_state,
2364 };
2365
2366 static int
2367 nv50_head_create(struct drm_device *dev, int index)
2368 {
2369 struct nouveau_drm *drm = nouveau_drm(dev);
2370 struct nvif_device *device = &drm->client.device;
2371 struct nv50_disp *disp = nv50_disp(dev);
2372 struct nv50_head *head;
2373 struct nv50_base *base;
2374 struct nv50_curs *curs;
2375 struct drm_crtc *crtc;
2376 int ret, i;
2377
2378 head = kzalloc(sizeof(*head), GFP_KERNEL);
2379 if (!head)
2380 return -ENOMEM;
2381
2382 head->base.index = index;
2383 ret = nv50_base_new(drm, head, &base);
2384 if (ret == 0)
2385 ret = nv50_curs_new(drm, head, &curs);
2386 if (ret) {
2387 kfree(head);
2388 return ret;
2389 }
2390
2391 crtc = &head->base.base;
2392 drm_crtc_init_with_planes(dev, crtc, &base->wndw.plane,
2393 &curs->wndw.plane, &nv50_head_func,
2394 "head-%d", head->base.index);
2395 drm_crtc_helper_add(crtc, &nv50_head_help);
2396 drm_mode_crtc_set_gamma_size(crtc, 256);
2397
2398 for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++) {
2399 ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100,
2400 TTM_PL_FLAG_VRAM,
2401 &head->lut.nvbo[i]);
2402 if (ret)
2403 goto out;
2404 }
2405
2406 /* allocate overlay resources */
2407 ret = nv50_oimm_create(device, &disp->disp->object, index, &head->oimm);
2408 if (ret)
2409 goto out;
2410
2411 ret = nv50_ovly_create(device, &disp->disp->object, index,
2412 disp->sync->bo.offset, &head->ovly);
2413 if (ret)
2414 goto out;
2415
2416 out:
2417 if (ret)
2418 nv50_head_destroy(crtc);
2419 return ret;
2420 }
2421
2422 /******************************************************************************
2423 * Output path helpers
2424 *****************************************************************************/
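/* An encoder must own an output resource (OR) and link before it can
 * be driven.  acquire asks NVKM for an assignment via the ACQUIRE
 * method and caches the result in the encoder; release hands the OR
 * back and resets the cached values.
 */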
2425 static void
2426 nv50_outp_release(struct nouveau_encoder *nv_encoder)
2427 {
2428 struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
2429 struct {
2430 struct nv50_disp_mthd_v1 base;
2431 } args = {
2432 .base.version = 1,
2433 .base.method = NV50_DISP_MTHD_V1_RELEASE,
2434 .base.hasht = nv_encoder->dcb->hasht,
2435 .base.hashm = nv_encoder->dcb->hashm,
2436 };
2437
2438 nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
2439 nv_encoder->or = -1;
2440 nv_encoder->link = 0;
2441 }
2442
2443 static int
2444 nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
2445 {
2446 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
2447 struct nv50_disp *disp = nv50_disp(drm->dev);
2448 struct {
2449 struct nv50_disp_mthd_v1 base;
2450 struct nv50_disp_acquire_v0 info;
2451 } args = {
2452 .base.version = 1,
2453 .base.method = NV50_DISP_MTHD_V1_ACQUIRE,
2454 .base.hasht = nv_encoder->dcb->hasht,
2455 .base.hashm = nv_encoder->dcb->hashm,
2456 };
2457 int ret;
2458
2459 ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
2460 if (ret) {
2461 NV_ERROR(drm, "error acquiring output path: %d\n", ret);
2462 return ret;
2463 }
2464
2465 nv_encoder->or = args.info.or;
2466 nv_encoder->link = args.info.link;
2467 return 0;
2468 }
2469
2470 static int
2471 nv50_outp_atomic_check_view(struct drm_encoder *encoder,
2472 struct drm_crtc_state *crtc_state,
2473 struct drm_connector_state *conn_state,
2474 struct drm_display_mode *native_mode)
2475 {
2476 struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
2477 struct drm_display_mode *mode = &crtc_state->mode;
2478 struct drm_connector *connector = conn_state->connector;
2479 struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
2480 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
2481
2482 NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
2483 asyc->scaler.full = false;
2484 if (!native_mode)
2485 return 0;
2486
2487 if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
2488 switch (connector->connector_type) {
2489 case DRM_MODE_CONNECTOR_LVDS:
2490 case DRM_MODE_CONNECTOR_eDP:
2491 /* Force use of scaler for non-EDID modes. */
2492 if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
2493 break;
2494 mode = native_mode;
2495 asyc->scaler.full = true;
2496 break;
2497 default:
2498 break;
2499 }
2500 } else {
2501 mode = native_mode;
2502 }
2503
2504 if (!drm_mode_equal(adjusted_mode, mode)) {
2505 drm_mode_copy(adjusted_mode, mode);
2506 crtc_state->mode_changed = true;
2507 }
2508
2509 return 0;
2510 }
2511
2512 static int
2513 nv50_outp_atomic_check(struct drm_encoder *encoder,
2514 struct drm_crtc_state *crtc_state,
2515 struct drm_connector_state *conn_state)
2516 {
2517 struct nouveau_connector *nv_connector =
2518 nouveau_connector(conn_state->connector);
2519 return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
2520 nv_connector->native_mode);
2521 }
2522
2523 /******************************************************************************
2524 * DAC
2525 *****************************************************************************/
2526 static void
2527 nv50_dac_disable(struct drm_encoder *encoder)
2528 {
2529 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2530 struct nv50_mast *mast = nv50_mast(encoder->dev);
2531 const int or = nv_encoder->or;
2532 u32 *push;
2533
2534 if (nv_encoder->crtc) {
2535 push = evo_wait(mast, 4);
2536 if (push) {
2537 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
2538 evo_mthd(push, 0x0400 + (or * 0x080), 1);
2539 evo_data(push, 0x00000000);
2540 } else {
2541 evo_mthd(push, 0x0180 + (or * 0x020), 1);
2542 evo_data(push, 0x00000000);
2543 }
2544 evo_kick(push, mast);
2545 }
2546 }
2547
2548 nv_encoder->crtc = NULL;
2549 nv50_outp_release(nv_encoder);
2550 }
2551
2552 static void
2553 nv50_dac_enable(struct drm_encoder *encoder)
2554 {
2555 struct nv50_mast *mast = nv50_mast(encoder->dev);
2556 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2557 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
2558 struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
2559 u32 *push;
2560
2561 nv50_outp_acquire(nv_encoder);
2562
2563 push = evo_wait(mast, 8);
2564 if (push) {
2565 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
2566 u32 syncs = 0x00000000;
2567
2568 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
2569 syncs |= 0x00000001;
2570 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
2571 syncs |= 0x00000002;
2572
2573 evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
2574 evo_data(push, 1 << nv_crtc->index);
2575 evo_data(push, syncs);
2576 } else {
2577 u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
2578 u32 syncs = 0x00000001;
2579
2580 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
2581 syncs |= 0x00000008;
2582 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
2583 syncs |= 0x00000010;
2584
2585 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2586 magic |= 0x00000001;
2587
2588 evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
2589 evo_data(push, syncs);
2590 evo_data(push, magic);
2591 evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
2592 evo_data(push, 1 << nv_crtc->index);
2593 }
2594
2595 evo_kick(push, mast);
2596 }
2597
2598 nv_encoder->crtc = encoder->crtc;
2599 }
2600
2601 static enum drm_connector_status
2602 nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
2603 {
2604 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2605 struct nv50_disp *disp = nv50_disp(encoder->dev);
2606 struct {
2607 struct nv50_disp_mthd_v1 base;
2608 struct nv50_disp_dac_load_v0 load;
2609 } args = {
2610 .base.version = 1,
2611 .base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
2612 .base.hasht = nv_encoder->dcb->hasht,
2613 .base.hashm = nv_encoder->dcb->hashm,
2614 };
2615 int ret;
2616
2617 args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
2618 if (args.load.data == 0)
2619 args.load.data = 340;
2620
2621 ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
2622 if (ret || !args.load.load)
2623 return connector_status_disconnected;
2624
2625 return connector_status_connected;
2626 }
2627
2628 static const struct drm_encoder_helper_funcs
2629 nv50_dac_help = {
2630 .atomic_check = nv50_outp_atomic_check,
2631 .enable = nv50_dac_enable,
2632 .disable = nv50_dac_disable,
2633 .detect = nv50_dac_detect
2634 };
2635
2636 static void
2637 nv50_dac_destroy(struct drm_encoder *encoder)
2638 {
2639 drm_encoder_cleanup(encoder);
2640 kfree(encoder);
2641 }
2642
2643 static const struct drm_encoder_funcs
2644 nv50_dac_func = {
2645 .destroy = nv50_dac_destroy,
2646 };
2647
2648 static int
2649 nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
2650 {
2651 struct nouveau_drm *drm = nouveau_drm(connector->dev);
2652 struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
2653 struct nvkm_i2c_bus *bus;
2654 struct nouveau_encoder *nv_encoder;
2655 struct drm_encoder *encoder;
2656 int type = DRM_MODE_ENCODER_DAC;
2657
2658 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
2659 if (!nv_encoder)
2660 return -ENOMEM;
2661 nv_encoder->dcb = dcbe;
2662
2663 bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
2664 if (bus)
2665 nv_encoder->i2c = &bus->i2c;
2666
2667 encoder = to_drm_encoder(nv_encoder);
2668 encoder->possible_crtcs = dcbe->heads;
2669 encoder->possible_clones = 0;
2670 drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
2671 "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
2672 drm_encoder_helper_add(encoder, &nv50_dac_help);
2673
2674 drm_mode_connector_attach_encoder(connector, encoder);
2675 return 0;
2676 }
2677
2678 /******************************************************************************
2679 * Audio
2680 *****************************************************************************/
2681 static void
2682 nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
2683 {
2684 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2685 struct nv50_disp *disp = nv50_disp(encoder->dev);
2686 struct {
2687 struct nv50_disp_mthd_v1 base;
2688 struct nv50_disp_sor_hda_eld_v0 eld;
2689 } args = {
2690 .base.version = 1,
2691 .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
2692 .base.hasht = nv_encoder->dcb->hasht,
2693 .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
2694 (0x0100 << nv_crtc->index),
2695 };
2696
2697 nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
2698 }
2699
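/* Push the connector's ELD to the display's HDA controller via the
 * SOR_HDA_ELD method.  The raw ELD bytes are appended directly after
 * the method header, and only the used portion (drm_eld_size()) is
 * actually sent.
 */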
2700 static void
2701 nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
2702 {
2703 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2704 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
2705 struct nouveau_connector *nv_connector;
2706 struct nv50_disp *disp = nv50_disp(encoder->dev);
2707 struct __packed {
2708 struct {
2709 struct nv50_disp_mthd_v1 mthd;
2710 struct nv50_disp_sor_hda_eld_v0 eld;
2711 } base;
2712 u8 data[sizeof(nv_connector->base.eld)];
2713 } args = {
2714 .base.mthd.version = 1,
2715 .base.mthd.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
2716 .base.mthd.hasht = nv_encoder->dcb->hasht,
2717 .base.mthd.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
2718 (0x0100 << nv_crtc->index),
2719 };
2720
2721 nv_connector = nouveau_encoder_connector_get(nv_encoder);
2722 if (!drm_detect_monitor_audio(nv_connector->edid))
2723 return;
2724
2725 memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
2726
2727 nvif_mthd(&disp->disp->object, 0, &args,
2728 sizeof(args.base) + drm_eld_size(args.data));
2729 }
2730
2731 /******************************************************************************
2732 * HDMI
2733 *****************************************************************************/
2734 static void
2735 nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
2736 {
2737 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2738 struct nv50_disp *disp = nv50_disp(encoder->dev);
2739 struct {
2740 struct nv50_disp_mthd_v1 base;
2741 struct nv50_disp_sor_hdmi_pwr_v0 pwr;
2742 } args = {
2743 .base.version = 1,
2744 .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
2745 .base.hasht = nv_encoder->dcb->hasht,
2746 .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
2747 (0x0100 << nv_crtc->index),
2748 };
2749
2750 nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
2751 }
2752
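/* Enable HDMI on the SOR: the AVI infoframe and, if applicable, the
 * HDMI vendor infoframe are packed back-to-back into args.infoframes,
 * and max_ac_packet is derived from the hblank width minus the rekey
 * value and a constant, both taken from the binary driver/Tegra.
 */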
2753 static void
2754 nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
2755 {
2756 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2757 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
2758 struct nv50_disp *disp = nv50_disp(encoder->dev);
2759 struct {
2760 struct nv50_disp_mthd_v1 base;
2761 struct nv50_disp_sor_hdmi_pwr_v0 pwr;
2762 u8 infoframes[2 * 17]; /* two frames, up to 17 bytes each */
2763 } args = {
2764 .base.version = 1,
2765 .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
2766 .base.hasht = nv_encoder->dcb->hasht,
2767 .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
2768 (0x0100 << nv_crtc->index),
2769 .pwr.state = 1,
2770 .pwr.rekey = 56, /* constant used by both the binary driver and Tegra */
2771 };
2772 struct nouveau_connector *nv_connector;
2773 u32 max_ac_packet;
2774 union hdmi_infoframe avi_frame;
2775 union hdmi_infoframe vendor_frame;
2776 int ret;
2777 int size;
2778
2779 nv_connector = nouveau_encoder_connector_get(nv_encoder);
2780 if (!drm_detect_hdmi_monitor(nv_connector->edid))
2781 return;
2782
2783 ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode,
2784 false);
2785 if (!ret) {
2786 /* We have an AVI InfoFrame, pack it and send it to the display */
2787 args.pwr.avi_infoframe_length
2788 = hdmi_infoframe_pack(&avi_frame, args.infoframes, 17);
2789 }
2790
2791 ret = drm_hdmi_vendor_infoframe_from_display_mode(&vendor_frame.vendor.hdmi,
2792 &nv_connector->base, mode);
2793 if (!ret) {
2794 /* We have a Vendor InfoFrame, pack it and send it to the display */
2795 args.pwr.vendor_infoframe_length
2796 = hdmi_infoframe_pack(&vendor_frame,
2797 args.infoframes
2798 + args.pwr.avi_infoframe_length,
2799 17);
2800 }
2801
2802 max_ac_packet = mode->htotal - mode->hdisplay;
2803 max_ac_packet -= args.pwr.rekey;
2804 max_ac_packet -= 18; /* constant from tegra */
2805 args.pwr.max_ac_packet = max_ac_packet / 32;
2806
2807 size = sizeof(args.base)
2808 + sizeof(args.pwr)
2809 + args.pwr.avi_infoframe_length
2810 + args.pwr.vendor_infoframe_length;
2811 nvif_mthd(&disp->disp->object, 0, &args, size);
2812 nv50_audio_enable(encoder, mode);
2813 }
2814
2815 /******************************************************************************
2816 * MST
2817 *****************************************************************************/
2818 #define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
2819 #define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
2820 #define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
2821
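/* MST object roles: nv50_mstm wraps the DP MST topology manager for a
 * single SOR, nv50_mstc is the DRM connector created for each MST
 * port, and nv50_msto is the per-head DPMST encoder used to route a
 * head to such a port.
 */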
2822 struct nv50_mstm {
2823 struct nouveau_encoder *outp;
2824
2825 struct drm_dp_mst_topology_mgr mgr;
2826 struct nv50_msto *msto[4];
2827
2828 bool modified;
2829 bool disabled;
2830 int links;
2831 };
2832
2833 struct nv50_mstc {
2834 struct nv50_mstm *mstm;
2835 struct drm_dp_mst_port *port;
2836 struct drm_connector connector;
2837
2838 struct drm_display_mode *native;
2839 struct edid *edid;
2840
2841 int pbn;
2842 };
2843
2844 struct nv50_msto {
2845 struct drm_encoder encoder;
2846
2847 struct nv50_head *head;
2848 struct nv50_mstc *mstc;
2849 bool disabled;
2850 };
2851
2852 static struct drm_dp_payload *
2853 nv50_msto_payload(struct nv50_msto *msto)
2854 {
2855 struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
2856 struct nv50_mstc *mstc = msto->mstc;
2857 struct nv50_mstm *mstm = mstc->mstm;
2858 int vcpi = mstc->port->vcpi.vcpi, i;
2859
2860 NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
2861 for (i = 0; i < mstm->mgr.max_payloads; i++) {
2862 struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
2863 NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
2864 mstm->outp->base.base.name, i, payload->vcpi,
2865 payload->start_slot, payload->num_slots);
2866 }
2867
2868 for (i = 0; i < mstm->mgr.max_payloads; i++) {
2869 struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
2870 if (payload->vcpi == vcpi)
2871 return payload;
2872 }
2873
2874 return NULL;
2875 }
2876
2877 static void
2878 nv50_msto_cleanup(struct nv50_msto *msto)
2879 {
2880 struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
2881 struct nv50_mstc *mstc = msto->mstc;
2882 struct nv50_mstm *mstm = mstc->mstm;
2883
2884 NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
2885 if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto))
2886 drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
2887 if (msto->disabled) {
2888 msto->mstc = NULL;
2889 msto->head = NULL;
2890 msto->disabled = false;
2891 }
2892 }
2893
2894 static void
2895 nv50_msto_prepare(struct nv50_msto *msto)
2896 {
2897 struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
2898 struct nv50_mstc *mstc = msto->mstc;
2899 struct nv50_mstm *mstm = mstc->mstm;
2900 struct {
2901 struct nv50_disp_mthd_v1 base;
2902 struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
2903 } args = {
2904 .base.version = 1,
2905 .base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
2906 .base.hasht = mstm->outp->dcb->hasht,
2907 .base.hashm = (0xf0ff & mstm->outp->dcb->hashm) |
2908 (0x0100 << msto->head->base.index),
2909 };
2910
2911 NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
2912 if (mstc->port && mstc->port->vcpi.vcpi > 0) {
2913 struct drm_dp_payload *payload = nv50_msto_payload(msto);
2914 if (payload) {
2915 args.vcpi.start_slot = payload->start_slot;
2916 args.vcpi.num_slots = payload->num_slots;
2917 args.vcpi.pbn = mstc->port->vcpi.pbn;
2918 args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
2919 }
2920 }
2921
2922 NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
2923 msto->encoder.name, msto->head->base.base.name,
2924 args.vcpi.start_slot, args.vcpi.num_slots,
2925 args.vcpi.pbn, args.vcpi.aligned_pbn);
2926 nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
2927 }
2928
2929 static int
2930 nv50_msto_atomic_check(struct drm_encoder *encoder,
2931 struct drm_crtc_state *crtc_state,
2932 struct drm_connector_state *conn_state)
2933 {
2934 struct nv50_mstc *mstc = nv50_mstc(conn_state->connector);
2935 struct nv50_mstm *mstm = mstc->mstm;
2936 int bpp = conn_state->connector->display_info.bpc * 3;
2937 int slots;
2938
2939 mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);
2940
2941 slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
2942 if (slots < 0)
2943 return slots;
2944
2945 return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
2946 mstc->native);
2947 }
2948
2949 static void
2950 nv50_msto_enable(struct drm_encoder *encoder)
2951 {
2952 struct nv50_head *head = nv50_head(encoder->crtc);
2953 struct nv50_msto *msto = nv50_msto(encoder);
2954 struct nv50_mstc *mstc = NULL;
2955 struct nv50_mstm *mstm = NULL;
2956 struct drm_connector *connector;
2957 struct drm_connector_list_iter conn_iter;
2958 u8 proto, depth;
2959 int slots;
2960 bool r;
2961
2962 drm_connector_list_iter_begin(encoder->dev, &conn_iter);
2963 drm_for_each_connector_iter(connector, &conn_iter) {
2964 if (connector->state->best_encoder == &msto->encoder) {
2965 mstc = nv50_mstc(connector);
2966 mstm = mstc->mstm;
2967 break;
2968 }
2969 }
2970 drm_connector_list_iter_end(&conn_iter);
2971
2972 if (WARN_ON(!mstc))
2973 return;
2974
2975 slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
2976 r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots);
2977 WARN_ON(!r);
2978
2979 if (!mstm->links++)
2980 nv50_outp_acquire(mstm->outp);
2981
2982 if (mstm->outp->link & 1)
2983 proto = 0x8;
2984 else
2985 proto = 0x9;
2986
2987 switch (mstc->connector.display_info.bpc) {
2988 case 6: depth = 0x2; break;
2989 case 8: depth = 0x5; break;
2990 case 10:
2991 default: depth = 0x6; break;
2992 }
2993
2994 mstm->outp->update(mstm->outp, head->base.index,
2995 &head->base.base.state->adjusted_mode, proto, depth);
2996
2997 msto->head = head;
2998 msto->mstc = mstc;
2999 mstm->modified = true;
3000 }
3001
3002 static void
3003 nv50_msto_disable(struct drm_encoder *encoder)
3004 {
3005 struct nv50_msto *msto = nv50_msto(encoder);
3006 struct nv50_mstc *mstc = msto->mstc;
3007 struct nv50_mstm *mstm = mstc->mstm;
3008
3009 if (mstc->port)
3010 drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
3011
3012 mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
3013 mstm->modified = true;
3014 if (!--mstm->links)
3015 mstm->disabled = true;
3016 msto->disabled = true;
3017 }
3018
3019 static const struct drm_encoder_helper_funcs
3020 nv50_msto_help = {
3021 .disable = nv50_msto_disable,
3022 .enable = nv50_msto_enable,
3023 .atomic_check = nv50_msto_atomic_check,
3024 };
3025
3026 static void
3027 nv50_msto_destroy(struct drm_encoder *encoder)
3028 {
3029 struct nv50_msto *msto = nv50_msto(encoder);
3030 drm_encoder_cleanup(&msto->encoder);
3031 kfree(msto);
3032 }
3033
3034 static const struct drm_encoder_funcs
3035 nv50_msto = {
3036 .destroy = nv50_msto_destroy,
3037 };
3038
3039 static int
3040 nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
3041 struct nv50_msto **pmsto)
3042 {
3043 struct nv50_msto *msto;
3044 int ret;
3045
3046 if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
3047 return -ENOMEM;
3048
3049 ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
3050 DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
3051 if (ret) {
3052 kfree(*pmsto);
3053 *pmsto = NULL;
3054 return ret;
3055 }
3056
3057 drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
3058 msto->encoder.possible_crtcs = heads;
3059 return 0;
3060 }
3061
3062 static struct drm_encoder *
3063 nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
3064 struct drm_connector_state *connector_state)
3065 {
3066 struct nv50_head *head = nv50_head(connector_state->crtc);
3067 struct nv50_mstc *mstc = nv50_mstc(connector);
3068 if (mstc->port) {
3069 struct nv50_mstm *mstm = mstc->mstm;
3070 return &mstm->msto[head->base.index]->encoder;
3071 }
3072 return NULL;
3073 }
3074
3075 static struct drm_encoder *
3076 nv50_mstc_best_encoder(struct drm_connector *connector)
3077 {
3078 struct nv50_mstc *mstc = nv50_mstc(connector);
3079 if (mstc->port) {
3080 struct nv50_mstm *mstm = mstc->mstm;
3081 return &mstm->msto[0]->encoder;
3082 }
3083 return NULL;
3084 }
3085
3086 static enum drm_mode_status
3087 nv50_mstc_mode_valid(struct drm_connector *connector,
3088 struct drm_display_mode *mode)
3089 {
3090 return MODE_OK;
3091 }
3092
3093 static int
3094 nv50_mstc_get_modes(struct drm_connector *connector)
3095 {
3096 struct nv50_mstc *mstc = nv50_mstc(connector);
3097 int ret = 0;
3098
3099 mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
3100 drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
3101 if (mstc->edid)
3102 ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
3103
3104 if (!mstc->connector.display_info.bpc)
3105 mstc->connector.display_info.bpc = 8;
3106
3107 if (mstc->native)
3108 drm_mode_destroy(mstc->connector.dev, mstc->native);
3109 mstc->native = nouveau_conn_native_mode(&mstc->connector);
3110 return ret;
3111 }
3112
3113 static const struct drm_connector_helper_funcs
3114 nv50_mstc_help = {
3115 .get_modes = nv50_mstc_get_modes,
3116 .mode_valid = nv50_mstc_mode_valid,
3117 .best_encoder = nv50_mstc_best_encoder,
3118 .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
3119 };
3120
3121 static enum drm_connector_status
3122 nv50_mstc_detect(struct drm_connector *connector, bool force)
3123 {
3124 struct nv50_mstc *mstc = nv50_mstc(connector);
3125 if (!mstc->port)
3126 return connector_status_disconnected;
3127 return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
3128 }
3129
3130 static void
3131 nv50_mstc_destroy(struct drm_connector *connector)
3132 {
3133 struct nv50_mstc *mstc = nv50_mstc(connector);
3134 drm_connector_cleanup(&mstc->connector);
3135 kfree(mstc);
3136 }
3137
3138 static const struct drm_connector_funcs
3139 nv50_mstc = {
3140 .reset = nouveau_conn_reset,
3141 .detect = nv50_mstc_detect,
3142 .fill_modes = drm_helper_probe_single_connector_modes,
3143 .destroy = nv50_mstc_destroy,
3144 .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
3145 .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
3146 .atomic_set_property = nouveau_conn_atomic_set_property,
3147 .atomic_get_property = nouveau_conn_atomic_get_property,
3148 };
3149
3150 static int
3151 nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
3152 const char *path, struct nv50_mstc **pmstc)
3153 {
3154 struct drm_device *dev = mstm->outp->base.base.dev;
3155 struct nv50_mstc *mstc;
3156 int ret, i;
3157
3158 if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
3159 return -ENOMEM;
3160 mstc->mstm = mstm;
3161 mstc->port = port;
3162
3163 ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
3164 DRM_MODE_CONNECTOR_DisplayPort);
3165 if (ret) {
3166 kfree(*pmstc);
3167 *pmstc = NULL;
3168 return ret;
3169 }
3170
3171 drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);
3172
3173 mstc->connector.funcs->reset(&mstc->connector);
3174 nouveau_conn_attach_properties(&mstc->connector);
3175
3176 for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
3177 drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
3178
3179 drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
3180 drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
3181 drm_mode_connector_set_path_property(&mstc->connector, path);
3182 return 0;
3183 }
3184
3185 static void
3186 nv50_mstm_cleanup(struct nv50_mstm *mstm)
3187 {
3188 struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
3189 struct drm_encoder *encoder;
3190 int ret;
3191
3192 NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
3193 ret = drm_dp_check_act_status(&mstm->mgr);
3194
3195 ret = drm_dp_update_payload_part2(&mstm->mgr);
3196
3197 drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
3198 if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
3199 struct nv50_msto *msto = nv50_msto(encoder);
3200 struct nv50_mstc *mstc = msto->mstc;
3201 if (mstc && mstc->mstm == mstm)
3202 nv50_msto_cleanup(msto);
3203 }
3204 }
3205
3206 mstm->modified = false;
3207 }
3208
3209 static void
3210 nv50_mstm_prepare(struct nv50_mstm *mstm)
3211 {
3212 struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
3213 struct drm_encoder *encoder;
3214 int ret;
3215
3216 NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
3217 ret = drm_dp_update_payload_part1(&mstm->mgr);
3218
3219 drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
3220 if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
3221 struct nv50_msto *msto = nv50_msto(encoder);
3222 struct nv50_mstc *mstc = msto->mstc;
3223 if (mstc && mstc->mstm == mstm)
3224 nv50_msto_prepare(msto);
3225 }
3226 }
3227
3228 if (mstm->disabled) {
3229 if (!mstm->links)
3230 nv50_outp_release(mstm->outp);
3231 mstm->disabled = false;
3232 }
3233 }
3234
3235 static void
3236 nv50_mstm_hotplug(struct drm_dp_mst_topology_mgr *mgr)
3237 {
3238 struct nv50_mstm *mstm = nv50_mstm(mgr);
3239 drm_kms_helper_hotplug_event(mstm->outp->base.base.dev);
3240 }
3241
3242 static void
3243 nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
3244 struct drm_connector *connector)
3245 {
3246 struct nouveau_drm *drm = nouveau_drm(connector->dev);
3247 struct nv50_mstc *mstc = nv50_mstc(connector);
3248
3249 drm_connector_unregister(&mstc->connector);
3250
3251 drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
3252
3253 drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
3254 mstc->port = NULL;
3255 drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
3256
3257 drm_connector_unreference(&mstc->connector);
3258 }
3259
3260 static void
3261 nv50_mstm_register_connector(struct drm_connector *connector)
3262 {
3263 struct nouveau_drm *drm = nouveau_drm(connector->dev);
3264
3265 drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
3266
3267 drm_connector_register(connector);
3268 }
3269
3270 static struct drm_connector *
3271 nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
3272 struct drm_dp_mst_port *port, const char *path)
3273 {
3274 struct nv50_mstm *mstm = nv50_mstm(mgr);
3275 struct nv50_mstc *mstc;
3276 int ret;
3277
3278 ret = nv50_mstc_new(mstm, port, path, &mstc);
3279 if (ret) {
3280 if (mstc)
3281 mstc->connector.funcs->destroy(&mstc->connector);
3282 return NULL;
3283 }
3284
3285 return &mstc->connector;
3286 }
3287
3288 static const struct drm_dp_mst_topology_cbs
3289 nv50_mstm = {
3290 .add_connector = nv50_mstm_add_connector,
3291 .register_connector = nv50_mstm_register_connector,
3292 .destroy_connector = nv50_mstm_destroy_connector,
3293 .hotplug = nv50_mstm_hotplug,
3294 };
3295
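/* Service an MST sideband interrupt: read the ESI (event status
 * indicator) block, let the topology manager process any hotplug or
 * topology change, then ack the handled events.  Looping until nothing
 * more is handled follows the DP 1.2 MST IRQ flow.
 */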
3296 void
3297 nv50_mstm_service(struct nv50_mstm *mstm)
3298 {
3299 struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
3300 bool handled = true;
3301 int ret;
3302 u8 esi[8] = {};
3303
3304 if (!aux)
3305 return;
3306
3307 while (handled) {
3308 ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
3309 if (ret != 8) {
3310 drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
3311 return;
3312 }
3313
3314 drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
3315 if (!handled)
3316 break;
3317
3318 drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
3319 }
3320 }
3321
3322 void
3323 nv50_mstm_remove(struct nv50_mstm *mstm)
3324 {
3325 if (mstm)
3326 drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
3327 }
3328
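/* For DPCD 1.2+ sinks, mirror the requested state into the sink's
 * DP_MSTM_CTRL register before asking NVKM to (de)configure the MST
 * link on our side.
 */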
3329 static int
3330 nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
3331 {
3332 struct nouveau_encoder *outp = mstm->outp;
3333 struct {
3334 struct nv50_disp_mthd_v1 base;
3335 struct nv50_disp_sor_dp_mst_link_v0 mst;
3336 } args = {
3337 .base.version = 1,
3338 .base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
3339 .base.hasht = outp->dcb->hasht,
3340 .base.hashm = outp->dcb->hashm,
3341 .mst.state = state,
3342 };
3343 struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
3344 struct nvif_object *disp = &drm->display->disp.object;
3345 int ret;
3346
3347 if (dpcd >= 0x12) {
3348 ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
3349 if (ret < 0)
3350 return ret;
3351
3352 dpcd &= ~DP_MST_EN;
3353 if (state)
3354 dpcd |= DP_MST_EN;
3355
3356 ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
3357 if (ret < 0)
3358 return ret;
3359 }
3360
3361 return nvif_mthd(disp, 0, &args, sizeof(args));
3362 }
3363
3364 int
3365 nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
3366 {
3367 int ret, state = 0;
3368
3369 if (!mstm)
3370 return 0;
3371
3372 if (dpcd[0] >= 0x12) {
3373 ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
3374 if (ret < 0)
3375 return ret;
3376
3377 if (!(dpcd[1] & DP_MST_CAP))
3378 dpcd[0] = 0x11;
3379 else
3380 state = allow;
3381 }
3382
3383 ret = nv50_mstm_enable(mstm, dpcd[0], state);
3384 if (ret)
3385 return ret;
3386
3387 ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
3388 if (ret)
3389 return nv50_mstm_enable(mstm, dpcd[0], 0);
3390
3391 return mstm->mgr.mst_state;
3392 }
3393
3394 static void
3395 nv50_mstm_fini(struct nv50_mstm *mstm)
3396 {
3397 if (mstm && mstm->mgr.mst_state)
3398 drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
3399 }
3400
3401 static void
3402 nv50_mstm_init(struct nv50_mstm *mstm)
3403 {
3404 if (mstm && mstm->mgr.mst_state)
3405 drm_dp_mst_topology_mgr_resume(&mstm->mgr);
3406 }
3407
3408 static void
3409 nv50_mstm_del(struct nv50_mstm **pmstm)
3410 {
3411 struct nv50_mstm *mstm = *pmstm;
3412 if (mstm) {
/* Tear down the topology manager before freeing it, so its port
 * and payload state isn't leaked along with the struct.
 */
drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
3413 kfree(*pmstm);
3414 *pmstm = NULL;
3415 }
3416 }
3417
3418 static int
3419 nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
3420 int conn_base_id, struct nv50_mstm **pmstm)
3421 {
3422 const int max_payloads = hweight8(outp->dcb->heads);
3423 struct drm_device *dev = outp->base.base.dev;
3424 struct nv50_mstm *mstm;
3425 int ret, i;
3426 u8 dpcd;
3427
3428 /* This is a workaround for some monitors not functioning
3429 * correctly in MST mode on initial module load. I think
3430 * some bad interaction with the VBIOS may be responsible.
3431 *
3432 * A good ol' off and on again seems to work here ;)
3433 */
3434 ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
3435 if (ret >= 0 && dpcd >= 0x12)
3436 drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
3437
3438 if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
3439 return -ENOMEM;
3440 mstm->outp = outp;
3441 mstm->mgr.cbs = &nv50_mstm;
3442
3443 ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
3444 max_payloads, conn_base_id);
3445 if (ret)
3446 return ret;
3447
3448 for (i = 0; i < max_payloads; i++) {
3449 ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
3450 i, &mstm->msto[i]);
3451 if (ret)
3452 return ret;
3453 }
3454
3455 return 0;
3456 }
3457
3458 /******************************************************************************
3459 * SOR
3460 *****************************************************************************/
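/* nv_encoder->ctrl accumulates the SOR control word: bits 0-3 form a
 * mask of the heads driving this SOR and the protocol sits at bit 8.
 * The original EVO core also folds sync polarity and depth into the
 * same word, whereas GF110+ programs those through the per-head
 * 0x0404 method instead.
 */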
3461 static void
3462 nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
3463 struct drm_display_mode *mode, u8 proto, u8 depth)
3464 {
3465 struct nv50_dmac *core = &nv50_mast(nv_encoder->base.base.dev)->base;
3466 u32 *push;
3467
3468 if (!mode) {
3469 nv_encoder->ctrl &= ~BIT(head);
3470 if (!(nv_encoder->ctrl & 0x0000000f))
3471 nv_encoder->ctrl = 0;
3472 } else {
3473 nv_encoder->ctrl |= proto << 8;
3474 nv_encoder->ctrl |= BIT(head);
3475 }
3476
3477 if ((push = evo_wait(core, 6))) {
3478 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
3479 if (mode) {
3480 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
3481 nv_encoder->ctrl |= 0x00001000;
3482 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
3483 nv_encoder->ctrl |= 0x00002000;
3484 nv_encoder->ctrl |= depth << 16;
3485 }
3486 evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
3487 } else {
3488 if (mode) {
3489 u32 magic = 0x31ec6000 | (head << 25);
3490 u32 syncs = 0x00000001;
3491 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
3492 syncs |= 0x00000008;
3493 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
3494 syncs |= 0x00000010;
3495 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
3496 magic |= 0x00000001;
3497
3498 evo_mthd(push, 0x0404 + (head * 0x300), 2);
3499 evo_data(push, syncs | (depth << 6));
3500 evo_data(push, magic);
3501 }
3502 evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
3503 }
3504 evo_data(push, nv_encoder->ctrl);
3505 evo_kick(push, core);
3506 }
3507 }
3508
3509 static void
3510 nv50_sor_disable(struct drm_encoder *encoder)
3511 {
3512 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
3513 struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
3514
3515 nv_encoder->crtc = NULL;
3516
3517 if (nv_crtc) {
3518 struct nvkm_i2c_aux *aux = nv_encoder->aux;
3519 u8 pwr;
3520
3521 if (aux) {
3522 int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
3523 if (ret == 0) {
3524 pwr &= ~DP_SET_POWER_MASK;
3525 pwr |= DP_SET_POWER_D3;
3526 nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
3527 }
3528 }
3529
3530 nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
3531 nv50_audio_disable(encoder, nv_crtc);
3532 nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
3533 nv50_outp_release(nv_encoder);
3534 }
3535 }
3536
3537 static void
3538 nv50_sor_enable(struct drm_encoder *encoder)
3539 {
3540 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
3541 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
3542 struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
3543 struct {
3544 struct nv50_disp_mthd_v1 base;
3545 struct nv50_disp_sor_lvds_script_v0 lvds;
3546 } lvds = {
3547 .base.version = 1,
3548 .base.method = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
3549 .base.hasht = nv_encoder->dcb->hasht,
3550 .base.hashm = nv_encoder->dcb->hashm,
3551 };
3552 struct nv50_disp *disp = nv50_disp(encoder->dev);
3553 struct drm_device *dev = encoder->dev;
3554 struct nouveau_drm *drm = nouveau_drm(dev);
3555 struct nouveau_connector *nv_connector;
3556 struct nvbios *bios = &drm->vbios;
3557 u8 proto = 0xf;
3558 u8 depth = 0x0;
3559
3560 nv_connector = nouveau_encoder_connector_get(nv_encoder);
3561 nv_encoder->crtc = encoder->crtc;
3562 nv50_outp_acquire(nv_encoder);
3563
3564 switch (nv_encoder->dcb->type) {
3565 case DCB_OUTPUT_TMDS:
3566 if (nv_encoder->link & 1) {
3567 proto = 0x1;
3568 /* Only enable dual-link if:
3569 * - We need to (i.e. pixel rate >= 165MHz, per the check below)
3570 * - DCB says we can
3571 * - Not an HDMI monitor, since there's no dual-link
3572 * on HDMI.
3573 */
3574 if (mode->clock >= 165000 &&
3575 nv_encoder->dcb->duallink_possible &&
3576 !drm_detect_hdmi_monitor(nv_connector->edid))
3577 proto |= 0x4;
3578 } else {
3579 proto = 0x2;
3580 }
3581
3582 nv50_hdmi_enable(&nv_encoder->base.base, mode);
3583 break;
3584 case DCB_OUTPUT_LVDS:
3585 proto = 0x0;
3586
3587 if (bios->fp_no_ddc) {
3588 if (bios->fp.dual_link)
3589 lvds.lvds.script |= 0x0100;
3590 if (bios->fp.if_is_24bit)
3591 lvds.lvds.script |= 0x0200;
3592 } else {
3593 if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
3594 if (((u8 *)nv_connector->edid)[121] == 2)
3595 lvds.lvds.script |= 0x0100;
3596 } else
3597 if (mode->clock >= bios->fp.duallink_transition_clk) {
3598 lvds.lvds.script |= 0x0100;
3599 }
3600
3601 if (lvds.lvds.script & 0x0100) {
3602 if (bios->fp.strapless_is_24bit & 2)
3603 lvds.lvds.script |= 0x0200;
3604 } else {
3605 if (bios->fp.strapless_is_24bit & 1)
3606 lvds.lvds.script |= 0x0200;
3607 }
3608
3609 if (nv_connector->base.display_info.bpc == 8)
3610 lvds.lvds.script |= 0x0200;
3611 }
3612
3613 nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds));
3614 break;
3615 case DCB_OUTPUT_DP:
3616 if (nv_connector->base.display_info.bpc == 6)
3617 depth = 0x2;
3618 else
3619 if (nv_connector->base.display_info.bpc == 8)
3620 depth = 0x5;
3621 else
3622 depth = 0x6;
3623
3624 if (nv_encoder->link & 1)
3625 proto = 0x8;
3626 else
3627 proto = 0x9;
3628
3629 nv50_audio_enable(encoder, mode);
3630 break;
3631 default:
3632 BUG();
3633 break;
3634 }
3635
3636 nv_encoder->update(nv_encoder, nv_crtc->index, mode, proto, depth);
3637 }
3638
3639 static const struct drm_encoder_helper_funcs
3640 nv50_sor_help = {
3641 .atomic_check = nv50_outp_atomic_check,
3642 .enable = nv50_sor_enable,
3643 .disable = nv50_sor_disable,
3644 };
3645
3646 static void
3647 nv50_sor_destroy(struct drm_encoder *encoder)
3648 {
3649 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
3650 nv50_mstm_del(&nv_encoder->dp.mstm);
3651 drm_encoder_cleanup(encoder);
3652 kfree(encoder);
3653 }
3654
3655 static const struct drm_encoder_funcs
3656 nv50_sor_func = {
3657 .destroy = nv50_sor_destroy,
3658 };
3659
3660 static int
3661 nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
3662 {
3663 struct nouveau_connector *nv_connector = nouveau_connector(connector);
3664 struct nouveau_drm *drm = nouveau_drm(connector->dev);
3665 struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
3666 struct nouveau_encoder *nv_encoder;
3667 struct drm_encoder *encoder;
3668 int type, ret;
3669
3670 switch (dcbe->type) {
3671 case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
3672 case DCB_OUTPUT_TMDS:
3673 case DCB_OUTPUT_DP:
3674 default:
3675 type = DRM_MODE_ENCODER_TMDS;
3676 break;
3677 }
3678
3679 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
3680 if (!nv_encoder)
3681 return -ENOMEM;
3682 nv_encoder->dcb = dcbe;
3683 nv_encoder->update = nv50_sor_update;
3684
3685 encoder = to_drm_encoder(nv_encoder);
3686 encoder->possible_crtcs = dcbe->heads;
3687 encoder->possible_clones = 0;
3688 drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
3689 "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
3690 drm_encoder_helper_add(encoder, &nv50_sor_help);
3691
3692 drm_mode_connector_attach_encoder(connector, encoder);
3693
3694 if (dcbe->type == DCB_OUTPUT_DP) {
3695 struct nv50_disp *disp = nv50_disp(encoder->dev);
3696 struct nvkm_i2c_aux *aux =
3697 nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
3698 if (aux) {
3699 if (disp->disp->object.oclass < GF110_DISP) {
3700 /* HW has no support for address-only
3701 * transactions, so we're required to
3702 * use custom I2C-over-AUX code.
3703 */
3704 nv_encoder->i2c = &aux->i2c;
3705 } else {
3706 nv_encoder->i2c = &nv_connector->aux.ddc;
3707 }
3708 nv_encoder->aux = aux;
3709 }
3710
3711 /*TODO: Use DP Info Table to check for support. */
3712 if (disp->disp->object.oclass >= GF110_DISP) {
3713 ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
3714 nv_connector->base.base.id,
3715 &nv_encoder->dp.mstm);
3716 if (ret)
3717 return ret;
3718 }
3719 } else {
3720 struct nvkm_i2c_bus *bus =
3721 nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
3722 if (bus)
3723 nv_encoder->i2c = &bus->i2c;
3724 }
3725
3726 return 0;
3727 }
3728
3729 /******************************************************************************
3730 * PIOR
3731 *****************************************************************************/
3732 static int
3733 nv50_pior_atomic_check(struct drm_encoder *encoder,
3734 struct drm_crtc_state *crtc_state,
3735 struct drm_connector_state *conn_state)
3736 {
3737 int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
3738 if (ret)
3739 return ret;
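/* PIORs drive external encoders; the pixel clock is doubled here,
 * presumably because data crosses the PIOR link at twice the
 * pixel rate.
 */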
3740 crtc_state->adjusted_mode.clock *= 2;
3741 return 0;
3742 }
3743
3744 static void
3745 nv50_pior_disable(struct drm_encoder *encoder)
3746 {
3747 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
3748 struct nv50_mast *mast = nv50_mast(encoder->dev);
3749 const int or = nv_encoder->or;
3750 u32 *push;
3751
3752 if (nv_encoder->crtc) {
3753 push = evo_wait(mast, 4);
3754 if (push) {
3755 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
3756 evo_mthd(push, 0x0700 + (or * 0x040), 1);
3757 evo_data(push, 0x00000000);
3758 }
3759 evo_kick(push, mast);
3760 }
3761 }
3762
3763 nv_encoder->crtc = NULL;
3764 nv50_outp_release(nv_encoder);
3765 }
3766
3767 static void
3768 nv50_pior_enable(struct drm_encoder *encoder)
3769 {
3770 struct nv50_mast *mast = nv50_mast(encoder->dev);
3771 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
3772 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
3773 struct nouveau_connector *nv_connector;
3774 struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
3775 u8 owner = 1 << nv_crtc->index;
3776 u8 proto, depth;
3777 u32 *push;
3778
3779 nv50_outp_acquire(nv_encoder);
3780
3781 nv_connector = nouveau_encoder_connector_get(nv_encoder);
3782 switch (nv_connector->base.display_info.bpc) {
3783 case 10: depth = 0x6; break;
3784 case 8: depth = 0x5; break;
3785 case 6: depth = 0x2; break;
3786 default: depth = 0x0; break;
3787 }
3788
3789 switch (nv_encoder->dcb->type) {
3790 case DCB_OUTPUT_TMDS:
3791 case DCB_OUTPUT_DP:
3792 proto = 0x0;
3793 break;
3794 default:
3795 BUG();
3796 break;
3797 }
3798
3799 push = evo_wait(mast, 8);
3800 if (push) {
3801 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
3802 u32 ctrl = (depth << 16) | (proto << 8) | owner;
3803 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
3804 ctrl |= 0x00001000;
3805 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
3806 ctrl |= 0x00002000;
3807 evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1);
3808 evo_data(push, ctrl);
3809 }
3810
3811 evo_kick(push, mast);
3812 }
3813
3814 nv_encoder->crtc = encoder->crtc;
3815 }

static const struct drm_encoder_helper_funcs
nv50_pior_help = {
        .atomic_check = nv50_pior_atomic_check,
        .enable = nv50_pior_enable,
        .disable = nv50_pior_disable,
};

static void
nv50_pior_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
        kfree(encoder);
}

static const struct drm_encoder_funcs
nv50_pior_func = {
        .destroy = nv50_pior_destroy,
};

static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
        struct nouveau_connector *nv_connector = nouveau_connector(connector);
        struct nouveau_drm *drm = nouveau_drm(connector->dev);
        struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
        struct nvkm_i2c_bus *bus = NULL;
        struct nvkm_i2c_aux *aux = NULL;
        struct i2c_adapter *ddc;
        struct nouveau_encoder *nv_encoder;
        struct drm_encoder *encoder;
        int type;

        switch (dcbe->type) {
        case DCB_OUTPUT_TMDS:
                bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
                ddc = bus ? &bus->i2c : NULL;
                type = DRM_MODE_ENCODER_TMDS;
                break;
        case DCB_OUTPUT_DP:
                aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
                ddc = aux ? &nv_connector->aux.ddc : NULL;
                type = DRM_MODE_ENCODER_TMDS;
                break;
        default:
                return -ENODEV;
        }

        nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
        if (!nv_encoder)
                return -ENOMEM;
        nv_encoder->dcb = dcbe;
        nv_encoder->i2c = ddc;
        nv_encoder->aux = aux;

        encoder = to_drm_encoder(nv_encoder);
        encoder->possible_crtcs = dcbe->heads;
        encoder->possible_clones = 0;
        drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
                         "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
        drm_encoder_helper_add(encoder, &nv50_pior_help);

        drm_mode_connector_attach_encoder(connector, encoder);
        return 0;
}

/******************************************************************************
 * Atomic
 *****************************************************************************/

static void
nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
{
        struct nv50_disp *disp = nv50_disp(drm->dev);
        struct nv50_dmac *core = &disp->mast.base;
        struct nv50_mstm *mstm;
        struct drm_encoder *encoder;
        u32 *push;

        NV_ATOMIC(drm, "commit core %08x\n", interlock);

        drm_for_each_encoder(encoder, drm->dev) {
                if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
                        mstm = nouveau_encoder(encoder)->dp.mstm;
                        if (mstm && mstm->modified)
                                nv50_mstm_prepare(mstm);
                }
        }

        if ((push = evo_wait(core, 5))) {
                evo_mthd(push, 0x0084, 1);
                evo_data(push, 0x80000000);
                evo_mthd(push, 0x0080, 2);
                evo_data(push, interlock);
                evo_data(push, 0x00000000);
                nouveau_bo_wr32(disp->sync, 0, 0x00000000);
                evo_kick(push, core);
                if (nvif_msec(&drm->client.device, 2000ULL,
                        if (nouveau_bo_rd32(disp->sync, 0))
                                break;
                        usleep_range(1, 2);
                ) < 0)
                        NV_ERROR(drm, "EVO timeout\n");
        }

        drm_for_each_encoder(encoder, drm->dev) {
                if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
                        mstm = nouveau_encoder(encoder)->dp.mstm;
                        if (mstm && mstm->modified)
                                nv50_mstm_cleanup(mstm);
                }
        }
}
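
/* The handshake above is how software learns the core channel has
 * processed an update: the 5-dword push pairs a notifier request
 * (method 0x0084) with the UPDATE method (0x0080) and its interlock
 * mask, the first word of the shared sync buffer is cleared, and the
 * CPU then polls that word for up to two seconds until the display
 * engine writes it back non-zero.  Roughly, the nvif_msec() wait
 * expands to:
 *
 *      nouveau_bo_wr32(disp->sync, 0, 0);      // arm
 *      evo_kick(push, core);                   // submit
 *      while (!timed_out(2s)) {                // poll
 *              if (nouveau_bo_rd32(disp->sync, 0))
 *                      break;                  // HW signalled
 *              usleep_range(1, 2);
 *      }
 *
 * (timed_out() is shorthand for the bookkeeping the macro does.)
 */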

static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct drm_crtc_state *new_crtc_state, *old_crtc_state;
        struct drm_crtc *crtc;
        struct drm_plane_state *new_plane_state;
        struct drm_plane *plane;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nv50_disp *disp = nv50_disp(dev);
        struct nv50_atom *atom = nv50_atom(state);
        struct nv50_outp_atom *outp, *outt;
        u32 interlock_core = 0;
        u32 interlock_chan = 0;
        int i;

        NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
        drm_atomic_helper_wait_for_fences(dev, state, false);
        drm_atomic_helper_wait_for_dependencies(state);
        drm_atomic_helper_update_legacy_modeset_state(dev, state);

        if (atom->lock_core)
                mutex_lock(&disp->mutex);

        /* Disable head(s). */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
                struct nv50_head *head = nv50_head(crtc);

                NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
                          asyh->clr.mask, asyh->set.mask);
                if (old_crtc_state->active && !new_crtc_state->active)
                        drm_crtc_vblank_off(crtc);

                if (asyh->clr.mask) {
                        nv50_head_flush_clr(head, asyh, atom->flush_disable);
                        interlock_core |= 1;
                }
        }

        /* Disable plane(s). */
        for_each_new_plane_in_state(state, plane, new_plane_state, i) {
                struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
                struct nv50_wndw *wndw = nv50_wndw(plane);

                NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
                          asyw->clr.mask, asyw->set.mask);
                if (!asyw->clr.mask)
                        continue;

                interlock_chan |= nv50_wndw_flush_clr(wndw, interlock_core,
                                                      atom->flush_disable,
                                                      asyw);
        }

        /* Disable output path(s). */
        list_for_each_entry(outp, &atom->outp, head) {
                const struct drm_encoder_helper_funcs *help;
                struct drm_encoder *encoder;

                encoder = outp->encoder;
                help = encoder->helper_private;

                NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
                          outp->clr.mask, outp->set.mask);

                if (outp->clr.mask) {
                        help->disable(encoder);
                        interlock_core |= 1;
                        if (outp->flush_disable) {
                                nv50_disp_atomic_commit_core(drm, interlock_chan);
                                interlock_core = 0;
                                interlock_chan = 0;
                        }
                }
        }

        /* Flush disable. */
        if (interlock_core) {
                if (atom->flush_disable) {
                        nv50_disp_atomic_commit_core(drm, interlock_chan);
                        interlock_core = 0;
                        interlock_chan = 0;
                }
        }

        /* Update output path(s). */
        list_for_each_entry_safe(outp, outt, &atom->outp, head) {
                const struct drm_encoder_helper_funcs *help;
                struct drm_encoder *encoder;

                encoder = outp->encoder;
                help = encoder->helper_private;

                NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
                          outp->set.mask, outp->clr.mask);

                if (outp->set.mask) {
                        help->enable(encoder);
                        interlock_core = 1;
                }

                list_del(&outp->head);
                kfree(outp);
        }

        /* Update head(s). */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
                struct nv50_head *head = nv50_head(crtc);

                NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
                          asyh->set.mask, asyh->clr.mask);

                if (asyh->set.mask) {
                        nv50_head_flush_set(head, asyh);
                        interlock_core = 1;
                }

                if (new_crtc_state->active) {
                        if (!old_crtc_state->active)
                                drm_crtc_vblank_on(crtc);
                        if (new_crtc_state->event)
                                drm_crtc_vblank_get(crtc);
                }
        }

        /* Update plane(s). */
        for_each_new_plane_in_state(state, plane, new_plane_state, i) {
                struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
                struct nv50_wndw *wndw = nv50_wndw(plane);

                NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
                          asyw->set.mask, asyw->clr.mask);
                if (!asyw->set.mask &&
                    (!asyw->clr.mask || atom->flush_disable))
                        continue;

                interlock_chan |= nv50_wndw_flush_set(wndw, interlock_core, asyw);
        }

        /* Flush update. */
        if (interlock_core) {
                if (!interlock_chan && atom->state.legacy_cursor_update) {
                        u32 *push = evo_wait(&disp->mast, 2);
                        if (push) {
                                evo_mthd(push, 0x0080, 1);
                                evo_data(push, 0x00000000);
                                evo_kick(push, &disp->mast);
                        }
                } else {
                        nv50_disp_atomic_commit_core(drm, interlock_chan);
                }
        }

        if (atom->lock_core)
                mutex_unlock(&disp->mutex);

        /* Wait for HW to signal completion. */
        for_each_new_plane_in_state(state, plane, new_plane_state, i) {
                struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
                struct nv50_wndw *wndw = nv50_wndw(plane);
                int ret = nv50_wndw_wait_armed(wndw, asyw);
                if (ret)
                        NV_ERROR(drm, "%s: timeout\n", plane->name);
        }

        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                if (new_crtc_state->event) {
                        unsigned long flags;
                        /* Get correct count/ts if racing with vblank irq */
                        if (new_crtc_state->active)
                                drm_crtc_accurate_vblank_count(crtc);
                        spin_lock_irqsave(&crtc->dev->event_lock, flags);
                        drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
                        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

                        new_crtc_state->event = NULL;
                        if (new_crtc_state->active)
                                drm_crtc_vblank_put(crtc);
                }
        }

        drm_atomic_helper_commit_hw_done(state);
        drm_atomic_helper_cleanup_planes(dev, state);
        drm_atomic_helper_commit_cleanup_done(state);
        drm_atomic_state_put(state);
}
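
/* Summary of the ordering implemented above, since it is easy to lose
 * in the loop bodies; everything being torn down is flushed to the
 * hardware before anything is brought up:
 *
 *      1. heads:   flush clr state                (core interlock)
 *      2. planes:  flush clr state                (channel interlock)
 *      3. outputs: ->disable(), flushing immediately when an output
 *                  demands flush_disable (MST)
 *      4. flush any remaining disables through the core channel
 *      5. outputs: ->enable()
 *      6. heads:   flush set state
 *      7. planes:  flush set state
 *      8. final flush: a full commit_core, or a bare UPDATE (0x0080)
 *                  when only a legacy cursor moved
 *
 * The interlock masks passed to commit_core tie the satellite
 * channels' pending updates to the core channel's kick, so the pieces
 * of a commit take effect together.
 */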

static void
nv50_disp_atomic_commit_work(struct work_struct *work)
{
        struct drm_atomic_state *state =
                container_of(work, typeof(*state), commit_work);
        nv50_disp_atomic_commit_tail(state);
}

static int
nv50_disp_atomic_commit(struct drm_device *dev,
                        struct drm_atomic_state *state, bool nonblock)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nv50_disp *disp = nv50_disp(dev);
        struct drm_plane_state *new_plane_state;
        struct drm_plane *plane;
        struct drm_crtc *crtc;
        bool active = false;
        int ret, i;

        ret = pm_runtime_get_sync(dev->dev);
        if (ret < 0 && ret != -EACCES)
                return ret;

        ret = drm_atomic_helper_setup_commit(state, nonblock);
        if (ret)
                goto done;

        INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);

        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret)
                goto done;

        if (!nonblock) {
                ret = drm_atomic_helper_wait_for_fences(dev, state, true);
                if (ret)
                        goto err_cleanup;
        }

        ret = drm_atomic_helper_swap_state(state, true);
        if (ret)
                goto err_cleanup;

        for_each_new_plane_in_state(state, plane, new_plane_state, i) {
                struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
                struct nv50_wndw *wndw = nv50_wndw(plane);

                if (asyw->set.image) {
                        asyw->ntfy.handle = wndw->dmac->sync.handle;
                        asyw->ntfy.offset = wndw->ntfy;
                        asyw->ntfy.awaken = false;
                        asyw->set.ntfy = true;
                        nouveau_bo_wr32(disp->sync, wndw->ntfy / 4, 0x00000000);
                        wndw->ntfy ^= 0x10;
                }
        }

        drm_atomic_state_get(state);

        if (nonblock)
                queue_work(system_unbound_wq, &state->commit_work);
        else
                nv50_disp_atomic_commit_tail(state);

        drm_for_each_crtc(crtc, dev) {
                if (crtc->state->enable) {
                        if (!drm->have_disp_power_ref) {
                                drm->have_disp_power_ref = true;
                                return 0;
                        }
                        active = true;
                        break;
                }
        }

        if (!active && drm->have_disp_power_ref) {
                pm_runtime_put_autosuspend(dev->dev);
                drm->have_disp_power_ref = false;
        }

err_cleanup:
        if (ret)
                drm_atomic_helper_cleanup_planes(dev, state);
done:
        pm_runtime_put_autosuspend(dev->dev);
        return ret;
}
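
/* A note on the runtime-PM reference counting above: every commit
 * takes a temporary reference for its own duration, released at
 * "done".  Independently, the first commit that leaves a CRTC enabled
 * keeps its reference (the early "return 0") and records that in
 * drm->have_disp_power_ref, so the device cannot runtime-suspend while
 * the display is live; the first commit that finds no CRTC enabled
 * drops that extra reference again.
 */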

static struct nv50_outp_atom *
nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
{
        struct nv50_outp_atom *outp;

        list_for_each_entry(outp, &atom->outp, head) {
                if (outp->encoder == encoder)
                        return outp;
        }

        outp = kzalloc(sizeof(*outp), GFP_KERNEL);
        if (!outp)
                return ERR_PTR(-ENOMEM);

        list_add(&outp->head, &atom->outp);
        outp->encoder = encoder;
        return outp;
}

static int
nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
                                struct drm_connector_state *old_connector_state)
{
        struct drm_encoder *encoder = old_connector_state->best_encoder;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct drm_crtc *crtc;
        struct nv50_outp_atom *outp;

        if (!(crtc = old_connector_state->crtc))
                return 0;

        old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
        new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
        if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
                outp = nv50_disp_outp_atomic_add(atom, encoder);
                if (IS_ERR(outp))
                        return PTR_ERR(outp);

                if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
                        outp->flush_disable = true;
                        atom->flush_disable = true;
                }
                outp->clr.ctrl = true;
                atom->lock_core = true;
        }

        return 0;
}

static int
nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
                                struct drm_connector_state *connector_state)
{
        struct drm_encoder *encoder = connector_state->best_encoder;
        struct drm_crtc_state *new_crtc_state;
        struct drm_crtc *crtc;
        struct nv50_outp_atom *outp;

        if (!(crtc = connector_state->crtc))
                return 0;

        new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
        if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
                outp = nv50_disp_outp_atomic_add(atom, encoder);
                if (IS_ERR(outp))
                        return PTR_ERR(outp);

                outp->set.ctrl = true;
                atom->lock_core = true;
        }

        return 0;
}

static int
nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
        struct nv50_atom *atom = nv50_atom(state);
        struct drm_connector_state *old_connector_state, *new_connector_state;
        struct drm_connector *connector;
        int ret, i;

        ret = drm_atomic_helper_check(dev, state);
        if (ret)
                return ret;

        for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
                ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
                if (ret)
                        return ret;

                ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
                if (ret)
                        return ret;
        }

        return 0;
}
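
/* Together, the clr/set checks above annotate the helper-validated
 * state with this driver's bookkeeping: each connector whose CRTC
 * undergoes a full modeset gets an nv50_outp_atom queued on the atom's
 * outp list, marked for output-control teardown (clr.ctrl) and/or
 * bring-up (set.ctrl).  The commit tail walks exactly this list.
 * lock_core serialises such commits against each other, while
 * flush_disable (set for MST encoders) forces disables to reach the
 * hardware immediately, since nv50_mstm_prepare()/nv50_mstm_cleanup()
 * in commit_core need to see them applied.
 */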

static void
nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
{
        struct nv50_atom *atom = nv50_atom(state);
        struct nv50_outp_atom *outp, *outt;

        list_for_each_entry_safe(outp, outt, &atom->outp, head) {
                list_del(&outp->head);
                kfree(outp);
        }

        drm_atomic_state_default_clear(state);
}

static void
nv50_disp_atomic_state_free(struct drm_atomic_state *state)
{
        struct nv50_atom *atom = nv50_atom(state);
        drm_atomic_state_default_release(&atom->state);
        kfree(atom);
}

static struct drm_atomic_state *
nv50_disp_atomic_state_alloc(struct drm_device *dev)
{
        struct nv50_atom *atom;
        if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
            drm_atomic_state_init(dev, &atom->state) < 0) {
                kfree(atom);
                return NULL;
        }
        INIT_LIST_HEAD(&atom->outp);
        return &atom->state;
}
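
/* The three callbacks above are the stock DRM recipe for subclassing
 * drm_atomic_state: allocate the container and init the embedded base,
 * free driver-private members before delegating to the default clear,
 * and release the base before freeing the container.  A minimal sketch
 * of the shape, with hypothetical names:
 *
 *      struct my_atom {
 *              struct drm_atomic_state base;
 *              struct list_head priv;          // driver extras
 *      };
 *      // downcast: container_of(state, struct my_atom, base)
 *
 * Note that clear() also runs when a state is reused (e.g. after a
 * -EDEADLK backoff), so the private list must be emptied there and not
 * only in free().
 */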

static const struct drm_mode_config_funcs
nv50_disp_func = {
        .fb_create = nouveau_user_framebuffer_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
        .atomic_check = nv50_disp_atomic_check,
        .atomic_commit = nv50_disp_atomic_commit,
        .atomic_state_alloc = nv50_disp_atomic_state_alloc,
        .atomic_state_clear = nv50_disp_atomic_state_clear,
        .atomic_state_free = nv50_disp_atomic_state_free,
};

/******************************************************************************
 * Init
 *****************************************************************************/

void
nv50_display_fini(struct drm_device *dev)
{
        struct nouveau_encoder *nv_encoder;
        struct drm_encoder *encoder;
        struct drm_plane *plane;

        drm_for_each_plane(plane, dev) {
                struct nv50_wndw *wndw = nv50_wndw(plane);
                if (plane->funcs != &nv50_wndw)
                        continue;
                nv50_wndw_fini(wndw);
        }

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
                        nv_encoder = nouveau_encoder(encoder);
                        nv50_mstm_fini(nv_encoder->dp.mstm);
                }
        }
}

int
nv50_display_init(struct drm_device *dev)
{
        struct drm_encoder *encoder;
        struct drm_plane *plane;
        u32 *push;

        push = evo_wait(nv50_mast(dev), 32);
        if (!push)
                return -EBUSY;

        evo_mthd(push, 0x0088, 1);
        evo_data(push, nv50_mast(dev)->base.sync.handle);
        evo_kick(push, nv50_mast(dev));

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
                        struct nouveau_encoder *nv_encoder =
                                nouveau_encoder(encoder);
                        nv50_mstm_init(nv_encoder->dp.mstm);
                }
        }

        drm_for_each_plane(plane, dev) {
                struct nv50_wndw *wndw = nv50_wndw(plane);
                if (plane->funcs != &nv50_wndw)
                        continue;
                nv50_wndw_init(wndw);
        }

        return 0;
}
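
/* The push at the top of nv50_display_init() re-binds the shared sync
 * buffer's handle to the core channel each time the display is
 * (re)initialised; going by how the handle is used elsewhere in this
 * file, method 0x0088 selects the context DMA object that notifier
 * writes (such as the 0x0084 request issued by
 * nv50_disp_atomic_commit_core()) go through.
 */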

void
nv50_display_destroy(struct drm_device *dev)
{
        struct nv50_disp *disp = nv50_disp(dev);

        nv50_dmac_destroy(&disp->mast.base);

        nouveau_bo_unmap(disp->sync);
        if (disp->sync)
                nouveau_bo_unpin(disp->sync);
        nouveau_bo_ref(NULL, &disp->sync);

        nouveau_display(dev)->priv = NULL;
        kfree(disp);
}

MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
static int nouveau_atomic = 0;
module_param_named(atomic, nouveau_atomic, int, 0400);
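
/* Usage note: the 0400 permissions make the parameter root-readable in
 * sysfs and not writable at runtime, so it is effectively load-time
 * only, e.g.
 *
 *      modprobe nouveau atomic=1
 *
 * or "nouveau.atomic=1" on the kernel command line, after which the
 * driver advertises DRIVER_ATOMIC and userspace may use the atomic
 * ioctl.
 */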

int
nv50_display_create(struct drm_device *dev)
{
        struct nvif_device *device = &nouveau_drm(dev)->client.device;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct dcb_table *dcb = &drm->vbios.dcb;
        struct drm_connector *connector, *tmp;
        struct nv50_disp *disp;
        struct dcb_output *dcbe;
        int crtcs, ret, i;

        disp = kzalloc(sizeof(*disp), GFP_KERNEL);
        if (!disp)
                return -ENOMEM;

        mutex_init(&disp->mutex);

        nouveau_display(dev)->priv = disp;
        nouveau_display(dev)->dtor = nv50_display_destroy;
        nouveau_display(dev)->init = nv50_display_init;
        nouveau_display(dev)->fini = nv50_display_fini;
        disp->disp = &nouveau_display(dev)->disp;
        dev->mode_config.funcs = &nv50_disp_func;
        dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
        if (nouveau_atomic)
                dev->driver->driver_features |= DRIVER_ATOMIC;

        /* small shared memory area we use for notifiers and semaphores */
        ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
                             0, 0x0000, NULL, NULL, &disp->sync);
        if (!ret) {
                ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
                if (!ret) {
                        ret = nouveau_bo_map(disp->sync);
                        if (ret)
                                nouveau_bo_unpin(disp->sync);
                }
                if (ret)
                        nouveau_bo_ref(NULL, &disp->sync);
        }

        if (ret)
                goto out;

        /* allocate master evo channel */
        ret = nv50_core_create(device, &disp->disp->object,
                               disp->sync->bo.offset, &disp->mast);
        if (ret)
                goto out;

        /* create crtc objects to represent the hw heads */
        if (disp->disp->object.oclass >= GF110_DISP)
                crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
        else
                crtcs = 0x3;

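        /* Worked example of the loop below: a mask of 0xb (heads 0, 1
         * and 3 present, head 2 absent) gives fls(0xb) == 4, so i runs
         * 0..3 and nv50_head_create() is called for bits 0, 1 and 3
         * only.  Pre-GF110 hardware gets the fixed mask 0x3 above for
         * the two heads those chips expose.
         */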
        for (i = 0; i < fls(crtcs); i++) {
                if (!(crtcs & (1 << i)))
                        continue;
                ret = nv50_head_create(dev, i);
                if (ret)
                        goto out;
        }

        /* create encoder/connector objects based on VBIOS DCB table */
        for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
                connector = nouveau_connector_create(dev, dcbe->connector);
                if (IS_ERR(connector))
                        continue;

                if (dcbe->location == DCB_LOC_ON_CHIP) {
                        switch (dcbe->type) {
                        case DCB_OUTPUT_TMDS:
                        case DCB_OUTPUT_LVDS:
                        case DCB_OUTPUT_DP:
                                ret = nv50_sor_create(connector, dcbe);
                                break;
                        case DCB_OUTPUT_ANALOG:
                                ret = nv50_dac_create(connector, dcbe);
                                break;
                        default:
                                ret = -ENODEV;
                                break;
                        }
                } else {
                        ret = nv50_pior_create(connector, dcbe);
                }

                if (ret) {
                        NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
                                dcbe->location, dcbe->type,
                                ffs(dcbe->or) - 1, ret);
                        ret = 0;
                }
        }

        /* cull any connectors we created that don't have an encoder */
        list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
                if (connector->encoder_ids[0])
                        continue;

                NV_WARN(drm, "%s has no encoders, removing\n",
                        connector->name);
                connector->funcs->destroy(connector);
        }

out:
        if (ret)
                nv50_display_destroy(dev);
        return ret;
}