/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <nvif/class.h>
#include <nvif/cl0002.h>

#include <drm/drm_atomic_helper.h>
#include "nouveau_bo.h"
31 nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma
*ctxdma
)
33 nvif_object_fini(&ctxdma
->object
);
34 list_del(&ctxdma
->head
);
38 static struct nv50_wndw_ctxdma
*
39 nv50_wndw_ctxdma_new(struct nv50_wndw
*wndw
, struct nouveau_framebuffer
*fb
)
41 struct nouveau_drm
*drm
= nouveau_drm(fb
->base
.dev
);
42 struct nv50_wndw_ctxdma
*ctxdma
;
43 const u8 kind
= fb
->nvbo
->kind
;
44 const u32 handle
= 0xfb000000 | kind
;
46 struct nv_dma_v0 base
;
48 struct nv50_dma_v0 nv50
;
49 struct gf100_dma_v0 gf100
;
50 struct gf119_dma_v0 gf119
;
53 u32 argc
= sizeof(args
.base
);
56 list_for_each_entry(ctxdma
, &wndw
->ctxdma
.list
, head
) {
57 if (ctxdma
->object
.handle
== handle
)
61 if (!(ctxdma
= kzalloc(sizeof(*ctxdma
), GFP_KERNEL
)))
62 return ERR_PTR(-ENOMEM
);
63 list_add(&ctxdma
->head
, &wndw
->ctxdma
.list
);
65 args
.base
.target
= NV_DMA_V0_TARGET_VRAM
;
66 args
.base
.access
= NV_DMA_V0_ACCESS_RDWR
;
68 args
.base
.limit
= drm
->client
.device
.info
.ram_user
- 1;
70 if (drm
->client
.device
.info
.chipset
< 0x80) {
71 args
.nv50
.part
= NV50_DMA_V0_PART_256
;
72 argc
+= sizeof(args
.nv50
);
74 if (drm
->client
.device
.info
.chipset
< 0xc0) {
75 args
.nv50
.part
= NV50_DMA_V0_PART_256
;
76 args
.nv50
.kind
= kind
;
77 argc
+= sizeof(args
.nv50
);
79 if (drm
->client
.device
.info
.chipset
< 0xd0) {
80 args
.gf100
.kind
= kind
;
81 argc
+= sizeof(args
.gf100
);
83 args
.gf119
.page
= GF119_DMA_V0_PAGE_LP
;
84 args
.gf119
.kind
= kind
;
85 argc
+= sizeof(args
.gf119
);
88 ret
= nvif_object_init(wndw
->ctxdma
.parent
, handle
, NV_DMA_IN_MEMORY
,
89 &args
, argc
, &ctxdma
->object
);
91 nv50_wndw_ctxdma_del(ctxdma
);
99 nv50_wndw_wait_armed(struct nv50_wndw
*wndw
, struct nv50_wndw_atom
*asyw
)
101 struct nv50_disp
*disp
= nv50_disp(wndw
->plane
.dev
);
102 if (asyw
->set
.ntfy
) {
103 return wndw
->func
->ntfy_wait_begun(disp
->sync
,
105 wndw
->wndw
.base
.device
);
111 nv50_wndw_flush_clr(struct nv50_wndw
*wndw
, u32
*interlock
, bool flush
,
112 struct nv50_wndw_atom
*asyw
)
114 union nv50_wndw_atom_mask clr
= {
115 .mask
= asyw
->clr
.mask
& ~(flush
? 0 : asyw
->set
.mask
),
117 if (clr
.sema
) wndw
->func
-> sema_clr(wndw
);
118 if (clr
.ntfy
) wndw
->func
-> ntfy_clr(wndw
);
119 if (clr
.image
) wndw
->func
->image_clr(wndw
);
121 interlock
[wndw
->interlock
.type
] |= wndw
->interlock
.data
;
125 nv50_wndw_flush_set(struct nv50_wndw
*wndw
, u32
*interlock
,
126 struct nv50_wndw_atom
*asyw
)
129 asyw
->image
.mode
= 0;
130 asyw
->image
.interval
= 1;
133 if (asyw
->set
.sema
) wndw
->func
->sema_set (wndw
, asyw
);
134 if (asyw
->set
.ntfy
) wndw
->func
->ntfy_set (wndw
, asyw
);
135 if (asyw
->set
.image
) wndw
->func
->image_set(wndw
, asyw
);
136 if (asyw
->set
.lut
) wndw
->func
->lut (wndw
, asyw
);
137 if (asyw
->set
.point
) {
138 wndw
->immd
->point(wndw
, asyw
);
139 wndw
->immd
->update(wndw
, interlock
);
142 interlock
[wndw
->interlock
.type
] |= wndw
->interlock
.data
;
146 nv50_wndw_ntfy_enable(struct nv50_wndw
*wndw
, struct nv50_wndw_atom
*asyw
)
148 struct nv50_disp
*disp
= nv50_disp(wndw
->plane
.dev
);
150 asyw
->ntfy
.handle
= wndw
->wndw
.sync
.handle
;
151 asyw
->ntfy
.offset
= wndw
->ntfy
;
152 asyw
->ntfy
.awaken
= false;
153 asyw
->set
.ntfy
= true;
155 wndw
->func
->ntfy_reset(disp
->sync
, wndw
->ntfy
);
160 nv50_wndw_atomic_check_release(struct nv50_wndw
*wndw
,
161 struct nv50_wndw_atom
*asyw
,
162 struct nv50_head_atom
*asyh
)
164 struct nouveau_drm
*drm
= nouveau_drm(wndw
->plane
.dev
);
165 NV_ATOMIC(drm
, "%s release\n", wndw
->plane
.name
);
166 wndw
->func
->release(wndw
, asyw
, asyh
);
167 asyw
->ntfy
.handle
= 0;
168 asyw
->sema
.handle
= 0;
172 nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom
*asyw
)
174 switch (asyw
->state
.fb
->format
->format
) {
175 case DRM_FORMAT_C8
: asyw
->image
.format
= 0x1e; break;
176 case DRM_FORMAT_XRGB8888
:
177 case DRM_FORMAT_ARGB8888
: asyw
->image
.format
= 0xcf; break;
178 case DRM_FORMAT_RGB565
: asyw
->image
.format
= 0xe8; break;
179 case DRM_FORMAT_XRGB1555
:
180 case DRM_FORMAT_ARGB1555
: asyw
->image
.format
= 0xe9; break;
181 case DRM_FORMAT_XBGR2101010
:
182 case DRM_FORMAT_ABGR2101010
: asyw
->image
.format
= 0xd1; break;
183 case DRM_FORMAT_XBGR8888
:
184 case DRM_FORMAT_ABGR8888
: asyw
->image
.format
= 0xd5; break;
193 nv50_wndw_atomic_check_acquire(struct nv50_wndw
*wndw
,
194 struct nv50_wndw_atom
*asyw
,
195 struct nv50_head_atom
*asyh
)
197 struct nouveau_framebuffer
*fb
= nouveau_framebuffer(asyw
->state
.fb
);
198 struct nouveau_drm
*drm
= nouveau_drm(wndw
->plane
.dev
);
201 NV_ATOMIC(drm
, "%s acquire\n", wndw
->plane
.name
);
203 asyw
->image
.w
= fb
->base
.width
;
204 asyw
->image
.h
= fb
->base
.height
;
205 asyw
->image
.kind
= fb
->nvbo
->kind
;
207 ret
= nv50_wndw_atomic_check_acquire_rgb(asyw
);
211 if (asyw
->image
.kind
) {
212 asyw
->image
.layout
= 0;
213 if (drm
->client
.device
.info
.chipset
>= 0xc0)
214 asyw
->image
.block
= fb
->nvbo
->mode
>> 4;
216 asyw
->image
.block
= fb
->nvbo
->mode
;
217 asyw
->image
.pitch
[0] = (fb
->base
.pitches
[0] / 4) << 4;
219 asyw
->image
.layout
= 1;
220 asyw
->image
.block
= 0;
221 asyw
->image
.pitch
[0] = fb
->base
.pitches
[0];
224 ret
= wndw
->func
->acquire(wndw
, asyw
, asyh
);
228 if (asyw
->set
.image
) {
229 if (!(asyh
->state
.pageflip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
))
230 asyw
->image
.interval
= 1;
232 asyw
->image
.interval
= 0;
233 asyw
->image
.mode
= asyw
->image
.interval
? 0 : 1;
240 nv50_wndw_atomic_check(struct drm_plane
*plane
, struct drm_plane_state
*state
)
242 struct nouveau_drm
*drm
= nouveau_drm(plane
->dev
);
243 struct nv50_wndw
*wndw
= nv50_wndw(plane
);
244 struct nv50_wndw_atom
*armw
= nv50_wndw_atom(wndw
->plane
.state
);
245 struct nv50_wndw_atom
*asyw
= nv50_wndw_atom(state
);
246 struct nv50_head_atom
*harm
= NULL
, *asyh
= NULL
;
247 bool modeset
= false;
250 NV_ATOMIC(drm
, "%s atomic_check\n", plane
->name
);
252 /* Fetch the assembly state for the head the window will belong to,
253 * and determine whether the window will be visible.
255 if (asyw
->state
.crtc
) {
256 asyh
= nv50_head_atom_get(asyw
->state
.state
, asyw
->state
.crtc
);
258 return PTR_ERR(asyh
);
259 modeset
= drm_atomic_crtc_needs_modeset(&asyh
->state
);
260 asyw
->visible
= asyh
->state
.active
;
262 asyw
->visible
= false;
265 /* Fetch assembly state for the head the window used to belong to. */
266 if (armw
->state
.crtc
) {
267 harm
= nv50_head_atom_get(asyw
->state
.state
, armw
->state
.crtc
);
269 return PTR_ERR(harm
);
272 /* Calculate new window state. */
274 asyw
->point
.x
= asyw
->state
.crtc_x
;
275 asyw
->point
.y
= asyw
->state
.crtc_y
;
276 if (memcmp(&armw
->point
, &asyw
->point
, sizeof(asyw
->point
)))
277 asyw
->set
.point
= true;
279 ret
= nv50_wndw_atomic_check_acquire(wndw
, asyw
, asyh
);
284 nv50_wndw_atomic_check_release(wndw
, asyw
, harm
);
289 /* Aside from the obvious case where the window is actively being
290 * disabled, we might also need to temporarily disable the window
291 * when performing certain modeset operations.
293 if (!asyw
->visible
|| modeset
) {
294 asyw
->clr
.ntfy
= armw
->ntfy
.handle
!= 0;
295 asyw
->clr
.sema
= armw
->sema
.handle
!= 0;
296 if (wndw
->func
->image_clr
)
297 asyw
->clr
.image
= armw
->image
.handle
[0] != 0;
298 asyw
->set
.lut
= wndw
->func
->lut
&& asyw
->visible
;
305 nv50_wndw_cleanup_fb(struct drm_plane
*plane
, struct drm_plane_state
*old_state
)
307 struct nouveau_framebuffer
*fb
= nouveau_framebuffer(old_state
->fb
);
308 struct nouveau_drm
*drm
= nouveau_drm(plane
->dev
);
310 NV_ATOMIC(drm
, "%s cleanup: %p\n", plane
->name
, old_state
->fb
);
314 nouveau_bo_unpin(fb
->nvbo
);
318 nv50_wndw_prepare_fb(struct drm_plane
*plane
, struct drm_plane_state
*state
)
320 struct nouveau_framebuffer
*fb
= nouveau_framebuffer(state
->fb
);
321 struct nouveau_drm
*drm
= nouveau_drm(plane
->dev
);
322 struct nv50_wndw
*wndw
= nv50_wndw(plane
);
323 struct nv50_wndw_atom
*asyw
= nv50_wndw_atom(state
);
324 struct nv50_head_atom
*asyh
;
325 struct nv50_wndw_ctxdma
*ctxdma
;
328 NV_ATOMIC(drm
, "%s prepare: %p\n", plane
->name
, state
->fb
);
332 ret
= nouveau_bo_pin(fb
->nvbo
, TTM_PL_FLAG_VRAM
, true);
336 ctxdma
= nv50_wndw_ctxdma_new(wndw
, fb
);
337 if (IS_ERR(ctxdma
)) {
338 nouveau_bo_unpin(fb
->nvbo
);
339 return PTR_ERR(ctxdma
);
342 asyw
->state
.fence
= reservation_object_get_excl_rcu(fb
->nvbo
->bo
.resv
);
343 asyw
->image
.handle
[0] = ctxdma
->object
.handle
;
344 asyw
->image
.offset
[0] = fb
->nvbo
->bo
.offset
;
346 if (wndw
->func
->prepare
) {
347 asyh
= nv50_head_atom_get(asyw
->state
.state
, asyw
->state
.crtc
);
349 return PTR_ERR(asyh
);
351 wndw
->func
->prepare(wndw
, asyh
, asyw
);
357 static const struct drm_plane_helper_funcs
359 .prepare_fb
= nv50_wndw_prepare_fb
,
360 .cleanup_fb
= nv50_wndw_cleanup_fb
,
361 .atomic_check
= nv50_wndw_atomic_check
,
365 nv50_wndw_atomic_destroy_state(struct drm_plane
*plane
,
366 struct drm_plane_state
*state
)
368 struct nv50_wndw_atom
*asyw
= nv50_wndw_atom(state
);
369 __drm_atomic_helper_plane_destroy_state(&asyw
->state
);
373 static struct drm_plane_state
*
374 nv50_wndw_atomic_duplicate_state(struct drm_plane
*plane
)
376 struct nv50_wndw_atom
*armw
= nv50_wndw_atom(plane
->state
);
377 struct nv50_wndw_atom
*asyw
;
378 if (!(asyw
= kmalloc(sizeof(*asyw
), GFP_KERNEL
)))
380 __drm_atomic_helper_plane_duplicate_state(plane
, &asyw
->state
);
381 asyw
->sema
= armw
->sema
;
382 asyw
->ntfy
= armw
->ntfy
;
383 asyw
->image
= armw
->image
;
384 asyw
->point
= armw
->point
;
385 asyw
->lut
= armw
->lut
;
392 nv50_wndw_reset(struct drm_plane
*plane
)
394 struct nv50_wndw_atom
*asyw
;
396 if (WARN_ON(!(asyw
= kzalloc(sizeof(*asyw
), GFP_KERNEL
))))
400 plane
->funcs
->atomic_destroy_state(plane
, plane
->state
);
401 plane
->state
= &asyw
->state
;
402 plane
->state
->plane
= plane
;
403 plane
->state
->rotation
= DRM_MODE_ROTATE_0
;
407 nv50_wndw_destroy(struct drm_plane
*plane
)
409 struct nv50_wndw
*wndw
= nv50_wndw(plane
);
410 struct nv50_wndw_ctxdma
*ctxdma
, *ctxtmp
;
412 list_for_each_entry_safe(ctxdma
, ctxtmp
, &wndw
->ctxdma
.list
, head
) {
413 nv50_wndw_ctxdma_del(ctxdma
);
416 nvif_notify_fini(&wndw
->notify
);
417 nv50_dmac_destroy(&wndw
->wimm
);
418 nv50_dmac_destroy(&wndw
->wndw
);
419 drm_plane_cleanup(&wndw
->plane
);
423 const struct drm_plane_funcs
425 .update_plane
= drm_atomic_helper_update_plane
,
426 .disable_plane
= drm_atomic_helper_disable_plane
,
427 .destroy
= nv50_wndw_destroy
,
428 .reset
= nv50_wndw_reset
,
429 .atomic_duplicate_state
= nv50_wndw_atomic_duplicate_state
,
430 .atomic_destroy_state
= nv50_wndw_atomic_destroy_state
,
434 nv50_wndw_notify(struct nvif_notify
*notify
)
436 return NVIF_NOTIFY_KEEP
;
440 nv50_wndw_fini(struct nv50_wndw
*wndw
)
442 nvif_notify_put(&wndw
->notify
);
446 nv50_wndw_init(struct nv50_wndw
*wndw
)
448 nvif_notify_get(&wndw
->notify
);
452 nv50_wndw_new_(const struct nv50_wndw_func
*func
, struct drm_device
*dev
,
453 enum drm_plane_type type
, const char *name
, int index
,
454 const u32
*format
, u32 heads
,
455 enum nv50_disp_interlock_type interlock_type
, u32 interlock_data
,
456 struct nv50_wndw
**pwndw
)
458 struct nv50_wndw
*wndw
;
462 if (!(wndw
= *pwndw
= kzalloc(sizeof(*wndw
), GFP_KERNEL
)))
466 wndw
->interlock
.type
= interlock_type
;
467 wndw
->interlock
.data
= interlock_data
;
468 wndw
->ctxdma
.parent
= &wndw
->wndw
.base
.user
;
470 wndw
->ctxdma
.parent
= &wndw
->wndw
.base
.user
;
471 INIT_LIST_HEAD(&wndw
->ctxdma
.list
);
473 for (nformat
= 0; format
[nformat
]; nformat
++);
475 ret
= drm_universal_plane_init(dev
, &wndw
->plane
, heads
, &nv50_wndw
,
476 format
, nformat
, NULL
,
477 type
, "%s-%d", name
, index
);
484 drm_plane_helper_add(&wndw
->plane
, &nv50_wndw_helper
);
486 wndw
->notify
.func
= nv50_wndw_notify
;