/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"

#include <core/gpuobj.h>
#include <subdev/gsp.h>
#include <subdev/mmu.h>
#include <subdev/vfn.h>
#include <engine/gr.h>

#include <nvhw/drf.h>

#include <nvrm/nvtypes.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h>
#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
#include <nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
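/* The doorbell handle is the token written to the usermode doorbell register
 * to notify hardware of new work on a channel: runlist ID in the upper 16
 * bits, channel ID in the lower; e.g. runlist 2, channel 5 gives 0x00020005.
 */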
static u32
r535_chan_doorbell_handle(struct nvkm_chan *chan)
{
	return (chan->cgrp->runl->id << 16) | chan->id;
}
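/* Channel start/stop are no-ops under GSP-RM; scheduling is requested via RM
 * control calls (see NVA06F_CTRL_CMD_GPFIFO_SCHEDULE below) rather than by
 * poking PFIFO registers directly.
 */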
static void
r535_chan_stop(struct nvkm_chan *chan)
{
}

static void
r535_chan_start(struct nvkm_chan *chan)
{
}
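/* Tear down the RM view of a channel: free the RM channel object and the
 * fault-method buffer, and drop the extra GR context reference taken in
 * r535_gr_ctor().
 */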
static void
r535_chan_ramfc_clear(struct nvkm_chan *chan)
{
	struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;

	nvkm_gsp_rm_free(&chan->rm.object);

	dma_free_coherent(fifo->engine.subdev.device->dev, fifo->rm.mthdbuf_size,
			  chan->rm.mthdbuf.ptr, chan->rm.mthdbuf.addr);

	nvkm_cgrp_vctx_put(chan->cgrp, &chan->rm.grctx);
}
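/* Channel IDs are grouped CHID_PER_USERD to a USERD page: with the 0x200-byte
 * gv100-style USERD entries used here, 8 * 0x200 = 0x1000, one 4KiB page.
 */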
#define CHID_PER_USERD 8
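/* Under GSP-RM, "writing RAMFC" really means filling in
 * NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS and asking RM to create, bind and
 * schedule the channel on our behalf.
 */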
static int
r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
	struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
	struct nvkm_engn *engn;
	struct nvkm_device *device = fifo->engine.subdev.device;
	NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
	const int userd_p = chan->id / CHID_PER_USERD;
	const int userd_i = chan->id % CHID_PER_USERD;
	u32 eT = ~0;
	int ret;
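	/* Ensure the GR subdev has completed oneinit before asking RM to
	 * create a channel, as RM presumably needs the GR context buffer
	 * sizes at channel-allocation time.
	 */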
	if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
		ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
		if (ret)
			return ret;
	}
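	/* RM wants an engine type for the channel; take it from the first
	 * engine on this runlist.
	 */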
	nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
		eT = engn->id;
		break;
	}

	if (WARN_ON(eT == ~0))
		return -EINVAL;
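	/* Allocate the CE fault-method buffer; its required size was queried
	 * from RM in r535_fifo_runl_ctor().
	 */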
	chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
						  fifo->rm.mthdbuf_size,
						  &chan->rm.mthdbuf.addr, GFP_KERNEL);
	if (!chan->rm.mthdbuf.ptr)
		return -ENOMEM;
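	/* Create the RM channel object itself; 0xf1f00000 | chan->id looks to
	 * be nothing more than an arbitrary unique client-side handle.
	 */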
	args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id,
				     fifo->func->chan.user.oclass, sizeof(*args),
				     &chan->rm.object);
	if (WARN_ON(IS_ERR(args)))
		return PTR_ERR(args);
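	/* Each GPFIFO entry is 8 bytes, so the entry count is the ring size
	 * in bytes divided by 8.
	 */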
	args->gpFifoOffset = offset;
	args->gpFifoEntries = length / 8;
	args->flags  = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
	args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
	args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq);
	if (!priv)
		args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
	else
		args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE);
	args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE);

	args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE);
	args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE);

	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT);
	args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
	args->hVASpace = chan->vmm->rm.object.handle;
	args->engineType = eT;
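	/* The memory region descriptors below appear to use RM conventions:
	 * addressSpace 1 = system memory, 2 = video memory; cacheAttrib
	 * 0 = uncached, 1 = cached.
	 */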
	args->instanceMem.base = chan->inst->addr;
	args->instanceMem.size = chan->inst->size;
	args->instanceMem.addressSpace = 2;
	args->instanceMem.cacheAttrib = 1;

	args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
	args->userdMem.size = fifo->func->chan.func->userd->size;
	args->userdMem.addressSpace = 2;
	args->userdMem.cacheAttrib = 1;

	args->ramfcMem.base = chan->inst->addr + 0;
	args->ramfcMem.size = 0x200;
	args->ramfcMem.addressSpace = 2;
	args->ramfcMem.cacheAttrib = 1;

	args->mthdbufMem.base = chan->rm.mthdbuf.addr;
	args->mthdbufMem.size = fifo->rm.mthdbuf_size;
	args->mthdbufMem.addressSpace = 1;
	args->mthdbufMem.cacheAttrib = 0;
	if (!priv)
		args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER);
	else
		args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN);
	args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
	args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
	ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
	if (ret)
		return ret;
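	/* Bind the channel to its engine, then ask RM to schedule it; NVA06F
	 * (KEPLER_CHANNEL_GPFIFO_A) control commands are still used for this
	 * on later channel classes.
	 */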
	{
		NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *ctrl;

		{
			NVA06F_CTRL_BIND_PARAMS *ctrl;

			ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
						    NVA06F_CTRL_CMD_BIND, sizeof(*ctrl));
			if (WARN_ON(IS_ERR(ctrl)))
				return PTR_ERR(ctrl);

			ctrl->engineType = eT;

			ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
			if (ret)
				return ret;
		}

		ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
					    NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, sizeof(*ctrl));
		if (WARN_ON(IS_ERR(ctrl)))
			return PTR_ERR(ctrl);

		ctrl->bEnable = 1;
		ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
	}

	return ret;
}
static const struct nvkm_chan_func_ramfc
r535_chan_ramfc = {
	.write = r535_chan_ramfc_write,
	.clear = r535_chan_ramfc_clear,
	.devm = 0xfff,
	.priv = true,
};
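/* Bookkeeping for one block of USERD slots: 'mem' is the client-provided
 * backing memory, 'map' its kernel mapping, 'chid' the base channel ID
 * reserved from the runlist's allocator, and 'used' a bitmask of the
 * CHID_PER_USERD slots currently handed out.
 */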
struct r535_chan_userd {
	struct nvkm_memory *mem;
	struct nvkm_memory *map;
	int chid;
	u32 used;

	struct list_head head;
};
static void
r535_chan_id_put(struct nvkm_chan *chan)
{
	struct nvkm_runl *runl = chan->cgrp->runl;
	struct nvkm_fifo *fifo = runl->fifo;
	struct r535_chan_userd *userd;

	mutex_lock(&fifo->userd.mutex);
	list_for_each_entry(userd, &fifo->userd.list, head) {
		if (userd->map == chan->userd.mem) {
			u32 chid = chan->userd.base / chan->func->userd->size;

			userd->used &= ~BIT(chid);
			if (!userd->used) {
				nvkm_memory_unref(&userd->map);
				nvkm_memory_unref(&userd->mem);
				nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
				list_del(&userd->head);
				kfree(userd);
			}

			break;
		}
	}
	mutex_unlock(&fifo->userd.mutex);
}
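/* Reserve a USERD slot (and thus a channel ID) for a channel. The
 * externally-visible channel ID combines the block's base ID with the slot
 * index: (userd->chid * CHID_PER_USERD) + chid, e.g. block 3, slot 2 gives
 * channel ID 26.
 */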
static int
r535_chan_id_get_locked(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
{
	const u32 userd_size = CHID_PER_USERD * chan->func->userd->size;
	struct nvkm_runl *runl = chan->cgrp->runl;
	struct nvkm_fifo *fifo = runl->fifo;
	struct r535_chan_userd *userd;
	u32 chid;
	int ret;

	if (ouserd + chan->func->userd->size >= userd_size ||
	    (ouserd & (chan->func->userd->size - 1))) {
		RUNL_DEBUG(runl, "ouserd %llx", ouserd);
		return -EINVAL;
	}

	chid = div_u64(ouserd, chan->func->userd->size);

	list_for_each_entry(userd, &fifo->userd.list, head) {
		if (userd->mem == muserd) {
			if (userd->used & BIT(chid))
				return -EBUSY;
			break;
		}
	}

	if (&userd->head == &fifo->userd.list) {
		if (nvkm_memory_size(muserd) < userd_size) {
			RUNL_DEBUG(runl, "userd too small");
			return -EINVAL;
		}

		userd = kzalloc(sizeof(*userd), GFP_KERNEL);
		if (!userd)
			return -ENOMEM;

		userd->chid = nvkm_chid_get(runl->chid, chan);
		if (userd->chid < 0) {
			ret = userd->chid;
			kfree(userd);
			return ret;
		}

		userd->mem = nvkm_memory_ref(muserd);

		ret = nvkm_memory_kmap(userd->mem, &userd->map);
		if (ret) {
			nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
			kfree(userd);
			return ret;
		}

		list_add(&userd->head, &fifo->userd.list);
	}

	userd->used |= BIT(chid);

	chan->userd.mem = nvkm_memory_ref(userd->map);
	chan->userd.base = ouserd;

	return (userd->chid * CHID_PER_USERD) + chid;
}
static int
r535_chan_id_get(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
{
	struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
	int ret;

	mutex_lock(&fifo->userd.mutex);
	ret = r535_chan_id_get_locked(chan, muserd, ouserd);
	mutex_unlock(&fifo->userd.mutex);
	return ret;
}
static const struct nvkm_chan_func
r535_chan = {
	.id_get = r535_chan_id_get,
	.id_put = r535_chan_id_put,
	.inst = &gf100_chan_inst,
	.userd = &gv100_chan_userd,
	.ramfc = &r535_chan_ramfc,
	.start = r535_chan_start,
	.stop = r535_chan_stop,
	.doorbell_handle = r535_chan_doorbell_handle,
};
static const struct nvkm_cgrp_func
r535_cgrp = {
};
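/* Look up which nonstall interrupt vector GSP assigned for this engine. */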
static int
r535_engn_nonstall(struct nvkm_engn *engn)
{
	struct nvkm_subdev *subdev = &engn->engine->subdev;
	int ret;

	ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst);
	WARN_ON(ret == -ENOENT);
	return ret;
}

static const struct nvkm_engn_func
r535_ce = {
	.nonstall = r535_engn_nonstall,
};
static int
r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
{
	/* RM requires GR context buffers to remain mapped until after the
	 * channel has been destroyed (as opposed to after the last gr obj
	 * has been deleted).
	 *
	 * Take an extra ref here, which will be released once the channel
	 * object has been deleted.
	 */
	refcount_inc(&vctx->refs);
	chan->rm.grctx = vctx;
	return 0;
}

static const struct nvkm_engn_func
r535_gr = {
	.nonstall = r535_engn_nonstall,
	.ctor2 = r535_gr_ctor,
};
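/* Register ("promote") a falcon engine's context buffer with RM, telling it
 * where in the channel's address space the context has been mapped.
 */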
static int
r535_flcn_bind(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
{
	struct nvkm_gsp_client *client = &chan->vmm->rm.client;
	NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;

	ctrl = nvkm_gsp_rm_ctrl_get(&chan->vmm->rm.device.subdevice,
				    NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	ctrl->hClient = client->object.handle;
	ctrl->hObject = chan->rm.object.handle;
	ctrl->hChanClient = client->object.handle;
	ctrl->virtAddress = vctx->vma->addr;
	ctrl->size = vctx->inst->size;
	ctrl->engineType = engn->id;
	ctrl->ChID = chan->id;

	return nvkm_gsp_rm_ctrl_wr(&chan->vmm->rm.device.subdevice, ctrl);
}
static int
r535_flcn_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
{
	int ret;

	if (WARN_ON(!engn->rm.size))
		return -EINVAL;

	ret = nvkm_gpuobj_new(engn->engine->subdev.device, engn->rm.size, 0, true, NULL,
			      &vctx->inst);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);
	if (ret)
		return ret;

	ret = nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0);
	if (ret)
		return ret;

	return r535_flcn_bind(engn, vctx, chan);
}

static const struct nvkm_engn_func
r535_flcn = {
	.nonstall = r535_engn_nonstall,
	.ctor2 = r535_flcn_ctor,
};
static void
r535_runl_allow(struct nvkm_runl *runl, u32 engm)
{
}

static void
r535_runl_block(struct nvkm_runl *runl, u32 engm)
{
}

static const struct nvkm_runl_func
r535_runl = {
	.block = r535_runl_block,
	.allow = r535_runl_allow,
};
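/* Translate an NVKM engine type/instance to the NV2080_ENGINE_TYPE value RM
 * uses in its device info; e.g. NVKM_ENGINE_CE instance 2 maps to
 * NV2080_ENGINE_TYPE_COPY0 + 2.
 */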
static int
r535_fifo_2080_type(enum nvkm_subdev_type type, int inst)
{
	switch (type) {
	case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0;
	case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst;
	case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2;
	case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst;
	case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst;
	case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst;
	case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA;
	case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}
static int
r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype)
{
	switch (rm) {
	case RM_ENGINE_TYPE_GR0:
		*ptype = NVKM_ENGINE_GR;
		return 0;
	case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9:
		*ptype = NVKM_ENGINE_CE;
		return rm - RM_ENGINE_TYPE_COPY0;
	case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7:
		*ptype = NVKM_ENGINE_NVDEC;
		return rm - RM_ENGINE_TYPE_NVDEC0;
	case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2:
		*ptype = NVKM_ENGINE_NVENC;
		return rm - RM_ENGINE_TYPE_NVENC0;
	case RM_ENGINE_TYPE_SW:
		*ptype = NVKM_ENGINE_SW;
		return 0;
	case RM_ENGINE_TYPE_SEC2:
		*ptype = NVKM_ENGINE_SEC2;
		return 0;
	case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7:
		*ptype = NVKM_ENGINE_NVJPG;
		return rm - RM_ENGINE_TYPE_NVJPEG0;
	case RM_ENGINE_TYPE_OFA:
		*ptype = NVKM_ENGINE_OFA;
		return 0;
	default:
		return -EINVAL;
	}
}
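/* Ask RM for the list of constructed falcons, and record each matching
 * engine's context buffer size for later use by r535_flcn_ctor().
 */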
static int
r535_fifo_ectx_size(struct nvkm_fifo *fifo)
{
	NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl;
	struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;

	ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
				   NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO,
				   sizeof(*ctrl));
	if (WARN_ON(IS_ERR(ctrl)))
		return PTR_ERR(ctrl);

	for (int i = 0; i < ctrl->numConstructedFalcons; i++) {
		nvkm_runl_foreach(runl, fifo) {
			nvkm_runl_foreach_engn(engn, runl) {
				if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) {
					engn->rm.size =
						ctrl->constructedFalconsTable[i].ctxBufferSize;
					break;
				}
			}
		}
	}

	nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
	return 0;
}
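/* Build runlists and engines from RM's device info table rather than by
 * parsing the hardware device info: one pass to create the runlists, a
 * second to populate them with engines.
 */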
static int
r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_gsp *gsp = subdev->device->gsp;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;
	u32 cgids = 2048;
	u32 chids = 2048 / CHID_PER_USERD;
	int ret;
	NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl;

	if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) ||
	    (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid)))
		return ret;

	ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
				   NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, sizeof(*ctrl));
	if (WARN_ON(IS_ERR(ctrl)))
		return PTR_ERR(ctrl);

	for (int i = 0; i < ctrl->numEntries; i++) {
		const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
		const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];

		runl = nvkm_runl_get(fifo, id, addr);
		if (!runl) {
			runl = nvkm_runl_new(fifo, id, addr, 0);
			if (WARN_ON(IS_ERR(runl)))
				continue;
		}
	}

	for (int i = 0; i < ctrl->numEntries; i++) {
		const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
		const u32 rmid = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RM_ENGINE_TYPE];
		const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];
		enum nvkm_subdev_type type;
		int inst, nv2080;

		runl = nvkm_runl_get(fifo, id, addr);
		if (!runl)
			continue;

		inst = r535_fifo_engn_type(rmid, &type);
		if (inst < 0) {
			nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid);
			nvkm_runl_del(runl);
			continue;
		}

		nv2080 = r535_fifo_2080_type(type, inst);
		if (nv2080 < 0) {
			nvkm_runl_del(runl);
			continue;
		}

		switch (type) {
		case NVKM_ENGINE_CE:
			engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst);
			break;
		case NVKM_ENGINE_GR:
			engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst);
			break;
		case NVKM_ENGINE_NVDEC:
		case NVKM_ENGINE_NVENC:
		case NVKM_ENGINE_NVJPG:
		case NVKM_ENGINE_OFA:
			engn = nvkm_runl_add(runl, nv2080, &r535_flcn, type, inst);
			break;
		case NVKM_ENGINE_SW:
			continue;
		default:
			engn = NULL;
			break;
		}

		if (!engn)
			continue;

		engn->rm.desc = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_ENG_DESC];
	}

	nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);

	{
		NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS *ctrl;

		ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
					   NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE,
					   sizeof(*ctrl));
		if (IS_ERR(ctrl))
			return PTR_ERR(ctrl);

		fifo->rm.mthdbuf_size = ctrl->size;

		nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
	}

	return r535_fifo_ectx_size(fifo);
}
static void
r535_fifo_dtor(struct nvkm_fifo *fifo)
{
	kfree(fifo->func);
}
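/* Construct an RM-backed nvkm_fifo_func: start from the HW description, then
 * override the hooks with the r535_* implementations that route everything
 * through GSP-RM.
 */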
int
r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
{
	struct nvkm_fifo_func *rm;

	if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
		return -ENOMEM;

	rm->dtor = r535_fifo_dtor;
	rm->runl_ctor = r535_fifo_runl_ctor;
	rm->runl = &r535_runl;
	rm->cgrp = hw->cgrp;
	rm->cgrp.func = &r535_cgrp;
	rm->chan = hw->chan;
	rm->chan.func = &r535_chan;
	rm->nonstall = &ga100_fifo_nonstall;
	rm->nonstall_ctor = ga100_fifo_nonstall_ctor;

	return nvkm_fifo_new_(rm, device, type, inst, pfifo);
}