1 From: Bruce Chang<BruceChang@via.com.tw>
2 Subject: add Via chrome9 drm support
5 Signed-off-by: Bruce Chang<BruceChang@via.com.tw>
6 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
9 drivers/gpu/drm/Kconfig | 7
10 drivers/gpu/drm/Makefile | 1
11 drivers/gpu/drm/via_chrome9/Makefile | 8
12 drivers/gpu/drm/via_chrome9/via_chrome9_3d_reg.h | 407 ++++++
13 drivers/gpu/drm/via_chrome9/via_chrome9_dma.c | 1285 +++++++++++++++++++++
14 drivers/gpu/drm/via_chrome9/via_chrome9_dma.h | 69 +
15 drivers/gpu/drm/via_chrome9/via_chrome9_drm.c | 950 +++++++++++++++
16 drivers/gpu/drm/via_chrome9/via_chrome9_drm.h | 443 +++++++
17 drivers/gpu/drm/via_chrome9/via_chrome9_drv.c | 224 +++
18 drivers/gpu/drm/via_chrome9/via_chrome9_drv.h | 150 ++
19 drivers/gpu/drm/via_chrome9/via_chrome9_mm.c | 435 +++++++
20 drivers/gpu/drm/via_chrome9/via_chrome9_mm.h | 67 +
21 drivers/gpu/drm/via_chrome9/via_chrome9_verifier.c | 982 ++++++++++++++++
22 drivers/gpu/drm/via_chrome9/via_chrome9_verifier.h | 61
23 14 files changed, 5089 insertions(+)
25 --- a/drivers/gpu/drm/Kconfig
26 +++ b/drivers/gpu/drm/Kconfig
27 @@ -99,6 +99,13 @@ config DRM_VIA
28 Choose this option if you have a Via unichrome or compatible video
29 chipset. If M is selected the module will be called via.
31 +config DRM_VIA_CHROME9
32 + tristate "Via chrome9 video cards"
35 + Choose this option if you have a Via chrome9 or compatible video
36 + chipset. If M is selected the module will be called via_chrome9.
39 tristate "Savage video cards"
41 --- a/drivers/gpu/drm/Makefile
42 +++ b/drivers/gpu/drm/Makefile
43 @@ -23,4 +23,5 @@ obj-$(CONFIG_DRM_I915) += i915/
44 obj-$(CONFIG_DRM_SIS) += sis/
45 obj-$(CONFIG_DRM_SAVAGE)+= savage/
46 obj-$(CONFIG_DRM_VIA) +=via/
47 +obj-$(CONFIG_DRM_VIA_CHROME9) +=via_chrome9/
50 +++ b/drivers/gpu/drm/via_chrome9/Makefile
53 +# Makefile for the drm device driver. This driver provides support for the
54 +# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
56 +ccflags-y := -Iinclude/drm
57 +via_chrome9-y := via_chrome9_drv.o via_chrome9_drm.o via_chrome9_mm.o via_chrome9_dma.o via_chrome9_verifier.o
59 +obj-$(CONFIG_DRM_VIA_CHROME9) += via_chrome9.o
60 \ No newline at end of file
62 +++ b/drivers/gpu/drm/via_chrome9/via_chrome9_3d_reg.h
65 + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
66 + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
68 + * Permission is hereby granted, free of charge, to any person
69 + * obtaining a copy of this software and associated documentation
70 + * files (the "Software"), to deal in the Software without
71 + * restriction, including without limitation the rights to use,
72 + * copy, modify, merge, publish, distribute, sub license,
73 + * and/or sell copies of the Software, and to permit persons to
74 + * whom the Software is furnished to do so, subject to the
75 + * following conditions:
77 + * The above copyright notice and this permission notice
78 + * (including the next paragraph) shall be included in all
79 + * copies or substantial portions of the Software.
81 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
82 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
83 + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
84 + * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
85 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
86 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
87 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
88 + * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
91 +#ifndef VIA_CHROME9_3D_REG_H
92 +#define VIA_CHROME9_3D_REG_H
93 +#define GetMMIORegister(base, offset) \
94 + (*(__volatile__ unsigned int *)(void *)(((unsigned char *)(base)) + \
96 +#define SetMMIORegister(base, offset, val) \
97 + (*(__volatile__ unsigned int *)(void *)(((unsigned char *)(base)) + \
100 +#define GetMMIORegisterU8(base, offset) \
101 + (*(__volatile__ unsigned char *)(void *)(((unsigned char *)(base)) + \
103 +#define SetMMIORegisterU8(base, offset, val) \
104 + (*(__volatile__ unsigned char *)(void *)(((unsigned char *)(base)) + \
107 +#define BCI_SEND(bci, value) (*(bci)++ = (unsigned long)(value))
108 +#define BCI_SET_STREAM_REGISTER(bci_base, bci_index, reg_value) \
110 + unsigned long cmd; \
112 + cmd = (0x90000000 \
113 + | (1<<16) /* stream processor register */ \
114 + | (bci_index & 0x3FFC)); /* MMIO register address */ \
115 + BCI_SEND(bci_base, cmd); \
116 + BCI_SEND(bci_base, reg_value); \
119 +/* Command Header Type */
121 +#define INV_AGPHeader0 0xFE000000
122 +#define INV_AGPHeader1 0xFE010000
123 +#define INV_AGPHeader2 0xFE020000
124 +#define INV_AGPHeader3 0xFE030000
125 +#define INV_AGPHeader4 0xFE040000
126 +#define INV_AGPHeader5 0xFE050000
127 +#define INV_AGPHeader6 0xFE060000
128 +#define INV_AGPHeader7 0xFE070000
129 +#define INV_AGPHeader82 0xFE820000
130 +#define INV_AGPHeader_MASK 0xFFFF0000
131 +#define INV_DUMMY_MASK 0xFF000000
133 +/*send pause address of AGP ring command buffer via this IO port*/
134 +#define INV_REG_PCIPAUSE 0x294
135 +#define INV_REG_PCIPAUSE_ENABLE 0x4
137 +#define INV_CMDBUF_THRESHOLD (8)
138 +#define INV_QW_PAUSE_ALIGN 0x40
140 +/* Transmission IO Space*/
141 +#define INV_REG_CR_TRANS 0x041C
142 +#define INV_REG_CR_BEGIN 0x0420
143 +#define INV_REG_CR_END 0x0438
145 +#define INV_REG_3D_TRANS 0x043C
146 +#define INV_REG_3D_BEGIN 0x0440
147 +#define INV_REG_3D_END 0x06FC
148 +#define INV_REG_23D_WAIT 0x326C
149 +/*3D / 2D ID Control (Only For Group A)*/
150 +#define INV_REG_2D3D_ID_CTRL 0x060
155 +#define INV_RB_ENG_STATUS 0x0400
156 +#define INV_ENG_BUSY_HQV0 0x00040000
157 +#define INV_ENG_BUSY_HQV1 0x00020000
158 +#define INV_ENG_BUSY_CR 0x00000010
159 +#define INV_ENG_BUSY_MPEG 0x00000008
160 +#define INV_ENG_BUSY_VQ 0x00000004
161 +#define INV_ENG_BUSY_2D 0x00000002
162 +#define INV_ENG_BUSY_3D 0x00001FE1
163 +#define INV_ENG_BUSY_ALL \
164 + (INV_ENG_BUSY_2D | INV_ENG_BUSY_3D | INV_ENG_BUSY_CR)
166 +/* Command Queue Status*/
167 +#define INV_RB_VQ_STATUS 0x0448
168 +#define INV_VQ_FULL 0x40000000
170 +/* AGP command buffer pointer current position*/
171 +#define INV_RB_AGPCMD_CURRADDR 0x043C
173 +/* AGP command buffer status*/
174 +#define INV_RB_AGPCMD_STATUS 0x0444
175 +#define INV_AGPCMD_InPause 0x80000000
177 +/*AGP command buffer pause address*/
178 +#define INV_RB_AGPCMD_PAUSEADDR 0x045C
180 +/*AGP command buffer jump address*/
181 +#define INV_RB_AGPCMD_JUMPADDR 0x0460
183 +/*AGP command buffer start address*/
184 +#define INV_RB_AGPCMD_STARTADDR 0x0464
188 +#define NUMBER_OF_EVENT_TAGS 1024
189 +#define NUMBER_OF_APERTURES_CLB 16
191 +/* Register definition */
192 +#define HW_SHADOW_ADDR 0x8520
193 +#define HW_GARTTABLE_ADDR 0x8540
195 +#define INV_HSWFlag_DBGMASK 0x00000FFF
196 +#define INV_HSWFlag_ENCODEMASK 0x007FFFF0
197 +#define INV_HSWFlag_ADDRSHFT 8
198 +#define INV_HSWFlag_DECODEMASK \
199 + (INV_HSWFlag_ENCODEMASK << INV_HSWFlag_ADDRSHFT)
200 +#define INV_HSWFlag_ADDR_ENCODE(x) 0xCC000000
201 +#define INV_HSWFlag_ADDR_DECODE(x) \
202 + (((unsigned int)x & INV_HSWFlag_DECODEMASK) >> INV_HSWFlag_ADDRSHFT)
205 +#define INV_SubA_HAGPBstL 0x60000000
206 +#define INV_SubA_HAGPBstH 0x61000000
207 +#define INV_SubA_HAGPBendL 0x62000000
208 +#define INV_SubA_HAGPBendH 0x63000000
209 +#define INV_SubA_HAGPBpL 0x64000000
210 +#define INV_SubA_HAGPBpID 0x65000000
211 +#define INV_HAGPBpID_PAUSE 0x00000000
212 +#define INV_HAGPBpID_JUMP 0x00000100
213 +#define INV_HAGPBpID_STOP 0x00000200
215 +#define INV_HAGPBpH_MASK 0x000000FF
216 +#define INV_HAGPBpH_SHFT 0
218 +#define INV_SubA_HAGPBjumpL 0x66000000
219 +#define INV_SubA_HAGPBjumpH 0x67000000
220 +#define INV_HAGPBjumpH_MASK 0x000000FF
221 +#define INV_HAGPBjumpH_SHFT 0
223 +#define INV_SubA_HFthRCM 0x68000000
224 +#define INV_HFthRCM_MASK 0x003F0000
225 +#define INV_HFthRCM_SHFT 16
226 +#define INV_HFthRCM_8 0x00080000
227 +#define INV_HFthRCM_10 0x000A0000
228 +#define INV_HFthRCM_18 0x00120000
229 +#define INV_HFthRCM_24 0x00180000
230 +#define INV_HFthRCM_32 0x00200000
232 +#define INV_HAGPBClear 0x00000008
234 +#define INV_HRSTTrig_RestoreAGP 0x00000004
235 +#define INV_HRSTTrig_RestoreAll 0x00000002
236 +#define INV_HAGPBTrig 0x00000001
238 +#define INV_ParaSubType_MASK 0xff000000
239 +#define INV_ParaType_MASK 0x00ff0000
240 +#define INV_ParaOS_MASK 0x0000ff00
241 +#define INV_ParaAdr_MASK 0x000000ff
242 +#define INV_ParaSubType_SHIFT 24
243 +#define INV_ParaType_SHIFT 16
244 +#define INV_ParaOS_SHIFT 8
245 +#define INV_ParaAdr_SHIFT 0
247 +#define INV_ParaType_Vdata 0x00000000
248 +#define INV_ParaType_Attr 0x00010000
249 +#define INV_ParaType_Tex 0x00020000
250 +#define INV_ParaType_Pal 0x00030000
251 +#define INV_ParaType_FVF 0x00040000
252 +#define INV_ParaType_PreCR 0x00100000
253 +#define INV_ParaType_CR 0x00110000
254 +#define INV_ParaType_Cfg 0x00fe0000
255 +#define INV_ParaType_Dummy 0x00300000
257 +#define INV_SubType_Tex0 0x00000000
258 +#define INV_SubType_Tex1 0x00000001
259 +#define INV_SubType_Tex2 0x00000002
260 +#define INV_SubType_Tex3 0x00000003
261 +#define INV_SubType_Tex4 0x00000004
262 +#define INV_SubType_Tex5 0x00000005
263 +#define INV_SubType_Tex6 0x00000006
264 +#define INV_SubType_Tex7 0x00000007
265 +#define INV_SubType_General 0x000000fe
266 +#define INV_SubType_TexSample 0x00000020
268 +#define INV_HWBasL_MASK 0x00FFFFFF
269 +#define INV_HWBasH_MASK 0xFF000000
270 +#define INV_HWBasH_SHFT 24
271 +#define INV_HWBasL(x) ((unsigned int)(x) & INV_HWBasL_MASK)
272 +#define INV_HWBasH(x) ((unsigned int)(x) >> INV_HWBasH_SHFT)
273 +#define INV_HWBas256(x) ((unsigned int)(x) >> 8)
274 +#define INV_HWPit32(x) ((unsigned int)(x) >> 5)
276 +/* Read Back Register Setting */
277 +#define INV_SubA_HSetRBGID 0x02000000
278 +#define INV_HSetRBGID_CR 0x00000000
279 +#define INV_HSetRBGID_FE 0x00000001
280 +#define INV_HSetRBGID_PE 0x00000002
281 +#define INV_HSetRBGID_RC 0x00000003
282 +#define INV_HSetRBGID_PS 0x00000004
283 +#define INV_HSetRBGID_XE 0x00000005
284 +#define INV_HSetRBGID_BE 0x00000006
287 +struct drm_clb_event_tag_info {
288 + unsigned int *linear_address;
289 + unsigned int *event_tag_linear_address;
290 + int usage[NUMBER_OF_EVENT_TAGS];
291 + unsigned int pid[NUMBER_OF_EVENT_TAGS];
294 +static inline int is_agp_header(unsigned int data)
296 + switch (data & INV_AGPHeader_MASK) {
297 + case INV_AGPHeader0:
298 + case INV_AGPHeader1:
299 + case INV_AGPHeader2:
300 + case INV_AGPHeader3:
301 + case INV_AGPHeader4:
302 + case INV_AGPHeader5:
303 + case INV_AGPHeader6:
304 + case INV_AGPHeader7:
312 +#define ADDCmdHeader0_INVI(pCmd, dwCount) \
314 + /* 4 unsigned int align, insert NULL Command for padding */ \
315 + while (((unsigned long *)(pCmd)) & 0xF) { \
316 + *(pCmd)++ = 0xCC000000; \
318 + *(pCmd)++ = INV_AGPHeader0; \
319 + *(pCmd)++ = (dwCount); \
321 + *(pCmd)++ = (unsigned int)INV_HSWFlag_ADDR_ENCODE(pCmd); \
325 +#define ADDCmdHeader1_INVI(pCmd, dwAddr, dwCount) \
327 + /* 4 unsigned int align, insert NULL Command for padding */ \
328 + while (((unsigned long *)(pCmd)) & 0xF) { \
329 + *(pCmd)++ = 0xCC000000; \
331 + *(pCmd)++ = INV_AGPHeader1 | (dwAddr); \
332 + *(pCmd)++ = (dwCount); \
334 + *(pCmd)++ = (unsigned int)INV_HSWFlag_ADDR_ENCODE(pCmd); \
337 +/* Header2: CR/3D */
338 +#define ADDCmdHeader2_INVI(pCmd, dwAddr, dwType) \
340 + /* 4 unsigned int align, insert NULL Command for padding */ \
341 + while (((unsigned int)(pCmd)) & 0xF) { \
342 + *(pCmd)++ = 0xCC000000; \
344 + *(pCmd)++ = INV_AGPHeader2 | ((dwAddr)+4); \
345 + *(pCmd)++ = (dwAddr); \
346 + *(pCmd)++ = (dwType); \
347 + *(pCmd)++ = (unsigned int)INV_HSWFlag_ADDR_ENCODE(pCmd); \
350 +/* Header2: CR/3D with SW Flag */
351 +#define ADDCmdHeader2_SWFlag_INVI(pCmd, dwAddr, dwType, dwSWFlag) \
353 + /* 4 unsigned int align, insert NULL Command for padding */ \
354 + while (((unsigned long *)(pCmd)) & 0xF) { \
355 + *(pCmd)++ = 0xCC000000; \
357 + *(pCmd)++ = INV_AGPHeader2 | ((dwAddr)+4); \
358 + *(pCmd)++ = (dwAddr); \
359 + *(pCmd)++ = (dwType); \
360 + *(pCmd)++ = (dwSWFlag); \
365 +#define ADDCmdHeader3_INVI(pCmd, dwType, dwStart, dwCount) \
367 + /* 4 unsigned int align, insert NULL Command for padding */ \
368 + while (((unsigned long *)(pCmd)) & 0xF) { \
369 + *(pCmd)++ = 0xCC000000; \
371 + *(pCmd)++ = INV_AGPHeader3 | INV_REG_3D_TRANS; \
372 + *(pCmd)++ = (dwCount); \
373 + *(pCmd)++ = (dwType) | ((dwStart) & 0xFFFF); \
374 + *(pCmd)++ = (unsigned int)INV_HSWFlag_ADDR_ENCODE(pCmd); \
377 +/* Header3: 3D with SW Flag */
378 +#define ADDCmdHeader3_SWFlag_INVI(pCmd, dwType, dwStart, dwSWFlag, dwCount) \
380 + /* 4 unsigned int align, insert NULL Command for padding */ \
381 + while (((unsigned long *)(pCmd)) & 0xF) { \
382 + *(pCmd)++ = 0xCC000000; \
384 + *(pCmd)++ = INV_AGPHeader3 | INV_REG_3D_TRANS; \
385 + *(pCmd)++ = (dwCount); \
386 + *(pCmd)++ = (dwType) | ((dwStart) & 0xFFFF); \
387 + *(pCmd)++ = (dwSWFlag); \
391 +#define ADDCmdHeader4_INVI(pCmd, dwAddr, dwCount, id) \
393 + /* 4 unsigned int align, insert NULL Command for padding */ \
394 + while (((unsigned long *)(pCmd)) & 0xF) { \
395 + *(pCmd)++ = 0xCC000000; \
397 + *(pCmd)++ = INV_AGPHeader4 | (dwAddr); \
398 + *(pCmd)++ = (dwCount); \
399 + *(pCmd)++ = (id); \
404 +#define ADDCmdHeader5_INVI(pCmd, dwQWcount, id) \
406 + /* 4 unsigned int align, insert NULL Command for padding */ \
407 + while (((unsigned long *)(pCmd)) & 0xF) { \
408 + *(pCmd)++ = 0xCC000000; \
410 + *(pCmd)++ = INV_AGPHeader5; \
411 + *(pCmd)++ = (dwQWcount); \
412 + *(pCmd)++ = (id); \
416 +/* Header6: DEBUG */
417 +#define ADDCmdHeader6_INVI(pCmd) \
419 + /* 4 unsigned int align, insert NULL Command for padding */ \
420 + while (((unsigned long *)(pCmd)) & 0xF) { \
421 + *(pCmd)++ = 0xCC000000; \
423 + *(pCmd)++ = INV_AGPHeader6; \
430 +#define ADDCmdHeader7_INVI(pCmd, dwQWcount, id) \
432 + /* 4 unsigned int align, insert NULL Command for padding */ \
433 + while (((unsigned long *)(pCmd)) & 0xF) { \
434 + *(pCmd)++ = 0xCC000000; \
436 + *(pCmd)++ = INV_AGPHeader7; \
437 + *(pCmd)++ = (dwQWcount); \
438 + *(pCmd)++ = (id); \
442 +/* Header82: Branch buffer */
443 +#define ADDCmdHeader82_INVI(pCmd, dwAddr, dwType); \
445 + /* 4 unsigned int align, insert NULL Command for padding */ \
446 + while (((unsigned long *)(pCmd)) & 0xF) { \
447 + *(pCmd)++ = 0xCC000000; \
449 + *(pCmd)++ = INV_AGPHeader82 | ((dwAddr)+4); \
450 + *(pCmd)++ = (dwAddr); \
451 + *(pCmd)++ = (dwType); \
452 + *(pCmd)++ = 0xCC000000; \
456 +#define ADD2DCmd_INVI(pCmd, dwAddr, dwCmd) \
458 + *(pCmd)++ = (dwAddr); \
459 + *(pCmd)++ = (dwCmd); \
462 +#define ADDCmdData_INVI(pCmd, dwCmd) (*(pCmd)++ = (dwCmd))
464 +#define ADDCmdDataStream_INVI(pCmdBuf, pCmd, dwCount) \
466 + memcpy((pCmdBuf), (pCmd), ((dwCount)<<2)); \
467 + (pCmdBuf) += (dwCount); \
472 +++ b/drivers/gpu/drm/via_chrome9/via_chrome9_dma.c
475 + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
476 + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
478 + * Permission is hereby granted, free of charge, to any person
479 + * obtaining a copy of this software and associated documentation
480 + * files (the "Software"), to deal in the Software without
481 + * restriction, including without limitation the rights to use,
482 + * copy, modify, merge, publish, distribute, sub license,
483 + * and/or sell copies of the Software, and to permit persons to
484 + * whom the Software is furnished to do so, subject to the
485 + * following conditions:
487 + * The above copyright notice and this permission notice
488 + * (including the next paragraph) shall be included in all
489 + * copies or substantial portions of the Software.
491 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
492 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
493 + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
494 + * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
495 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
496 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
497 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
498 + * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
503 +#include "via_chrome9_drm.h"
504 +#include "via_chrome9_drv.h"
505 +#include "via_chrome9_3d_reg.h"
506 +#include "via_chrome9_dma.h"
508 +#define NULLCOMMANDNUMBER 256
509 +unsigned int NULL_COMMAND_INV[4] =
510 + { 0xCC000000, 0xCD000000, 0xCE000000, 0xCF000000 };
513 +via_chrome9ke_assert(int a)
518 +ProtectSizeValue(unsigned int size)
521 + for (i = 0; i < 8; i++)
522 + if ((size > (1 << (i + 12)))
523 + && (size <= (1 << (i + 13))))
528 +void via_chrome9_dma_init_inv(struct drm_device *dev)
530 + struct drm_via_chrome9_private *dev_priv =
531 + (struct drm_via_chrome9_private *)dev->dev_private;
532 + struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
533 + dev_priv->dma_manager;
535 + if (dev_priv->chip_sub_index == CHIP_H6S2) {
536 + unsigned int *pGARTTable;
537 + unsigned int i, entries, GARTOffset;
538 + unsigned char sr6a, sr6b, sr6c, sr6f, sr7b;
539 + unsigned int *addrlinear;
540 + unsigned int size, alignedoffset;
542 + entries = dev_priv->pagetable_map.pagetable_size /
543 + sizeof(unsigned int);
544 + pGARTTable = dev_priv->pagetable_map.pagetable_handle;
546 + GARTOffset = dev_priv->pagetable_map.pagetable_offset;
548 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
549 + sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
551 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
553 + sr6a = (unsigned char)((GARTOffset & 0xff000) >> 12);
554 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6a);
555 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6a);
557 + sr6b = (unsigned char)((GARTOffset & 0xff00000) >> 20);
558 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6b);
559 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6b);
561 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
562 + sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
563 + sr6c |= ((unsigned char)((GARTOffset >> 28) & 0x01));
564 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
566 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x7b);
567 + sr7b = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
569 + sr7b |= ProtectSizeValue(dev_priv->
570 + pagetable_map.pagetable_size);
571 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr7b);
573 + for (i = 0; i < entries; i++)
574 + writel(0x80000000, pGARTTable+i);
577 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6f);
579 + sr6f = GetMMIORegisterU8(dev_priv->mmio->handle,
581 + } while (sr6f & 0x80);
584 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6f);
586 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
587 + sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
589 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
591 + if (dev_priv->drm_agp_type != DRM_AGP_DISABLED) {
592 + size = lpcmDMAManager->DMASize * sizeof(unsigned int) +
593 + dev_priv->agp_size;
595 + entries = (size + PAGE_SIZE - 1) / PAGE_SIZE;
597 + (unsigned int *)dev_priv->pcie_vmalloc_nocache;
599 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
601 + GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
603 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
605 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6f);
607 + sr6f = GetMMIORegisterU8(dev_priv->mmio->handle,
609 + } while (sr6f & 0x80);
611 + for (i = 0; i < entries; i++)
612 + writel(page_to_pfn(vmalloc_to_page(
613 + (void *)addrlinear + PAGE_SIZE * i)) &
614 + 0x3fffffff, pGARTTable + i + alignedoffset);
617 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6f);
619 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
621 + GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
623 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
628 + if (dev_priv->drm_agp_type == DRM_AGP_DOUBLE_BUFFER)
629 + SetAGPDoubleCmd_inv(dev);
630 + else if (dev_priv->drm_agp_type == DRM_AGP_RING_BUFFER)
631 + SetAGPRingCmdRegs_inv(dev);
637 +InitPCIEGART(struct drm_via_chrome9_private *dev_priv)
639 + unsigned int *pGARTTable;
640 + unsigned int i, entries, GARTOffset;
641 + unsigned char sr6a, sr6b, sr6c, sr6f, sr7b;
643 + if (!dev_priv->pagetable_map.pagetable_size)
646 + entries = dev_priv->pagetable_map.pagetable_size / sizeof(unsigned int);
649 + ioremap_nocache(dev_priv->fb_base_address +
650 + dev_priv->pagetable_map.pagetable_offset,
651 + dev_priv->pagetable_map.pagetable_size);
653 + dev_priv->pagetable_map.pagetable_handle = pGARTTable;
657 + /*set gart table base */
658 + GARTOffset = dev_priv->pagetable_map.pagetable_offset;
660 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
661 + sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
663 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
665 + sr6a = (unsigned char) ((GARTOffset & 0xff000) >> 12);
666 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6a);
667 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6a);
669 + sr6b = (unsigned char) ((GARTOffset & 0xff00000) >> 20);
670 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6b);
671 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6b);
673 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
674 + sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
675 + sr6c |= ((unsigned char) ((GARTOffset >> 28) & 0x01));
676 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
678 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x7b);
679 + sr7b = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
681 + sr7b |= ProtectSizeValue(dev_priv->pagetable_map.pagetable_size);
682 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr7b);
684 + for (i = 0; i < entries; i++)
685 + writel(0x80000000, pGARTTable + i);
687 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6f);
689 + sr6f = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
691 + while (sr6f & 0x80)
695 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6f);
697 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
698 + sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
700 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
706 +static unsigned int *
707 +AllocAndBindPCIEMemory(struct drm_via_chrome9_private *dev_priv,
708 + unsigned int size, unsigned int offset)
710 + unsigned int *addrlinear;
711 + unsigned int *pGARTTable;
712 + unsigned int entries, alignedoffset, i;
713 + unsigned char sr6c, sr6f;
718 + entries = (size + PAGE_SIZE - 1) / PAGE_SIZE;
719 + alignedoffset = (offset + PAGE_SIZE - 1) / PAGE_SIZE;
721 + if ((entries + alignedoffset) >
722 + (dev_priv->pagetable_map.pagetable_size / sizeof(unsigned int)))
726 + __vmalloc(entries * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM,
727 + PAGE_KERNEL_NOCACHE);
732 + pGARTTable = dev_priv->pagetable_map.pagetable_handle;
734 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
735 + sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
737 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
739 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6f);
741 + sr6f = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
743 + while (sr6f & 0x80)
746 + for (i = 0; i < entries; i++)
748 + (vmalloc_to_page((void *) addrlinear + PAGE_SIZE * i)) &
749 + 0x3fffffff, pGARTTable + i + alignedoffset);
752 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6f);
754 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
755 + sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
757 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
764 +SetAGPDoubleCmd_inv(struct drm_device *dev)
766 + /* we now don't use double buffer */
771 +SetAGPRingCmdRegs_inv(struct drm_device *dev)
773 + struct drm_via_chrome9_private *dev_priv =
774 + (struct drm_via_chrome9_private *) dev->dev_private;
775 + struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
776 + (struct drm_via_chrome9_DMA_manager *) dev_priv->dma_manager;
777 + unsigned int AGPBufLinearBase = 0, AGPBufPhysicalBase = 0;
778 + unsigned long *pFree;
779 + unsigned int dwStart, dwEnd, dwPause, AGPCurrAddr, AGPCurStat, CurrAGP;
780 + unsigned int dwReg60, dwReg61, dwReg62, dwReg63,
781 + dwReg64, dwReg65, dwJump;
783 + lpcmDMAManager->pFree = lpcmDMAManager->pBeg;
785 + AGPBufLinearBase = (unsigned int) lpcmDMAManager->addr_linear;
786 + AGPBufPhysicalBase =
787 + (dev_priv->chip_agp ==
788 + CHIP_PCIE) ? 0 : (unsigned int) dev->agp->base +
789 + lpcmDMAManager->pPhysical;
790 + /*add shadow offset */
793 + GetMMIORegister(dev_priv->mmio->handle, INV_RB_AGPCMD_CURRADDR);
795 + GetMMIORegister(dev_priv->mmio->handle, INV_RB_AGPCMD_STATUS);
797 + if (AGPCurStat & INV_AGPCMD_InPause) {
799 + GetMMIORegister(dev_priv->mmio->handle,
800 + INV_RB_AGPCMD_CURRADDR);
801 + pFree = (unsigned long *) (AGPBufLinearBase + AGPCurrAddr -
802 + AGPBufPhysicalBase);
803 + ADDCmdHeader2_INVI(pFree, INV_REG_CR_TRANS, INV_ParaType_Dummy);
804 + if (dev_priv->chip_sub_index == CHIP_H6S2)
806 + ADDCmdData_INVI(pFree, 0xCCCCCCC0);
807 + ADDCmdData_INVI(pFree, 0xDDD00000);
809 + while ((u32)((unsigned int) pFree) & 0x7f)
811 + /*for 8*128bit aligned */
814 + ADDCmdData_INVI(pFree, 0xCCCCCCC0);
815 + ADDCmdData_INVI(pFree, 0xDDD00000);
817 + while ((u32) ((unsigned int) pFree) & 0x1f)
819 + /*for 256bit aligned */
821 + (u32) (((unsigned int) pFree) - AGPBufLinearBase +
822 + AGPBufPhysicalBase - 16);
824 + dwReg64 = INV_SubA_HAGPBpL | INV_HWBasL(dwPause);
826 + INV_SubA_HAGPBpID | INV_HWBasH(dwPause) |
829 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS,
830 + INV_ParaType_PreCR);
831 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
833 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
836 + while (GetMMIORegister(dev_priv->mmio->handle,
837 + INV_RB_ENG_STATUS) & INV_ENG_BUSY_ALL)
841 + (u32) ((unsigned int) lpcmDMAManager->pBeg - AGPBufLinearBase +
842 + AGPBufPhysicalBase);
843 + dwEnd = (u32) ((unsigned int) lpcmDMAManager->pEnd - AGPBufLinearBase +
844 + AGPBufPhysicalBase);
846 + lpcmDMAManager->pFree = lpcmDMAManager->pBeg;
847 + if (dev_priv->chip_sub_index == CHIP_H6S2) {
848 + ADDCmdHeader2_INVI(lpcmDMAManager->pFree, INV_REG_CR_TRANS,
849 + INV_ParaType_Dummy);
851 + ADDCmdData_INVI(lpcmDMAManager->pFree, 0xCCCCCCC0);
852 + ADDCmdData_INVI(lpcmDMAManager->pFree, 0xDDD00000);
854 + while ((u32)((unsigned long *) lpcmDMAManager->pFree) & 0x7f)
857 + dwJump = 0xFFFFFFF0;
859 + (u32)(((unsigned int) lpcmDMAManager->pFree) -
860 + 16 - AGPBufLinearBase + AGPBufPhysicalBase);
862 + DRM_DEBUG("dwStart = %08x, dwEnd = %08x, dwPause = %08x\n", dwStart,
865 + dwReg60 = INV_SubA_HAGPBstL | INV_HWBasL(dwStart);
866 + dwReg61 = INV_SubA_HAGPBstH | INV_HWBasH(dwStart);
867 + dwReg62 = INV_SubA_HAGPBendL | INV_HWBasL(dwEnd);
868 + dwReg63 = INV_SubA_HAGPBendH | INV_HWBasH(dwEnd);
869 + dwReg64 = INV_SubA_HAGPBpL | INV_HWBasL(dwPause);
870 + dwReg65 = INV_SubA_HAGPBpID | INV_HWBasH(dwPause) | INV_HAGPBpID_PAUSE;
872 + if (dev_priv->chip_sub_index == CHIP_H6S2)
875 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS,
876 + INV_ParaType_PreCR);
877 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg60);
878 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg61);
879 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg62);
880 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg63);
881 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg64);
882 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg65);
883 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
884 + INV_SubA_HAGPBjumpL | INV_HWBasL(dwJump));
885 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
886 + INV_SubA_HAGPBjumpH | INV_HWBasH(dwJump));
888 + /* Trigger AGP cycle */
889 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
890 + INV_SubA_HFthRCM | INV_HFthRCM_10 | INV_HAGPBTrig);
894 + GetMMIORegister(dev_priv->mmio->handle, INV_RB_AGPCMD_CURRADDR);
896 + lpcmDMAManager->pInUseBySW = lpcmDMAManager->pFree;
899 +/* Do hw initialization and determine whether to use dma or mmio to
902 +via_chrome9_hw_init(struct drm_device *dev,
903 + struct drm_via_chrome9_init *init)
905 + struct drm_via_chrome9_private *dev_priv =
906 + (struct drm_via_chrome9_private *) dev->dev_private;
907 + unsigned retval = 0;
908 + unsigned int *pGARTTable, *addrlinear = NULL;
910 + struct drm_clb_event_tag_info *event_tag_info;
911 + struct drm_via_chrome9_DMA_manager *lpcmDMAManager = NULL;
913 + if (init->chip_agp == CHIP_PCIE) {
914 + dev_priv->pagetable_map.pagetable_offset =
915 + init->garttable_offset;
916 + dev_priv->pagetable_map.pagetable_size = init->garttable_size;
917 + dev_priv->agp_size = init->agp_tex_size;
918 + /*Henry :prepare for PCIE texture buffer */
920 + dev_priv->pagetable_map.pagetable_offset = 0;
921 + dev_priv->pagetable_map.pagetable_size = 0;
924 + dev_priv->dma_manager =
925 + kmalloc(sizeof(struct drm_via_chrome9_DMA_manager), GFP_KERNEL);
926 + if (!dev_priv->dma_manager) {
927 + DRM_ERROR("could not allocate system for dma_manager!\n");
932 + (struct drm_via_chrome9_DMA_manager *) dev_priv->dma_manager;
933 + ((struct drm_via_chrome9_DMA_manager *)
934 + dev_priv->dma_manager)->DMASize = init->DMA_size;
935 + ((struct drm_via_chrome9_DMA_manager *)
936 + dev_priv->dma_manager)->pPhysical = init->DMA_phys_address;
938 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS, 0x00110000);
939 + if (dev_priv->chip_sub_index == CHIP_H6S2) {
940 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
942 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
945 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
947 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
951 + /* Specify fence command read back ID */
952 + /* Default the read back ID is CR */
953 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS,
954 + INV_ParaType_PreCR);
955 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
956 + INV_SubA_HSetRBGID | INV_HSetRBGID_CR);
958 + DRM_DEBUG("begin to init\n");
960 + if (dev_priv->chip_sub_index == CHIP_H6S2) {
961 + dev_priv->pcie_vmalloc_nocache = 0;
962 + if (dev_priv->pagetable_map.pagetable_size)
963 + retval = InitPCIEGART(dev_priv);
965 + if (retval && dev_priv->drm_agp_type != DRM_AGP_DISABLED) {
967 + AllocAndBindPCIEMemory(dev_priv,
968 + lpcmDMAManager->DMASize +
969 + dev_priv->agp_size, 0);
971 + dev_priv->pcie_vmalloc_nocache = (unsigned long)
974 + dev_priv->bci_buffer =
975 + vmalloc(MAX_BCI_BUFFER_SIZE);
976 + dev_priv->drm_agp_type = DRM_AGP_DISABLED;
979 + dev_priv->bci_buffer = vmalloc(MAX_BCI_BUFFER_SIZE);
980 + dev_priv->drm_agp_type = DRM_AGP_DISABLED;
983 + if (dev_priv->drm_agp_type != DRM_AGP_DISABLED) {
985 + addrlinear = (unsigned int *)
986 + ioremap(dev->agp->base +
987 + lpcmDMAManager->pPhysical,
988 + lpcmDMAManager->DMASize);
989 + dev_priv->bci_buffer = NULL;
991 + dev_priv->bci_buffer = vmalloc(MAX_BCI_BUFFER_SIZE);
992 +		/*Homer, BCI path always uses this block of memory */
996 + /*till here we have known whether support dma or not */
997 + pages = dev->sg->pages;
998 + event_tag_info = vmalloc(sizeof(struct drm_clb_event_tag_info));
999 + memset(event_tag_info, 0, sizeof(struct drm_clb_event_tag_info));
1000 + if (!event_tag_info)
1001 + return DRM_ERROR(" event_tag_info allocate error!");
1003 + /* aligned to 16k alignment */
1004 + event_tag_info->linear_address =
1006 + *) (((unsigned int) dev_priv->shadow_map.shadow_handle +
1007 + 0x3fff) & 0xffffc000);
1008 + event_tag_info->event_tag_linear_address =
1009 + event_tag_info->linear_address + 3;
1010 + dev_priv->event_tag_info = (void *) event_tag_info;
1011 + dev_priv->max_apertures = NUMBER_OF_APERTURES_CLB;
1013 + /* Initialize DMA data structure */
1014 + lpcmDMAManager->DMASize /= sizeof(unsigned int);
1015 + lpcmDMAManager->pBeg = addrlinear;
1016 + lpcmDMAManager->pFree = lpcmDMAManager->pBeg;
1017 + lpcmDMAManager->pInUseBySW = lpcmDMAManager->pBeg;
1018 + lpcmDMAManager->pInUseByHW = lpcmDMAManager->pBeg;
1019 + lpcmDMAManager->LastIssuedEventTag = (unsigned int) (unsigned long *)
1020 + lpcmDMAManager->pBeg;
1021 + lpcmDMAManager->ppInUseByHW =
1022 + (unsigned int **) ((char *) (dev_priv->mmio->handle) +
1023 + INV_RB_AGPCMD_CURRADDR);
1024 + lpcmDMAManager->bDMAAgp = dev_priv->chip_agp;
1025 + lpcmDMAManager->addr_linear = (unsigned int *) addrlinear;
1027 + if (dev_priv->drm_agp_type == DRM_AGP_DOUBLE_BUFFER) {
1028 + lpcmDMAManager->MaxKickoffSize = lpcmDMAManager->DMASize >> 1;
1029 + lpcmDMAManager->pEnd =
1030 + lpcmDMAManager->addr_linear +
1031 + (lpcmDMAManager->DMASize >> 1) - 1;
1032 + SetAGPDoubleCmd_inv(dev);
1033 + if (dev_priv->chip_sub_index == CHIP_H6S2) {
1034 + DRM_INFO("DMA buffer initialized finished. ");
1035 + DRM_INFO("Use PCIE Double Buffer type!\n");
1036 + DRM_INFO("Total PCIE DMA buffer size = %8d bytes. \n",
1037 + lpcmDMAManager->DMASize << 2);
1039 + DRM_INFO("DMA buffer initialized finished. ");
1040 + DRM_INFO("Use AGP Double Buffer type!\n");
1041 + DRM_INFO("Total AGP DMA buffer size = %8d bytes. \n",
1042 + lpcmDMAManager->DMASize << 2);
1044 + } else if (dev_priv->drm_agp_type == DRM_AGP_RING_BUFFER) {
1045 + lpcmDMAManager->MaxKickoffSize = lpcmDMAManager->DMASize;
1046 + lpcmDMAManager->pEnd =
1047 + lpcmDMAManager->addr_linear + lpcmDMAManager->DMASize;
1048 + SetAGPRingCmdRegs_inv(dev);
1049 + if (dev_priv->chip_sub_index == CHIP_H6S2) {
1050 + DRM_INFO("DMA buffer initialized finished. \n");
1051 + DRM_INFO("Use PCIE Ring Buffer type!");
1052 + DRM_INFO("Total PCIE DMA buffer size = %8d bytes. \n",
1053 + lpcmDMAManager->DMASize << 2);
1055 + DRM_INFO("DMA buffer initialized finished. ");
1056 + DRM_INFO("Use AGP Ring Buffer type!\n");
1057 + DRM_INFO("Total AGP DMA buffer size = %8d bytes. \n",
1058 + lpcmDMAManager->DMASize << 2);
1060 + } else if (dev_priv->drm_agp_type == DRM_AGP_DISABLED) {
1061 + lpcmDMAManager->MaxKickoffSize = 0x0;
1062 + if (dev_priv->chip_sub_index == CHIP_H6S2)
1063 + DRM_INFO("PCIE init failed! Use PCI\n");
1065 + DRM_INFO("AGP init failed! Use PCI\n");
1071 +kickoff_bci_inv(struct drm_device *dev,
1072 + struct drm_via_chrome9_flush *dma_info)
1074 + u32 HdType, dwQWCount, i, dwCount, Addr1, Addr2, SWPointer,
1076 + unsigned long *pCmdData;
1079 + struct drm_via_chrome9_private *dev_priv =
1080 + (struct drm_via_chrome9_private *) dev->dev_private;
1081 + /*pCmdData = __s3gke_vmalloc(dma_info->cmd_size<<2); */
1082 + pCmdData = dev_priv->bci_buffer;
1086 + result = copy_from_user((int *) pCmdData, dma_info->usermode_dma_buf,
1087 + dma_info->cmd_size << 2);
1089 + DRM_ERROR("In function kickoff_bci_inv,\
1090 + copy_from_user is fault. \n");
1093 +#if VIA_CHROME9_VERIFY_ENABLE
1094 + result = via_chrome9_verify_command_stream(
1095 + (const uint32_t *)pCmdData, dma_info->cmd_size << 2,
1096 + dev, dev_priv->chip_sub_index == CHIP_H6S2 ? 0 : 1);
1098 + DRM_ERROR("The command has the security issue \n");
1103 + SWPointerEnd = (u32) dma_info->cmd_size;
1104 + while (SWPointer < SWPointerEnd) {
1105 + HdType = pCmdData[SWPointer] & INV_AGPHeader_MASK;
1107 + case INV_AGPHeader0:
1108 + case INV_AGPHeader5:
1109 + dwQWCount = pCmdData[SWPointer + 1];
1112 + for (i = 0; i < dwQWCount; i++) {
1113 + SetMMIORegister(dev_priv->mmio->handle,
1114 + pCmdData[SWPointer],
1115 + pCmdData[SWPointer + 1]);
1120 + case INV_AGPHeader1:
1121 + dwCount = pCmdData[SWPointer + 1];
1123 + SWPointer += 4; /* skip 128-bit. */
1125 + for (; dwCount > 0; dwCount--, SWPointer++,
1127 + SetMMIORegister(dev_priv->hostBlt->handle,
1128 + Addr1, pCmdData[SWPointer]);
1132 + case INV_AGPHeader4:
1133 + dwCount = pCmdData[SWPointer + 1];
1134 + Addr1 = pCmdData[SWPointer] & 0x0000FFFF;
1135 + SWPointer += 4; /* skip 128-bit. */
1137 + for (; dwCount > 0; dwCount--, SWPointer++)
1138 + SetMMIORegister(dev_priv->mmio->handle, Addr1,
1139 + pCmdData[SWPointer]);
1142 + case INV_AGPHeader2:
1143 + Addr1 = pCmdData[SWPointer + 1] & 0xFFFF;
1144 + Addr2 = pCmdData[SWPointer] & 0xFFFF;
1146 + /* Write first data (either ParaType or whatever) to
1148 + SetMMIORegister(dev_priv->mmio->handle, Addr1,
1149 + pCmdData[SWPointer + 2]);
1152 + /* The following data are all written to Addr2,
1153 + until another header is met */
1154 + while (!is_agp_header(pCmdData[SWPointer])
1155 + && (SWPointer < SWPointerEnd)) {
1156 + SetMMIORegister(dev_priv->mmio->handle, Addr2,
1157 + pCmdData[SWPointer]);
1162 + case INV_AGPHeader3:
1163 + Addr1 = pCmdData[SWPointer] & 0xFFFF;
1164 + Addr2 = Addr1 + 4;
1165 + dwCount = pCmdData[SWPointer + 1];
1167 + /* Write first data (either ParaType or whatever) to
1169 + SetMMIORegister(dev_priv->mmio->handle, Addr1,
1170 + pCmdData[SWPointer + 2]);
1173 + for (i = 0; i < dwCount; i++) {
1174 + SetMMIORegister(dev_priv->mmio->handle, Addr2,
1175 + pCmdData[SWPointer]);
1180 + case INV_AGPHeader6:
1183 + case INV_AGPHeader7:
1187 + SWPointer += 4; /* Advance to next header */
1190 + SWPointer = (SWPointer + 3) & ~3;
1195 +kickoff_dma_db_inv(struct drm_device *dev)
1197 + struct drm_via_chrome9_private *dev_priv =
1198 + (struct drm_via_chrome9_private *) dev->dev_private;
1199 + struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
1200 + dev_priv->dma_manager;
1202 + u32 BufferSize = (u32) (lpcmDMAManager->pFree - lpcmDMAManager->pBeg);
1204 + unsigned int AGPBufLinearBase =
1205 + (unsigned int) lpcmDMAManager->addr_linear;
1206 + unsigned int AGPBufPhysicalBase =
1207 + (unsigned int) dev->agp->base + lpcmDMAManager->pPhysical;
1208 + /*add shadow offset */
1210 + unsigned int dwStart, dwEnd, dwPause;
1211 + unsigned int dwReg60, dwReg61, dwReg62, dwReg63, dwReg64, dwReg65;
1212 + unsigned int CR_Status;
1214 + if (BufferSize == 0)
1217 + /* 256-bit alignment of AGP pause address */
1218 + if ((u32) ((unsigned long *) lpcmDMAManager->pFree) & 0x1f) {
1219 + ADDCmdHeader2_INVI(lpcmDMAManager->pFree, INV_REG_CR_TRANS,
1220 + INV_ParaType_Dummy);
1222 + ADDCmdData_INVI(lpcmDMAManager->pFree, 0xCCCCCCC0);
1223 + ADDCmdData_INVI(lpcmDMAManager->pFree, 0xDDD00000);
1225 + while (((unsigned int) lpcmDMAManager->pFree) & 0x1f)
1230 + (u32) (unsigned long *)lpcmDMAManager->pBeg -
1231 + AGPBufLinearBase + AGPBufPhysicalBase;
1232 + dwEnd = (u32) (unsigned long *)lpcmDMAManager->pEnd -
1233 + AGPBufLinearBase + AGPBufPhysicalBase;
1235 + (u32)(unsigned long *)lpcmDMAManager->pFree -
1236 + AGPBufLinearBase + AGPBufPhysicalBase - 4;
1238 + dwReg60 = INV_SubA_HAGPBstL | INV_HWBasL(dwStart);
1239 + dwReg61 = INV_SubA_HAGPBstH | INV_HWBasH(dwStart);
1240 + dwReg62 = INV_SubA_HAGPBendL | INV_HWBasL(dwEnd);
1241 + dwReg63 = INV_SubA_HAGPBendH | INV_HWBasH(dwEnd);
1242 + dwReg64 = INV_SubA_HAGPBpL | INV_HWBasL(dwPause);
1243 + dwReg65 = INV_SubA_HAGPBpID | INV_HWBasH(dwPause) | INV_HAGPBpID_STOP;
1245 + /* wait CR idle */
1246 + CR_Status = GetMMIORegister(dev_priv->mmio->handle, INV_RB_ENG_STATUS);
1247 + while (CR_Status & INV_ENG_BUSY_CR)
1249 + GetMMIORegister(dev_priv->mmio->handle,
1250 + INV_RB_ENG_STATUS);
1252 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS,
1253 + INV_ParaType_PreCR);
1254 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg60);
1255 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg61);
1256 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg62);
1257 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg63);
1258 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg64);
1259 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg65);
1261 + /* Trigger AGP cycle */
1262 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
1263 + INV_SubA_HFthRCM | INV_HFthRCM_10 | INV_HAGPBTrig);
1265 + if (lpcmDMAManager->pBeg == lpcmDMAManager->addr_linear) {
1266 + /* The second AGP command buffer */
1267 + lpcmDMAManager->pBeg =
1268 + lpcmDMAManager->addr_linear +
1269 + (lpcmDMAManager->DMASize >> 2);
1270 + lpcmDMAManager->pEnd =
1271 + lpcmDMAManager->addr_linear + lpcmDMAManager->DMASize;
1272 + lpcmDMAManager->pFree = lpcmDMAManager->pBeg;
1274 + /* The first AGP command buffer */
1275 + lpcmDMAManager->pBeg = lpcmDMAManager->addr_linear;
1276 + lpcmDMAManager->pEnd =
1277 + lpcmDMAManager->addr_linear +
1278 + (lpcmDMAManager->DMASize / 2) - 1;
1279 + lpcmDMAManager->pFree = lpcmDMAManager->pBeg;
1281 + CR_Status = GetMMIORegister(dev_priv->mmio->handle, INV_RB_ENG_STATUS);
1286 +kickoff_dma_ring_inv(struct drm_device *dev)
1288 + unsigned int dwPause, dwReg64, dwReg65;
1290 + struct drm_via_chrome9_private *dev_priv =
1291 + (struct drm_via_chrome9_private *) dev->dev_private;
1292 + struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
1293 + dev_priv->dma_manager;
1295 + unsigned int AGPBufLinearBase =
1296 + (unsigned int) lpcmDMAManager->addr_linear;
1297 + unsigned int AGPBufPhysicalBase =
1298 + (dev_priv->chip_agp ==
1299 + CHIP_PCIE) ? 0 : (unsigned int) dev->agp->base +
1300 + lpcmDMAManager->pPhysical;
1301 + /*add shadow offset */
1303 + /* 256-bit alignment of AGP pause address */
1304 + if (dev_priv->chip_sub_index == CHIP_H6S2) {
1306 + ((unsigned long *) lpcmDMAManager->pFree) & 0x7f) {
1307 + ADDCmdHeader2_INVI(lpcmDMAManager->pFree,
1309 + INV_ParaType_Dummy);
1311 + ADDCmdData_INVI(lpcmDMAManager->pFree,
1313 + ADDCmdData_INVI(lpcmDMAManager->pFree,
1316 + while ((u32)((unsigned long *) lpcmDMAManager->pFree) &
1322 + ((unsigned long *) lpcmDMAManager->pFree) & 0x1f) {
1323 + ADDCmdHeader2_INVI(lpcmDMAManager->pFree,
1325 + INV_ParaType_Dummy);
1327 + ADDCmdData_INVI(lpcmDMAManager->pFree,
1329 + ADDCmdData_INVI(lpcmDMAManager->pFree,
1332 + while ((u32)((unsigned long *) lpcmDMAManager->pFree) &
1339 + dwPause = (u32) ((unsigned long *) lpcmDMAManager->pFree)
1340 + - AGPBufLinearBase + AGPBufPhysicalBase - 16;
1342 + dwReg64 = INV_SubA_HAGPBpL | INV_HWBasL(dwPause);
1343 + dwReg65 = INV_SubA_HAGPBpID | INV_HWBasH(dwPause) | INV_HAGPBpID_PAUSE;
1345 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS,
1346 + INV_ParaType_PreCR);
1347 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg64);
1348 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg65);
1350 + lpcmDMAManager->pInUseBySW = lpcmDMAManager->pFree;
1354 +waitchipidle_inv(struct drm_via_chrome9_private *dev_priv)
1356 + unsigned int count = 50000;
1357 + unsigned int eng_status;
1358 + unsigned int engine_busy;
1362 + GetMMIORegister(dev_priv->mmio->handle,
1363 + INV_RB_ENG_STATUS);
1364 + engine_busy = eng_status & INV_ENG_BUSY_ALL;
1367 + while (engine_busy && count)
1369 + if (count && engine_busy == 0)
1375 +get_space_db_inv(struct drm_device *dev,
1376 + struct cmd_get_space *lpcmGetSpaceData)
1378 + struct drm_via_chrome9_private *dev_priv =
1379 + (struct drm_via_chrome9_private *) dev->dev_private;
1380 + struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
1381 + dev_priv->dma_manager;
1383 + unsigned int dwRequestSize = lpcmGetSpaceData->dwRequestSize;
1384 + if (dwRequestSize > lpcmDMAManager->MaxKickoffSize) {
1385 + DRM_INFO("too big DMA buffer request!!!\n");
1386 + via_chrome9ke_assert(0);
1387 + *lpcmGetSpaceData->pCmdData = (unsigned int) NULL;
1391 + if ((lpcmDMAManager->pFree + dwRequestSize) >
1392 + (lpcmDMAManager->pEnd - INV_CMDBUF_THRESHOLD * 2))
1393 + kickoff_dma_db_inv(dev);
1395 + *lpcmGetSpaceData->pCmdData = (unsigned int) lpcmDMAManager->pFree;
1399 +RewindRingAGP_inv(struct drm_device *dev)
1401 + struct drm_via_chrome9_private *dev_priv =
1402 + (struct drm_via_chrome9_private *) dev->dev_private;
1403 + struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
1404 + dev_priv->dma_manager;
1406 + unsigned int AGPBufLinearBase =
1407 + (unsigned int) lpcmDMAManager->addr_linear;
1408 + unsigned int AGPBufPhysicalBase =
1409 + (dev_priv->chip_agp ==
1410 + CHIP_PCIE) ? 0 : (unsigned int) dev->agp->base +
1411 + lpcmDMAManager->pPhysical;
1412 + /*add shadow offset */
1414 + unsigned int dwPause, dwJump;
1415 + unsigned int dwReg66, dwReg67;
1416 + unsigned int dwReg64, dwReg65;
1418 + ADDCmdHeader2_INVI(lpcmDMAManager->pFree, INV_REG_CR_TRANS,
1419 + INV_ParaType_Dummy);
1420 + ADDCmdData_INVI(lpcmDMAManager->pFree, 0xCCCCCCC7);
1421 + if (dev_priv->chip_sub_index == CHIP_H6S2)
1422 + while ((unsigned int) lpcmDMAManager->pFree & 0x7F)
1423 + ADDCmdData_INVI(lpcmDMAManager->pFree, 0xCCCCCCC7);
1425 + while ((unsigned int) lpcmDMAManager->pFree & 0x1F)
1426 + ADDCmdData_INVI(lpcmDMAManager->pFree, 0xCCCCCCC7);
1427 + dwJump = ((u32) ((unsigned long *) lpcmDMAManager->pFree))
1428 + - AGPBufLinearBase + AGPBufPhysicalBase - 16;
1430 + lpcmDMAManager->pFree = lpcmDMAManager->pBeg;
1432 + dwPause = ((u32) ((unsigned long *) lpcmDMAManager->pFree))
1433 + - AGPBufLinearBase + AGPBufPhysicalBase - 16;
1435 + dwReg64 = INV_SubA_HAGPBpL | INV_HWBasL(dwPause);
1436 + dwReg65 = INV_SubA_HAGPBpID | INV_HWBasH(dwPause) | INV_HAGPBpID_PAUSE;
1438 + dwReg66 = INV_SubA_HAGPBjumpL | INV_HWBasL(dwJump);
1439 + dwReg67 = INV_SubA_HAGPBjumpH | INV_HWBasH(dwJump);
1441 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS,
1442 + INV_ParaType_PreCR);
1443 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg66);
1444 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg67);
1446 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg64);
1447 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg65);
1448 + lpcmDMAManager->pInUseBySW = lpcmDMAManager->pFree;
1453 +get_space_ring_inv(struct drm_device *dev,
1454 + struct cmd_get_space *lpcmGetSpaceData)
1456 + struct drm_via_chrome9_private *dev_priv =
1457 + (struct drm_via_chrome9_private *) dev->dev_private;
1458 + struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
1459 + dev_priv->dma_manager;
1460 + unsigned int dwUnFlushed;
1461 + unsigned int dwRequestSize = lpcmGetSpaceData->dwRequestSize;
1463 + unsigned int AGPBufLinearBase =
1464 + (unsigned int) lpcmDMAManager->addr_linear;
1465 + unsigned int AGPBufPhysicalBase =
1466 + (dev_priv->chip_agp ==
1467 + CHIP_PCIE) ? 0 : (unsigned int) dev->agp->base +
1468 + lpcmDMAManager->pPhysical;
1469 + /*add shadow offset */
1470 + u32 BufStart, BufEnd, CurSW, CurHW, NextSW, BoundaryCheck;
1473 + (unsigned int) (lpcmDMAManager->pFree - lpcmDMAManager->pBeg);
1474 + /*default bEnableModuleSwitch is on for metro, is off for the rest */
1475 + /*cmHW_Module_Switch is a context-wide variable, which is enough for 2d/3d
1476 + switch in a context. */
1477 + /*But we must keep the dma buffer being wrapped head and tail by 3d cmds
1478 + when it is kicked off to kernel mode. */
1479 + /*Get DMA Space (If requested, or no BCI space and BCI not forced. */
1481 + if (dwRequestSize > lpcmDMAManager->MaxKickoffSize) {
1482 + DRM_INFO("too big DMA buffer request!!!\n");
1483 + via_chrome9ke_assert(0);
1484 + *lpcmGetSpaceData->pCmdData = 0;
1488 + if (dwUnFlushed + dwRequestSize > lpcmDMAManager->MaxKickoffSize)
1489 + kickoff_dma_ring_inv(dev);
1492 + (u32)((unsigned int) lpcmDMAManager->pBeg) - AGPBufLinearBase +
1493 + AGPBufPhysicalBase;
1494 + BufEnd = (u32)((unsigned int) lpcmDMAManager->pEnd) - AGPBufLinearBase +
1495 + AGPBufPhysicalBase;
1496 + dwRequestSize = lpcmGetSpaceData->dwRequestSize << 2;
1497 + NextSW = (u32) ((unsigned int) lpcmDMAManager->pFree) + dwRequestSize +
1498 + INV_CMDBUF_THRESHOLD * 8 - AGPBufLinearBase +
1499 + AGPBufPhysicalBase;
1501 + CurSW = (u32)((unsigned int) lpcmDMAManager->pFree) - AGPBufLinearBase +
1502 + AGPBufPhysicalBase;
1503 + CurHW = GetMMIORegister(dev_priv->mmio->handle, INV_RB_AGPCMD_CURRADDR);
1505 + if (NextSW >= BufEnd) {
1506 + kickoff_dma_ring_inv(dev);
1507 + CurSW = (u32) ((unsigned int) lpcmDMAManager->pFree) -
1508 + AGPBufLinearBase + AGPBufPhysicalBase;
1509 + /* make sure the last rewind is completed */
1510 + CurHW = GetMMIORegister(dev_priv->mmio->handle,
1511 + INV_RB_AGPCMD_CURRADDR);
1512 + while (CurHW > CurSW)
1513 + CurHW = GetMMIORegister(dev_priv->mmio->handle,
1514 + INV_RB_AGPCMD_CURRADDR);
1515 + /* Sometimes the value read from HW is unreliable,
1516 + so we need to double-check. */
1517 + CurHW = GetMMIORegister(dev_priv->mmio->handle,
1518 + INV_RB_AGPCMD_CURRADDR);
1519 + while (CurHW > CurSW)
1520 + CurHW = GetMMIORegister(dev_priv->mmio->handle,
1521 + INV_RB_AGPCMD_CURRADDR);
1523 + BufStart + dwRequestSize + INV_QW_PAUSE_ALIGN * 16;
1524 + if (BoundaryCheck >= BufEnd)
1525 + /* If an empty command buffer can't hold
1526 + the request data. */
1527 + via_chrome9ke_assert(0);
1529 + /* We need to guarantee the new commands have no chance
1530 + to overwrite the unexecuted commands, or wait until there
1531 + are no unexecuted commands in the agp buffer */
1532 + if (CurSW <= BoundaryCheck) {
1533 + CurHW = GetMMIORegister(dev_priv->mmio->handle,
1534 + INV_RB_AGPCMD_CURRADDR);
1535 + while (CurHW < CurSW)
1536 + CurHW = GetMMIORegister(
1537 + dev_priv->mmio->handle,
1538 + INV_RB_AGPCMD_CURRADDR);
1539 + /*Sometimes the value read from HW is unreliable,
1540 + so we need to double-check. */
1541 + CurHW = GetMMIORegister(dev_priv->mmio->handle,
1542 + INV_RB_AGPCMD_CURRADDR);
1543 + while (CurHW < CurSW) {
1544 + CurHW = GetMMIORegister(
1545 + dev_priv->mmio->handle,
1546 + INV_RB_AGPCMD_CURRADDR);
1548 + RewindRingAGP_inv(dev);
1549 + CurSW = (u32) ((unsigned long *)
1550 + lpcmDMAManager->pFree) -
1551 + AGPBufLinearBase + AGPBufPhysicalBase;
1552 + CurHW = GetMMIORegister(dev_priv->mmio->handle,
1553 + INV_RB_AGPCMD_CURRADDR);
1554 + /* Waiting until hw pointer jump to start
1555 + and hw pointer will */
1556 + /* equal to sw pointer */
1557 + while (CurHW != CurSW) {
1558 + CurHW = GetMMIORegister(
1559 + dev_priv->mmio->handle,
1560 + INV_RB_AGPCMD_CURRADDR);
1563 + CurHW = GetMMIORegister(dev_priv->mmio->handle,
1564 + INV_RB_AGPCMD_CURRADDR);
1566 + while (CurHW <= BoundaryCheck) {
1567 + CurHW = GetMMIORegister(
1568 + dev_priv->mmio->handle,
1569 + INV_RB_AGPCMD_CURRADDR);
1571 + CurHW = GetMMIORegister(dev_priv->mmio->handle,
1572 + INV_RB_AGPCMD_CURRADDR);
1573 + /* Sometimes the value read from HW is
1574 + unreliable, so we need to double-check. */
1575 + while (CurHW <= BoundaryCheck) {
1576 + CurHW = GetMMIORegister(
1577 + dev_priv->mmio->handle,
1578 + INV_RB_AGPCMD_CURRADDR);
1580 + RewindRingAGP_inv(dev);
1584 + /* no need to rewind. Ensure unexecuted agp commands will
1585 + not be overridden by new
1587 + CurSW = (u32) ((unsigned int) lpcmDMAManager->pFree) -
1588 + AGPBufLinearBase + AGPBufPhysicalBase;
1589 + CurHW = GetMMIORegister(dev_priv->mmio->handle,
1590 + INV_RB_AGPCMD_CURRADDR);
1592 + while ((CurHW > CurSW) && (CurHW <= NextSW))
1593 + CurHW = GetMMIORegister(dev_priv->mmio->handle,
1594 + INV_RB_AGPCMD_CURRADDR);
1596 + /* Sometimes the value read from HW is unreliable,
1597 + so we need to double-check. */
1598 + CurHW = GetMMIORegister(dev_priv->mmio->handle,
1599 + INV_RB_AGPCMD_CURRADDR);
1600 + while ((CurHW > CurSW) && (CurHW <= NextSW))
1601 + CurHW = GetMMIORegister(dev_priv->mmio->handle,
1602 + INV_RB_AGPCMD_CURRADDR);
1604 + /*return the space handle */
1605 + *lpcmGetSpaceData->pCmdData = (unsigned int) lpcmDMAManager->pFree;
1609 +release_space_inv(struct drm_device *dev,
1610 + struct cmd_release_space *lpcmReleaseSpaceData)
1612 + struct drm_via_chrome9_private *dev_priv =
1613 + (struct drm_via_chrome9_private *) dev->dev_private;
1614 + struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
1615 + dev_priv->dma_manager;
1616 + unsigned int dwReleaseSize = lpcmReleaseSpaceData->dwReleaseSize;
1619 + lpcmDMAManager->pFree += dwReleaseSize;
1621 + /* aligned address */
1622 + while (((unsigned int) lpcmDMAManager->pFree) & 0xF) {
1623 + /* not in 4 unsigned ints (16 Bytes) align address,
1624 + insert NULL Commands */
1625 + *lpcmDMAManager->pFree++ = NULL_COMMAND_INV[i & 0x3];
1629 + if ((dev_priv->chip_sub_index == CHIP_H5 ||
1630 + dev_priv->chip_sub_index == CHIP_H6S2) &&
1631 + (dev_priv->drm_agp_type == DRM_AGP_RING_BUFFER)) {
1632 + ADDCmdHeader2_INVI(lpcmDMAManager->pFree, INV_REG_CR_TRANS,
1633 + INV_ParaType_Dummy);
1634 + for (i = 0; i < NULLCOMMANDNUMBER; i++)
1635 + ADDCmdData_INVI(lpcmDMAManager->pFree, 0xCC000000);
1640 +via_chrome9_ioctl_flush(struct drm_device *dev, void *data,
1641 + struct drm_file *file_priv)
1643 + struct drm_via_chrome9_flush *dma_info = data;
1644 + struct drm_via_chrome9_private *dev_priv =
1645 + (struct drm_via_chrome9_private *) dev->dev_private;
1648 + struct cmd_get_space getspace;
1649 + struct cmd_release_space releasespace;
1650 + unsigned long *pCmdData = NULL;
1652 + switch (dma_info->dma_cmd_type) {
1653 + /* Copy DMA buffer to BCI command buffer */
1655 + case flush_bci_and_wait:
1656 + if (dma_info->cmd_size <= 0)
1658 + if (dma_info->cmd_size > MAX_BCI_BUFFER_SIZE) {
1659 + DRM_INFO("too big BCI space request!!!\n");
1663 + kickoff_bci_inv(dev, dma_info);
1664 + waitchipidle_inv(dev_priv);
1666 + /* Use DRM DMA buffer manager to kick off DMA directly */
1670 + /* Copy user mode DMA buffer to kernel DMA buffer,
1671 + then kick off DMA */
1672 + case flush_dma_buffer:
1673 + case flush_dma_and_wait:
1674 + if (dma_info->cmd_size <= 0)
1677 + getspace.dwRequestSize = dma_info->cmd_size;
1678 + if ((dev_priv->chip_sub_index == CHIP_H5 ||
1679 + dev_priv->chip_sub_index == CHIP_H6S2) &&
1680 + (dev_priv->drm_agp_type == DRM_AGP_RING_BUFFER))
1681 + getspace.dwRequestSize += (NULLCOMMANDNUMBER + 4);
1682 + /*henry:Patch for VT3293 agp ring buffer stability */
1683 + getspace.pCmdData = (unsigned int *) &pCmdData;
1685 + if (dev_priv->drm_agp_type == DRM_AGP_DOUBLE_BUFFER)
1686 + get_space_db_inv(dev, &getspace);
1687 + else if (dev_priv->drm_agp_type == DRM_AGP_RING_BUFFER)
1688 + get_space_ring_inv(dev, &getspace);
1690 + /*copy data from userspace to kernel-dma-agp buffer */
1691 + result = copy_from_user((int *)
1693 + dma_info->usermode_dma_buf,
1694 + dma_info->cmd_size << 2);
1696 + DRM_ERROR("In function via_chrome9_ioctl_flush,\
1697 + copy_from_user is fault. \n");
1701 +#if VIA_CHROME9_VERIFY_ENABLE
1702 + result = via_chrome9_verify_command_stream(
1703 + (const uint32_t *)pCmdData, dma_info->cmd_size << 2,
1704 + dev, dev_priv->chip_sub_index == CHIP_H6S2 ? 0 : 1);
1706 + DRM_ERROR("The user command has security issue.\n");
1711 + releasespace.dwReleaseSize = dma_info->cmd_size;
1712 + release_space_inv(dev, &releasespace);
1713 + if (dev_priv->drm_agp_type == DRM_AGP_DOUBLE_BUFFER)
1714 + kickoff_dma_db_inv(dev);
1715 + else if (dev_priv->drm_agp_type == DRM_AGP_RING_BUFFER)
1716 + kickoff_dma_ring_inv(dev);
1718 + if (dma_info->dma_cmd_type == flush_dma_and_wait)
1719 + waitchipidle_inv(dev_priv);
1721 + DRM_INFO("No enough DMA space");
1727 + DRM_INFO("Invalid DMA buffer type");
1735 +via_chrome9_ioctl_free(struct drm_device *dev, void *data,
1736 + struct drm_file *file_priv)
1742 +via_chrome9_ioctl_wait_chip_idle(struct drm_device *dev, void *data,
1743 + struct drm_file *file_priv)
1745 + struct drm_via_chrome9_private *dev_priv =
1746 + (struct drm_via_chrome9_private *) dev->dev_private;
1748 + waitchipidle_inv(dev_priv);
1749 + /* maybe_bug here, do we always return 0 */
1754 +via_chrome9_ioctl_flush_cache(struct drm_device *dev, void *data,
1755 + struct drm_file *file_priv)
1760 +++ b/drivers/gpu/drm/via_chrome9/via_chrome9_dma.h
1763 + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
1764 + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
1766 + * Permission is hereby granted, free of charge, to any person
1767 + * obtaining a copy of this software and associated documentation
1768 + * files (the "Software"), to deal in the Software without
1769 + * restriction, including without limitation the rights to use,
1770 + * copy, modify, merge, publish, distribute, sub license,
1771 + * and/or sell copies of the Software, and to permit persons to
1772 + * whom the Software is furnished to do so, subject to the
1773 + * following conditions:
1775 + * The above copyright notice and this permission notice
1776 + * (including the next paragraph) shall be included in all
1777 + * copies or substantial portions of the Software.
1779 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1780 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
1781 + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1782 + * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
1783 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1784 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1785 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
1786 + * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1788 +#ifndef _VIA_CHROME9_DMA_H_
1789 +#define _VIA_CHROME9_DMA_H_
1791 +#define MAX_BCI_BUFFER_SIZE (16 * 1024 * 1024)
1793 +enum cmd_request_type {
1797 + CM_REQUEST_RB_FORCED_DMA,
1798 + CM_REQUEST_NOTAVAILABLE
1801 +struct cmd_get_space {
1802 + unsigned int dwRequestSize;
1803 + enum cmd_request_type hint;
1804 + __volatile__ unsigned int *pCmdData;
1807 +struct cmd_release_space {
1808 + unsigned int dwReleaseSize;
1811 +extern int via_chrome9_hw_init(struct drm_device *dev,
1812 + struct drm_via_chrome9_init *init);
1813 +extern int via_chrome9_ioctl_flush(struct drm_device *dev, void *data,
1814 + struct drm_file *file_priv);
1815 +extern int via_chrome9_ioctl_free(struct drm_device *dev, void *data,
1816 + struct drm_file *file_prev);
1817 +extern int via_chrome9_ioctl_wait_chip_idle(struct drm_device *dev,
1818 + void *data, struct drm_file *file_priv);
1819 +extern int via_chrome9_ioctl_flush_cache(struct drm_device *dev,
1820 + void *data, struct drm_file *file_priv);
1821 +extern int via_chrome9_ioctl_flush(struct drm_device *dev, void *data,
1822 + struct drm_file *file_priv);
1823 +extern int via_chrome9_ioctl_free(struct drm_device *dev, void *data,
1824 + struct drm_file *file_priv);
1825 +extern unsigned int ProtectSizeValue(unsigned int size);
1826 +extern void SetAGPDoubleCmd_inv(struct drm_device *dev);
1827 +extern void SetAGPRingCmdRegs_inv(struct drm_device *dev);
1828 +extern void via_chrome9_dma_init_inv(struct drm_device *dev);
1832 +++ b/drivers/gpu/drm/via_chrome9/via_chrome9_drm.c
1835 + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
1836 + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
1838 + * Permission is hereby granted, free of charge, to any person
1839 + * obtaining a copy of this software and associated documentation
1840 + * files (the "Software"), to deal in the Software without
1841 + * restriction, including without limitation the rights to use,
1842 + * copy, modify, merge, publish, distribute, sub license,
1843 + * and/or sell copies of the Software, and to permit persons to
1844 + * whom the Software is furnished to do so, subject to the
1845 + * following conditions:
1847 + * The above copyright notice and this permission notice
1848 + * (including the next paragraph) shall be included in all
1849 + * copies or substantial portions of the Software.
1851 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1852 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
1853 + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1854 + * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
1855 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1856 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1857 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
1858 + * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1861 +#include "via_chrome9_drm.h"
1862 +#include "via_chrome9_drv.h"
1863 +#include "via_chrome9_mm.h"
1864 +#include "via_chrome9_dma.h"
1865 +#include "via_chrome9_3d_reg.h"
1867 +#define VIA_CHROME9DRM_VIDEO_STARTADDRESS_ALIGNMENT 10
1869 +void *via_chrome9_dev_v4l;
1870 +void *via_chrome9_filepriv_v4l;
1872 +void __via_chrome9ke_udelay(unsigned long usecs)
1874 + unsigned long start;
1875 + unsigned long stop;
1876 + unsigned long period;
1877 + unsigned long wait_period;
1878 + struct timespec tval;
1880 +#ifdef NDELAY_LIMIT
1881 +#define UDELAY_LIMIT (NDELAY_LIMIT/1000) /* supposed to be 10 msec */
1883 +#define UDELAY_LIMIT (10000) /* 10 msec */
1886 + if (usecs > UDELAY_LIMIT) {
1888 + tval.tv_sec = usecs / 1000000;
1889 + tval.tv_nsec = (usecs - tval.tv_sec * 1000000) * 1000;
1890 + wait_period = timespec_to_jiffies(&tval);
1895 + period = ((unsigned long)-1 - start) + stop + 1;
1897 + period = stop - start;
1899 + } while (period < wait_period);
1901 + udelay(usecs); /* delay value might get checked once again */
1904 +int via_chrome9_ioctl_process_exit(struct drm_device *dev, void *data,
1905 + struct drm_file *file_priv)
1910 +int via_chrome9_ioctl_restore_primary(struct drm_device *dev,
1911 + void *data, struct drm_file *file_priv)
1916 +void Initialize3DEngine(struct drm_via_chrome9_private *dev_priv)
1919 + unsigned int StageOfTexture;
1921 + if (dev_priv->chip_sub_index == CHIP_H5 ||
1922 + dev_priv->chip_sub_index == CHIP_H5S1) {
1923 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
1926 + for (i = 0; i <= 0x8A; i++) {
1927 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
1928 + (unsigned int) i << 24);
1931 + /* Initial Texture Stage Setting*/
1932 + for (StageOfTexture = 0; StageOfTexture < 0xf;
1933 + StageOfTexture++) {
1934 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
1935 + (0x00020000 | 0x00000000 |
1936 + (StageOfTexture & 0xf)<<24));
1937 + /* *((unsigned int volatile*)(pMapIOPort+HC_REG_TRANS_SET)) =
1938 + (0x00020000 | HC_ParaSubType_Tex0 | (StageOfTexture &
1940 + for (i = 0 ; i <= 0x30 ; i++) {
1941 + SetMMIORegister(dev_priv->mmio->handle,
1942 + 0x440, (unsigned int) i << 24);
1946 + /* Initial Texture Sampler Setting*/
1947 + for (StageOfTexture = 0; StageOfTexture < 0xf;
1948 + StageOfTexture++) {
1949 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
1950 + (0x00020000 | 0x00020000 |
1951 + (StageOfTexture & 0xf)<<24));
1952 + /* *((unsigned int volatile*)(pMapIOPort+
1953 + HC_REG_TRANS_SET)) = (0x00020000 | 0x00020000 |
1954 + ( StageOfTexture & 0xf)<<24);*/
1955 + for (i = 0 ; i <= 0x30 ; i++) {
1956 + SetMMIORegister(dev_priv->mmio->handle,
1957 + 0x440, (unsigned int) i << 24);
1961 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
1962 + (0x00020000 | 0xfe000000));
1963 + /* *((unsigned int volatile*)(pMapIOPort+HC_REG_TRANS_SET)) =
1964 + (0x00020000 | HC_ParaSubType_TexGen);*/
1965 + for (i = 0 ; i <= 0x13 ; i++) {
1966 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
1967 + (unsigned int) i << 24);
1968 + /* *((unsigned int volatile*)(pMapIOPort+
1969 + HC_REG_Hpara0)) = ((unsigned int) i << 24);*/
1972 + /* Initial Gamma Table Setting*/
1973 + /* Initial Gamma Table Setting*/
1974 + /* 5 + 4 = 9 (12) dwords*/
1975 + /* sRGB texture is not directly support by H3 hardware.
1976 + We have to set the deGamma table for texture sampling.*/
1978 + /* degamma table*/
1979 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
1980 + (0x00030000 | 0x15000000));
1981 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
1982 + (0x40000000 | (30 << 20) | (15 << 10) | (5)));
1983 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
1984 + ((119 << 20) | (81 << 10) | (52)));
1985 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
1986 + ((283 << 20) | (219 << 10) | (165)));
1987 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
1988 + ((535 << 20) | (441 << 10) | (357)));
1989 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
1990 + ((119 << 20) | (884 << 20) | (757 << 10) |
1994 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
1995 + (0x00030000 | 0x17000000));
1996 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
1997 + (0x40000000 | (13 << 20) | (13 << 10) | (13)));
1998 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
1999 + (0x40000000 | (26 << 20) | (26 << 10) | (26)));
2000 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2001 + (0x40000000 | (39 << 20) | (39 << 10) | (39)));
2002 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2003 + ((51 << 20) | (51 << 10) | (51)));
2004 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2005 + ((71 << 20) | (71 << 10) | (71)));
2006 + SetMMIORegister(dev_priv->mmio->handle,
2007 + 0x440, (87 << 20) | (87 << 10) | (87));
2008 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2009 + (113 << 20) | (113 << 10) | (113));
2010 + SetMMIORegister(dev_priv->mmio->handle,
2011 + 0x440, (135 << 20) | (135 << 10) | (135));
2012 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2013 + (170 << 20) | (170 << 10) | (170));
2014 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2015 + (199 << 20) | (199 << 10) | (199));
2016 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2017 + (246 << 20) | (246 << 10) | (246));
2018 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2019 + (284 << 20) | (284 << 10) | (284));
2020 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2021 + (317 << 20) | (317 << 10) | (317));
2022 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2023 + (347 << 20) | (347 << 10) | (347));
2024 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2025 + (373 << 20) | (373 << 10) | (373));
2026 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2027 + (398 << 20) | (398 << 10) | (398));
2028 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2029 + (442 << 20) | (442 << 10) | (442));
2030 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2031 + (481 << 20) | (481 << 10) | (481));
2032 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2033 + (517 << 20) | (517 << 10) | (517));
2034 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2035 + (550 << 20) | (550 << 10) | (550));
2036 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2037 + (609 << 20) | (609 << 10) | (609));
2038 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2039 + (662 << 20) | (662 << 10) | (662));
2040 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2041 + (709 << 20) | (709 << 10) | (709));
2042 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2043 + (753 << 20) | (753 << 10) | (753));
2044 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2045 + (794 << 20) | (794 << 10) | (794));
2046 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2047 + (832 << 20) | (832 << 10) | (832));
2048 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2049 + (868 << 20) | (868 << 10) | (868));
2050 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2051 + (902 << 20) | (902 << 10) | (902));
2052 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2053 + (934 << 20) | (934 << 10) | (934));
2054 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2055 + (966 << 20) | (966 << 10) | (966));
2056 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2057 + (996 << 20) | (996 << 10) | (996));
2061 +	For Interrupt Restore only. All types of write-through
2062 +	registers should write header data to hardware at
2063 +	least before it can restore. H/W will automatically
2064 +	record the header to write through state buffer for
2067 + HParaType = 8'h03, HParaSubType = 8'h00
2073 + HParaSubType 8'h12, 8'h15 is initialized.
2075 + 1. All these write through registers can't be partial
2077 + 2. All these write through must be AGP command
2078 + 16 entries : 4 128-bit data */
2080 + /* Initialize INV_ParaSubType_TexPal */
2081 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2082 + (0x00030000 | 0x00000000));
2083 + for (i = 0; i < 16; i++) {
2084 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2088 + /* Initialize INV_ParaSubType_4X4Cof */
2089 + /* 32 entries : 8 128-bit data */
2090 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2091 + (0x00030000 | 0x11000000));
2092 + for (i = 0; i < 32; i++) {
2093 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2097 + /* Initialize INV_ParaSubType_StipPal */
2098 + /* 5 entries : 2 128-bit data */
2099 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2100 + (0x00030000 | 0x14000000));
2101 + for (i = 0; i < (5+3); i++) {
2102 + SetMMIORegister(dev_priv->mmio->handle,
2103 + 0x440, 0x00000000);
2106 + /* primitive setting & vertex format*/
2107 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2108 + (0x00040000 | 0x14000000));
2109 + for (i = 0; i < 52; i++) {
2110 + SetMMIORegister(dev_priv->mmio->handle,
2111 + 0x440, ((unsigned int) i << 24));
2113 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2115 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2117 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2119 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2121 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2124 + /* setting Misconfig*/
2125 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2127 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2129 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2131 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2133 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2135 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2137 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2139 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2141 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2143 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2145 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2147 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2149 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2151 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2153 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2155 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2157 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2159 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2161 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2163 + } else if (dev_priv->chip_sub_index == CHIP_H6S2) {
2164 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2166 + for (i = 0; i <= 0x9A; i++) {
2167 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2168 + (unsigned int) i << 24);
2171 + /* Initial Texture Stage Setting*/
2172 + for (StageOfTexture = 0; StageOfTexture <= 0xf;
2173 + StageOfTexture++) {
2174 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2175 + (0x00020000 | 0x00000000 |
2176 + (StageOfTexture & 0xf)<<24));
2177 + for (i = 0 ; i <= 0x30 ; i++) {
2178 + SetMMIORegister(dev_priv->mmio->handle,
2179 + 0x440, (unsigned int) i << 24);
2183 + /* Initial Texture Sampler Setting*/
2184 + for (StageOfTexture = 0; StageOfTexture <= 0xf;
2185 + StageOfTexture++) {
2186 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2187 + (0x00020000 | 0x20000000 |
2188 + (StageOfTexture & 0xf)<<24));
2189 + for (i = 0 ; i <= 0x36 ; i++) {
2190 + SetMMIORegister(dev_priv->mmio->handle,
2191 + 0x440, (unsigned int) i << 24);
2195 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2196 + (0x00020000 | 0xfe000000));
2197 + for (i = 0 ; i <= 0x13 ; i++) {
2198 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2199 + (unsigned int) i << 24);
2200 + /* *((unsigned int volatile*)(pMapIOPort+
2201 + HC_REG_Hpara0)) =((unsigned int) i << 24);*/
2204 + /* Initial Gamma Table Setting*/
2205 + /* Initial Gamma Table Setting*/
2206 + /* 5 + 4 = 9 (12) dwords*/
2207 +		/* sRGB texture is not directly supported by
2209 +		/* We have to set the deGamma table for texture
2212 + /* degamma table*/
2213 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2214 + (0x00030000 | 0x15000000));
2215 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2216 + (0x40000000 | (30 << 20) | (15 << 10) | (5)));
2217 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2218 + ((119 << 20) | (81 << 10) | (52)));
2219 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2220 + ((283 << 20) | (219 << 10) | (165)));
2221 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2222 + ((535 << 20) | (441 << 10) | (357)));
2223 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2224 + ((119 << 20) | (884 << 20) | (757 << 10)
2228 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2229 + (0x00030000 | 0x17000000));
2230 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2231 + (0x40000000 | (13 << 20) | (13 << 10) | (13)));
2232 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2233 + (0x40000000 | (26 << 20) | (26 << 10) | (26)));
2234 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2235 + (0x40000000 | (39 << 20) | (39 << 10) | (39)));
2236 + SetMMIORegister(dev_priv->mmio->handle,
2237 + 0x440, ((51 << 20) | (51 << 10) | (51)));
2238 + SetMMIORegister(dev_priv->mmio->handle,
2239 + 0x440, ((71 << 20) | (71 << 10) | (71)));
2240 + SetMMIORegister(dev_priv->mmio->handle,
2241 + 0x440, (87 << 20) | (87 << 10) | (87));
2242 + SetMMIORegister(dev_priv->mmio->handle,
2243 + 0x440, (113 << 20) | (113 << 10) | (113));
2244 + SetMMIORegister(dev_priv->mmio->handle,
2245 + 0x440, (135 << 20) | (135 << 10) | (135));
2246 + SetMMIORegister(dev_priv->mmio->handle,
2247 + 0x440, (170 << 20) | (170 << 10) | (170));
2248 + SetMMIORegister(dev_priv->mmio->handle,
2249 + 0x440, (199 << 20) | (199 << 10) | (199));
2250 + SetMMIORegister(dev_priv->mmio->handle,
2251 + 0x440, (246 << 20) | (246 << 10) | (246));
2252 + SetMMIORegister(dev_priv->mmio->handle,
2253 + 0x440, (284 << 20) | (284 << 10) | (284));
2254 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2255 + (317 << 20) | (317 << 10) | (317));
2256 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2257 + (347 << 20) | (347 << 10) | (347));
2258 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2259 + (373 << 20) | (373 << 10) | (373));
2260 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2261 + (398 << 20) | (398 << 10) | (398));
2262 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2263 + (442 << 20) | (442 << 10) | (442));
2264 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2265 + (481 << 20) | (481 << 10) | (481));
2266 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2267 + (517 << 20) | (517 << 10) | (517));
2268 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2269 + (550 << 20) | (550 << 10) | (550));
2270 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2271 + (609 << 20) | (609 << 10) | (609));
2272 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2273 + (662 << 20) | (662 << 10) | (662));
2274 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2275 + (709 << 20) | (709 << 10) | (709));
2276 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2277 + (753 << 20) | (753 << 10) | (753));
2278 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2279 + (794 << 20) | (794 << 10) | (794));
2280 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2281 + (832 << 20) | (832 << 10) | (832));
2282 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2283 + (868 << 20) | (868 << 10) | (868));
2284 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2285 + (902 << 20) | (902 << 10) | (902));
2286 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2287 + (934 << 20) | (934 << 10) | (934));
2288 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2289 + (966 << 20) | (966 << 10) | (966));
2290 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2291 + (996 << 20) | (996 << 10) | (996));
2294 +	/* For Interrupt Restore only.
2295 +	All types of write-through registers should write
2296 +	header data to hardware at least before it can restore.
2297 +	H/W will automatically record the header to write
2298 +	through state buffer for restore usage.
2300 + HParaType = 8'h03, HParaSubType = 8'h00
2306 + HParaSubType 8'h12, 8'h15 is initialized.
2308 + 1. All these write through registers can't be partial
2310 + 2. All these write through must be AGP command
2311 + 16 entries : 4 128-bit data */
2313 + /* Initialize INV_ParaSubType_TexPal */
2314 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2315 + (0x00030000 | 0x00000000));
2316 + for (i = 0; i < 16; i++) {
2317 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2321 + /* Initialize INV_ParaSubType_4X4Cof */
2322 + /* 32 entries : 8 128-bit data */
2323 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2324 + (0x00030000 | 0x11000000));
2325 + for (i = 0; i < 32; i++) {
2326 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2330 + /* Initialize INV_ParaSubType_StipPal */
2331 + /* 5 entries : 2 128-bit data */
2332 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2333 + (0x00030000 | 0x14000000));
2334 + for (i = 0; i < (5+3); i++) {
2335 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2339 + /* primitive setting & vertex format*/
2340 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2342 + for (i = 0; i <= 0x62; i++) {
2343 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2344 + ((unsigned int) i << 24));
2347 + /*ParaType 0xFE - Configure and Misc Setting*/
2348 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2350 + for (i = 0; i <= 0x47; i++) {
2351 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2352 + ((unsigned int) i << 24));
2354 + /*ParaType 0x11 - Frame Buffer Auto-Swapping and
2355 + Command Regulator Misc*/
2356 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2358 + for (i = 0; i <= 0x20; i++) {
2359 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2360 + ((unsigned int) i << 24));
2362 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2364 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2366 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2368 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2370 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2373 + /* setting Misconfig*/
2374 + SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2376 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2378 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2380 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2382 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2384 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2386 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2388 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2390 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2392 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2394 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2396 + SetMMIORegister(dev_priv->mmio->handle, 0x440,
2401 +int via_chrome9_drm_resume(struct pci_dev *pci)
2403 + struct drm_device *dev = (struct drm_device *)pci_get_drvdata(pci);
2404 + struct drm_via_chrome9_private *dev_priv =
2405 + (struct drm_via_chrome9_private *)dev->dev_private;
2407 + if (!dev_priv->initialized)
2410 + Initialize3DEngine(dev_priv);
2412 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS, 0x00110000);
2413 + if (dev_priv->chip_sub_index == CHIP_H6S2) {
2414 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
2416 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
2419 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
2421 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
2426 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS,
2427 + INV_ParaType_PreCR);
2428 + SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
2429 + INV_SubA_HSetRBGID | INV_HSetRBGID_CR);
2431 + if (dev_priv->chip_sub_index == CHIP_H6S2) {
2433 + /* Here restore SR66~SR6F SR79~SR7B */
2434 + for (i = 0; i < 10; i++) {
2435 + SetMMIORegisterU8(dev_priv->mmio->handle,
2436 + 0x83c4, 0x66 + i);
2437 + SetMMIORegisterU8(dev_priv->mmio->handle,
2438 + 0x83c5, dev_priv->gti_backup[i]);
2441 + for (i = 0; i < 3; i++) {
2442 + SetMMIORegisterU8(dev_priv->mmio->handle,
2443 + 0x83c4, 0x79 + i);
2444 + SetMMIORegisterU8(dev_priv->mmio->handle,
2445 + 0x83c5, dev_priv->gti_backup[10 + i]);
2449 + via_chrome9_dma_init_inv(dev);
2454 +int via_chrome9_drm_suspend(struct pci_dev *pci,
2455 + pm_message_t state)
2458 + struct drm_device *dev = (struct drm_device *)pci_get_drvdata(pci);
2459 + struct drm_via_chrome9_private *dev_priv =
2460 + (struct drm_via_chrome9_private *)dev->dev_private;
2462 + if (!dev_priv->initialized)
2465 + if (dev_priv->chip_sub_index != CHIP_H6S2)
2468 + /* Save registers from SR66~SR6F */
2469 + for (i = 0; i < 10; i++) {
2470 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x66 + i);
2471 + dev_priv->gti_backup[i] =
2472 + GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
2475 + /* Save registers from SR79~SR7B */
2476 + for (i = 0; i < 3; i++) {
2477 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x79 + i);
2478 + dev_priv->gti_backup[10 + i] =
2479 + GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
2485 +int via_chrome9_driver_load(struct drm_device *dev,
2486 + unsigned long chipset)
2488 + struct drm_via_chrome9_private *dev_priv;
2490 + static int associate;
2493 + pci_set_drvdata(dev->pdev, dev);
2494 + dev->pdev->driver = &dev->driver->pci_driver;
2498 + dev->counters += 4;
2499 + dev->types[6] = _DRM_STAT_IRQ;
2500 + dev->types[7] = _DRM_STAT_PRIMARY;
2501 + dev->types[8] = _DRM_STAT_SECONDARY;
2502 + dev->types[9] = _DRM_STAT_DMA;
2504 + dev_priv = drm_calloc(1, sizeof(struct drm_via_chrome9_private),
2506 + if (dev_priv == NULL)
2510 + memset(dev_priv, 0, sizeof(struct drm_via_chrome9_private));
2512 + dev_priv->dev = dev;
2513 + dev->dev_private = (void *)dev_priv;
2515 + dev_priv->chip_index = chipset;
2517 + ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
2519 + drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
2523 +int via_chrome9_driver_unload(struct drm_device *dev)
2525 + struct drm_via_chrome9_private *dev_priv = dev->dev_private;
2527 + drm_sman_takedown(&dev_priv->sman);
2529 + drm_free(dev_priv, sizeof(struct drm_via_chrome9_private),
2532 + dev->dev_private = 0;
2537 +static int via_chrome9_initialize(struct drm_device *dev,
2538 + struct drm_via_chrome9_init *init)
2540 + struct drm_via_chrome9_private *dev_priv =
2541 + (struct drm_via_chrome9_private *)dev->dev_private;
2543 + dev_priv->chip_agp = init->chip_agp;
2544 + dev_priv->chip_index = init->chip_index;
2545 + dev_priv->chip_sub_index = init->chip_sub_index;
2547 + dev_priv->usec_timeout = init->usec_timeout;
2548 + dev_priv->front_offset = init->front_offset;
2549 + dev_priv->back_offset = init->back_offset >>
2550 + VIA_CHROME9DRM_VIDEO_STARTADDRESS_ALIGNMENT <<
2551 + VIA_CHROME9DRM_VIDEO_STARTADDRESS_ALIGNMENT;
2552 + dev_priv->available_fb_size = init->available_fb_size -
2553 + (init->available_fb_size %
2554 + (1 << VIA_CHROME9DRM_VIDEO_STARTADDRESS_ALIGNMENT));
2555 + dev_priv->depth_offset = init->depth_offset;
2557 + /* Find all the map added first, doing this is necessary to
2559 + if (via_chrome9_map_init(dev, init)) {
2560 + DRM_ERROR("function via_chrome9_map_init ERROR !\n");
2564 + /* Necessary information has been gathered for initialize hw */
2565 + if (via_chrome9_hw_init(dev, init)) {
2566 + DRM_ERROR("function via_chrome9_hw_init ERROR !\n");
2570 +	/* After hw initialization, we have known whether to use agp
2571 +	or to use pcie for texture */
2572 + if (via_chrome9_heap_management_init(dev, init)) {
2573 + DRM_ERROR("function \
2574 + via_chrome9_heap_management_init ERROR !\n");
2578 + dev_priv->initialized = 1;
2583 +	/* all the error recovery has been processed in the relevant function,
2584 +	so here just return error */
2588 +static void via_chrome9_cleanup(struct drm_device *dev,
2589 + struct drm_via_chrome9_init *init)
2591 + struct drm_via_chrome9_DMA_manager *lpcmDMAManager = NULL;
2592 + struct drm_via_chrome9_private *dev_priv =
2593 + (struct drm_via_chrome9_private *)dev->dev_private;
2594 + DRM_DEBUG("function via_chrome9_cleanup run!\n");
2600 + (struct drm_via_chrome9_DMA_manager *)dev_priv->dma_manager;
2601 + if (dev_priv->pcie_vmalloc_nocache) {
2602 + vfree((void *)dev_priv->pcie_vmalloc_nocache);
2603 + dev_priv->pcie_vmalloc_nocache = 0;
2604 + if (lpcmDMAManager)
2605 + lpcmDMAManager->addr_linear = NULL;
2608 + if (dev_priv->pagetable_map.pagetable_handle) {
2609 + iounmap(dev_priv->pagetable_map.pagetable_handle);
2610 + dev_priv->pagetable_map.pagetable_handle = NULL;
2613 + if (lpcmDMAManager && lpcmDMAManager->addr_linear) {
2614 + iounmap(lpcmDMAManager->addr_linear);
2615 + lpcmDMAManager->addr_linear = NULL;
2618 + kfree(lpcmDMAManager);
2619 + dev_priv->dma_manager = NULL;
2621 + if (dev_priv->event_tag_info) {
2622 + vfree(dev_priv->event_tag_info);
2623 + dev_priv->event_tag_info = NULL;
2626 + if (dev_priv->bci_buffer) {
2627 + vfree(dev_priv->bci_buffer);
2628 + dev_priv->bci_buffer = NULL;
2631 + via_chrome9_memory_destroy_heap(dev, dev_priv);
2633 +	/* After cleanup, these values should be set to null */
2634 + dev_priv->sarea = dev_priv->mmio = dev_priv->hostBlt =
2635 + dev_priv->fb = dev_priv->front = dev_priv->back =
2636 + dev_priv->depth = dev_priv->agp_tex =
2637 + dev_priv->shadow_map.shadow = 0;
2638 + dev_priv->sarea_priv = 0;
2639 + dev_priv->initialized = 0;
2643 +Do almost all the initialization here, including:
2644 +1. initialize all addmaps in the private data structure
2645 +2. initialize memory heap management for video agp/pcie
2646 +3. initialize hw for dma(pcie/agp) function
2648 +Note:all this function will dispatch into relevant function
2650 +int via_chrome9_ioctl_init(struct drm_device *dev, void *data,
2651 + struct drm_file *file_priv)
2653 + struct drm_via_chrome9_init *init = (struct drm_via_chrome9_init *)data;
2655 + switch (init->func) {
2656 + case VIA_CHROME9_INIT:
2657 + if (via_chrome9_initialize(dev, init)) {
2658 + DRM_ERROR("function via_chrome9_initialize error\n");
2661 + via_chrome9_filepriv_v4l = (void *)file_priv;
2662 + via_chrome9_dev_v4l = (void *)dev;
2665 + case VIA_CHROME9_CLEANUP:
2666 + via_chrome9_cleanup(dev, init);
2667 + via_chrome9_filepriv_v4l = 0;
2668 + via_chrome9_dev_v4l = 0;
2678 +int via_chrome9_ioctl_allocate_event_tag(struct drm_device *dev,
2679 + void *data, struct drm_file *file_priv)
2681 + struct drm_via_chrome9_event_tag *event_tag = data;
2682 + struct drm_via_chrome9_private *dev_priv =
2683 + (struct drm_via_chrome9_private *)dev->dev_private;
2684 + struct drm_clb_event_tag_info *event_tag_info =
2685 + dev_priv->event_tag_info;
2686 + unsigned int *event_addr = 0, i = 0;
2688 + for (i = 0; i < NUMBER_OF_EVENT_TAGS; i++) {
2689 + if (!event_tag_info->usage[i])
2693 + if (i < NUMBER_OF_EVENT_TAGS) {
2694 + event_tag_info->usage[i] = 1;
2695 + event_tag->event_offset = i;
2696 + event_tag->last_sent_event_value.event_low = 0;
2697 + event_tag->current_event_value.event_low = 0;
2698 + event_addr = event_tag_info->linear_address +
2699 + event_tag->event_offset * 4;
2709 +int via_chrome9_ioctl_free_event_tag(struct drm_device *dev,
2710 + void *data, struct drm_file *file_priv)
2712 + struct drm_via_chrome9_private *dev_priv =
2713 + (struct drm_via_chrome9_private *)dev->dev_private;
2714 + struct drm_clb_event_tag_info *event_tag_info =
2715 + dev_priv->event_tag_info;
2716 + struct drm_via_chrome9_event_tag *event_tag = data;
2718 + event_tag_info->usage[event_tag->event_offset] = 0;
2722 +void via_chrome9_lastclose(struct drm_device *dev)
2724 + via_chrome9_cleanup(dev, 0);
2728 +static int via_chrome9_do_wait_vblank(struct drm_via_chrome9_private
2733 + for (i = 0; i < dev_priv->usec_timeout; i++) {
2734 + VIA_CHROME9_WRITE8(0x83d4, 0x34);
2735 + if ((VIA_CHROME9_READ8(0x83d5)) & 0x8)
2737 + __via_chrome9ke_udelay(1);
2743 +void via_chrome9_preclose(struct drm_device *dev, struct drm_file *file_priv)
2745 + struct drm_via_chrome9_private *dev_priv =
2746 + (struct drm_via_chrome9_private *) dev->dev_private;
2747 + struct drm_via_chrome9_sarea *sarea_priv = NULL;
2752 + sarea_priv = dev_priv->sarea_priv;
2756 + if ((sarea_priv->page_flip == 1) &&
2757 + (sarea_priv->current_page != VIA_CHROME9_FRONT)) {
2758 + __volatile__ unsigned long *bci_base;
2759 + if (via_chrome9_do_wait_vblank(dev_priv))
2762 + bci_base = (__volatile__ unsigned long *)(dev_priv->bci);
2764 + BCI_SET_STREAM_REGISTER(bci_base, 0x81c4, 0xc0000000);
2765 + BCI_SET_STREAM_REGISTER(bci_base, 0x81c0,
2766 + dev_priv->front_offset);
2767 + BCI_SEND(bci_base, 0x64000000);/* wait vsync */
2769 + sarea_priv->current_page = VIA_CHROME9_FRONT;
2773 +int via_chrome9_is_agp(struct drm_device *dev)
2775 + /* filter out pcie group which has no AGP device */
2776 + if (dev->pci_device == 0x1122 || dev->pci_device == 0x5122) {
2777 + dev->driver->driver_features &=
2778 + ~(DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_REQUIRE_AGP);
2785 +++ b/drivers/gpu/drm/via_chrome9/via_chrome9_drm.h
2788 + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
2789 + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
2791 + * Permission is hereby granted, free of charge, to any person
2792 + * obtaining a copy of this software and associated documentation
2793 + * files (the "Software"), to deal in the Software without
2794 + * restriction, including without limitation the rights to use,
2795 + * copy, modify, merge, publish, distribute, sub license,
2796 + * and/or sell copies of the Software, and to permit persons to
2797 + * whom the Software is furnished to do so, subject to the
2798 + * following conditions:
2800 + * The above copyright notice and this permission notice
2801 + * (including the next paragraph) shall be included in all
2802 + * copies or substantial portions of the Software.
2804 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
2805 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
2806 + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
2807 + * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
2808 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2809 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
2810 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
2811 + * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
2813 +#ifndef _VIA_CHROME9_DRM_H_
2814 +#define _VIA_CHROME9_DRM_H_
2816 +/* WARNING: These defines must be the same as what the Xserver uses.
2817 + * if you change them, you must change the defines in the Xserver.
2820 +#ifndef _VIA_CHROME9_DEFINES_
2821 +#define _VIA_CHROME9_DEFINES_
2824 +#include "via_drmclient.h"
2827 +#define VIA_CHROME9_NR_SAREA_CLIPRECTS 8
2828 +#define VIA_CHROME9_NR_XVMC_PORTS 10
2829 +#define VIA_CHROME9_NR_XVMC_LOCKS 5
2830 +#define VIA_CHROME9_MAX_CACHELINE_SIZE 64
2831 +#define XVMCLOCKPTR(saPriv,lockNo) \
2832 + ((__volatile__ struct drm_hw_lock *) \
2833 + (((((unsigned long) (saPriv)->XvMCLockArea) + \
2834 + (VIA_CHROME9_MAX_CACHELINE_SIZE - 1)) & \
2835 + ~(VIA_CHROME9_MAX_CACHELINE_SIZE - 1)) + \
2836 + VIA_CHROME9_MAX_CACHELINE_SIZE*(lockNo)))
2838 +/* Each region is a minimum of 64k, and there are at most 64 of them.
2840 +#define VIA_CHROME9_NR_TEX_REGIONS 64
2841 +#define VIA_CHROME9_LOG_MIN_TEX_REGION_SIZE 16
2844 +#define VIA_CHROME9_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
2845 +#define VIA_CHROME9_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
2846 +#define VIA_CHROME9_UPLOAD_CTX 0x4
2847 +#define VIA_CHROME9_UPLOAD_BUFFERS 0x8
2848 +#define VIA_CHROME9_UPLOAD_TEX0 0x10
2849 +#define VIA_CHROME9_UPLOAD_TEX1 0x20
2850 +#define VIA_CHROME9_UPLOAD_CLIPRECTS 0x40
2851 +#define VIA_CHROME9_UPLOAD_ALL 0xff
2853 +/* VIA_CHROME9 specific ioctls */
2854 +#define DRM_VIA_CHROME9_ALLOCMEM 0x00
2855 +#define DRM_VIA_CHROME9_FREEMEM 0x01
2856 +#define DRM_VIA_CHROME9_FREE 0x02
2857 +#define DRM_VIA_CHROME9_ALLOCATE_EVENT_TAG 0x03
2858 +#define DRM_VIA_CHROME9_FREE_EVENT_TAG 0x04
2859 +#define DRM_VIA_CHROME9_ALLOCATE_APERTURE 0x05
2860 +#define DRM_VIA_CHROME9_FREE_APERTURE 0x06
2861 +#define DRM_VIA_CHROME9_ALLOCATE_VIDEO_MEM 0x07
2862 +#define DRM_VIA_CHROME9_FREE_VIDEO_MEM 0x08
2863 +#define DRM_VIA_CHROME9_WAIT_CHIP_IDLE 0x09
2864 +#define DRM_VIA_CHROME9_PROCESS_EXIT 0x0A
2865 +#define DRM_VIA_CHROME9_RESTORE_PRIMARY 0x0B
2866 +#define DRM_VIA_CHROME9_FLUSH_CACHE 0x0C
2867 +#define DRM_VIA_CHROME9_INIT 0x0D
2868 +#define DRM_VIA_CHROME9_FLUSH 0x0E
2869 +#define DRM_VIA_CHROME9_CHECKVIDMEMSIZE 0x0F
2870 +#define DRM_VIA_CHROME9_PCIEMEMCTRL 0x10
2871 +#define DRM_VIA_CHROME9_AUTH_MAGIC 0x11
2872 +#define DRM_VIA_CHROME9_GET_PCI_ID 0x12
2873 +#define DRM_VIA_CHROME9_INIT_JUDGE 0x16
2874 +#define DRM_VIA_CHROME9_DMA 0x17
2876 +#define DRM_IOCTL_VIA_CHROME9_INIT \
2877 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_INIT, \
2878 + struct drm_via_chrome9_init)
2879 +#define DRM_IOCTL_VIA_CHROME9_FLUSH \
2880 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_FLUSH, \
2881 + struct drm_via_chrome9_flush)
2882 +#define DRM_IOCTL_VIA_CHROME9_FREE \
2883 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_FREE, int)
2884 +#define DRM_IOCTL_VIA_CHROME9_ALLOCATE_EVENT_TAG \
2885 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_ALLOCATE_EVENT_TAG, \
2886 + struct drm_event_via_chrome9_tag)
2887 +#define DRM_IOCTL_VIA_CHROME9_FREE_EVENT_TAG \
2888 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_FREE_EVENT_TAG, \
2889 + struct drm_event_via_chrome9_tag)
2890 +#define DRM_IOCTL_VIA_CHROME9_ALLOCATE_APERTURE \
2891 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_ALLOCATE_APERTURE, \
2892 + struct drm_via_chrome9_aperture)
2893 +#define DRM_IOCTL_VIA_CHROME9_FREE_APERTURE \
2894 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_FREE_APERTURE, \
2895 + struct drm_via_chrome9_aperture)
2896 +#define DRM_IOCTL_VIA_CHROME9_ALLOCATE_VIDEO_MEM \
2897 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_ALLOCATE_VIDEO_MEM, \
2898 + struct drm_via_chrome9_memory_alloc)
2899 +#define DRM_IOCTL_VIA_CHROME9_FREE_VIDEO_MEM \
2900 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_FREE_VIDEO_MEM, \
2901 + struct drm_via_chrome9_memory_alloc)
2902 +#define DRM_IOCTL_VIA_CHROME9_WAIT_CHIP_IDLE \
2903 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_WAIT_CHIP_IDLE, int)
2904 +#define DRM_IOCTL_VIA_CHROME9_PROCESS_EXIT \
2905 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_PROCESS_EXIT, int)
2906 +#define DRM_IOCTL_VIA_CHROME9_RESTORE_PRIMARY \
2907 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_RESTORE_PRIMARY, int)
2908 +#define DRM_IOCTL_VIA_CHROME9_FLUSH_CACHE \
2909 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_FLUSH_CACHE, int)
2910 +#define DRM_IOCTL_VIA_CHROME9_ALLOCMEM \
2911 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_ALLOCMEM, int)
2912 +#define DRM_IOCTL_VIA_CHROME9_FREEMEM \
2913 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_FREEMEM, int)
2914 +#define DRM_IOCTL_VIA_CHROME9_CHECK_VIDMEM_SIZE \
2915 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_CHECKVIDMEMSIZE, \
2916 + struct drm_via_chrome9_memory_alloc)
2917 +#define DRM_IOCTL_VIA_CHROME9_PCIEMEMCTRL \
2918 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_PCIEMEMCTRL,\
2919 + drm_via_chrome9_pciemem_ctrl_t)
2920 +#define DRM_IOCTL_VIA_CHROME9_AUTH_MAGIC \
2921 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_AUTH_MAGIC, drm_auth_t)
2922 +#define DRM_IOCTL_VIA_CHROME9_GET_PCI_ID \
2923 + DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_GET_PCI_ID, \
2924 + struct get_pci_id_struct)
2925 +#define DRM_IOCTL_VIA_CHROME9_INIT_JUDGE \
2926 + DRM_IOR(DRM_COMMAND_BASE + DRM_VIA_CHROME9_INIT_JUDGE, int)
2927 +#define DRM_IOCTL_VIA_CHROME9_DMA \
2928 + DRM_IO(DRM_COMMAND_BASE + DRM_VIA_CHROME9_DMA, int)
2931 + CHIP_UNKNOWN = -1,
2932 + CHIP_CMODEL, /*Model for any chip. */
2933 + CHIP_CLB, /*Columbia */
2934 + CHIP_DST, /*Destination */
2935 + CHIP_CSR, /*Castlerock */
2936 + CHIP_INV, /*Innovation (H3) */
2937 + CHIP_H5, /*Innovation (H5) */
2938 + CHIP_H5S1, /*Innovation (H5S1) */
2939 + CHIP_H6S2, /*Innovation (H6S2) */
2940 + CHIP_CMS, /*Columbia MS */
2941 + CHIP_METRO, /*Metropolis */
2942 + CHIP_MANHATTAN, /*manhattan */
2943 + CHIP_MATRIX, /*matrix */
2944 + CHIP_EVO, /*change for GCC 4.1 -add- 07.02.12*/
2945 + CHIP_H6S1, /*Innovation (H6S1)*/
2946 + CHIP_DST2, /*Destination-2 */
2947 + CHIP_LAST /*Maximum number of chips supported. */
2950 +enum VIA_CHROME9CHIPBUS {
2956 +struct drm_via_chrome9_init {
2958 + VIA_CHROME9_INIT = 0x01,
2959 + VIA_CHROME9_CLEANUP = 0x02
2963 + int chip_sub_index;
2965 + unsigned int sarea_priv_offset;
2966 + unsigned int fb_cpp;
2967 + unsigned int front_offset;
2968 + unsigned int back_offset;
2969 + unsigned int depth_offset;
2970 + unsigned int mmio_handle;
2971 + unsigned int dma_handle;
2972 + unsigned int fb_handle;
2973 + unsigned int front_handle;
2974 + unsigned int back_handle;
2975 + unsigned int depth_handle;
2977 + unsigned int fb_tex_offset;
2978 + unsigned int fb_tex_size;
2980 + unsigned int agp_tex_size;
2981 + unsigned int agp_tex_handle;
2982 + unsigned int shadow_size;
2983 + unsigned int shadow_handle;
2984 + unsigned int garttable_size;
2985 + unsigned int garttable_offset;
2986 + unsigned long available_fb_size;
2987 + unsigned long fb_base_address;
2988 + unsigned int DMA_size;
2989 + unsigned long DMA_phys_address;
2992 + AGP_DOUBLE_BUFFER,
2995 + unsigned int hostBlt_handle;
2998 +enum dma_cmd_type {
3000 + flush_bci_and_wait,
3003 + flush_dma_and_wait
3006 +struct drm_via_chrome9_flush {
3007 + enum dma_cmd_type dma_cmd_type;
3008 + /* command buffer index */
3010 + /* command buffer offset */
3012 + /* command dword size,command always from beginning */
3014 + /* if use dma kick off,it is dma kick off command */
3015 + unsigned long dma_kickoff[2];
3016 + /* user mode DMA buffer pointer */
3017 + unsigned int *usermode_dma_buf;
3020 +struct event_value {
3025 +struct drm_via_chrome9_event_tag {
3026 + unsigned int event_size; /* event tag size */
3027 + int event_offset; /* event tag id */
3028 + struct event_value last_sent_event_value;
3029 + struct event_value current_event_value;
3035 +/* Indices into buf.Setup where various bits of state are mirrored per
3036 + * context and per buffer. These can be fired at the card as a unit,
3037 + * or in a piecewise fashion as required.
3040 +#define VIA_CHROME9_TEX_SETUP_SIZE 8
3042 +/* Flags for clear ioctl
3044 +#define VIA_CHROME9_FRONT 0x1
3045 +#define VIA_CHROME9_BACK 0x2
3046 +#define VIA_CHROME9_DEPTH 0x4
3047 +#define VIA_CHROME9_STENCIL 0x8
3048 +#define VIA_CHROME9_MEM_VIDEO 0 /* matches drm constant */
3049 +#define VIA_CHROME9_MEM_AGP 1 /* matches drm constant */
3050 +#define VIA_CHROME9_MEM_SYSTEM 2
3051 +#define VIA_CHROME9_MEM_MIXED 3
3052 +#define VIA_CHROME9_MEM_UNKNOWN 4
3054 +struct drm_via_chrome9_agp {
3059 +struct drm_via_chrome9_fb {
3064 +struct drm_via_chrome9_mem {
3068 + unsigned long index;
3069 + unsigned long offset;
3072 +struct drm_via_chrome9_aperture {
3073 + /*IN: The frame buffer offset of the surface. */
3074 + int surface_offset;
3075 + /*IN: Surface pitch in byte, */
3077 + /*IN: Surface width in pixel */
3079 + /*IN: Surface height in pixel */
3081 + /*IN: Surface color format, Columbia has more color formats */
3083 + /*IN: Rotation degrees, only for Columbia */
3084 + int rotation_degree;
3085 + /*IN Is the PCIE Video, for MATRIX support NONLOCAL Aperture */
3087 + /*IN: Is the surface tilled, only for Columbia */
3089 + /*IN: Only allocate apertur, not hardware setup. */
3090 + int allocate_only;
3091 + /* OUT: linear address for aperture */
3092 + unsigned int *aperture_linear_address;
3093 + /*OUT: The pitch of the aperture,for CPU write not for GE */
3094 + int aperture_pitch;
3095 + /*OUT: The index of the aperture */
3096 + int aperture_handle;
3098 + /* always =0xAAAAAAAA */
3099 + /* Aligned surface's width(in pixel) */
3100 + int width_aligned;
3101 + /* Aligned surface's height(in pixel) */
3102 + int height_aligned;
3106 + Some fileds of this data structure has no meaning now since
3107 + we have managed heap based on mechanism provided by DRM
3108 + Remain what it was to keep consistent with 3D driver interface.
3110 +struct drm_via_chrome9_memory_alloc {
3112 + memory_heap_video = 0,
3114 + memory_heap_pcie_video,
3120 + unsigned int alcL1Tag;
3121 + unsigned int usageCount;
3122 + unsigned int dwVersion;
3123 + unsigned int dwResHandle;
3124 + unsigned int dwProcessID;
3126 + unsigned int flags;
3127 + unsigned int size;
3128 + unsigned int physaddress;
3129 + unsigned int offset;
3130 + unsigned int align;
3131 + void *linearaddress;
3134 +struct drm_via_chrome9_dma_init {
3136 + VIA_CHROME9_INIT_DMA = 0x01,
3137 + VIA_CHROME9_CLEANUP_DMA = 0x02,
3138 + VIA_CHROME9_DMA_INITIALIZED = 0x03
3141 + unsigned long offset;
3142 + unsigned long size;
3143 + unsigned long reg_pause_addr;
3146 +struct drm_via_chrome9_cmdbuffer {
3148 + unsigned long size;
3151 +/* Warning: If you change the SAREA structure you must change the Xserver
3152 + * structure as well */
3154 +struct drm_via_chrome9_tex_region {
3155 + unsigned char next, prev; /* indices to form a circular LRU */
3156 + unsigned char inUse; /* owned by a client, or free? */
3157 + int age; /* tracked by clients to update local LRU's */
3160 +struct drm_via_chrome9_sarea {
3163 + unsigned int req_drawable;/* the X drawable id */
3164 + unsigned int req_draw_buffer;/* VIA_CHROME9_FRONT or VIA_CHROME9_BACK */
3165 + /* Last context that uploaded state */
3169 +struct drm_via_chrome9_cmdbuf_size {
3171 + VIA_CHROME9_CMDBUF_SPACE = 0x01,
3172 + VIA_CHROME9_CMDBUF_LAG = 0x02
3178 +struct drm_via_chrome9_DMA_manager {
3179 + unsigned int *addr_linear;
3180 + unsigned int DMASize;
3181 + unsigned int bDMAAgp;
3182 + unsigned int LastIssuedEventTag;
3183 + unsigned int *pBeg;
3184 + unsigned int *pInUseByHW;
3185 + unsigned int **ppInUseByHW;
3186 + unsigned int *pInUseBySW;
3187 + unsigned int *pFree;
3188 + unsigned int *pEnd;
3190 + unsigned long pPhysical;
3191 + unsigned int MaxKickoffSize;
3194 +struct get_pci_id_struct {
3202 +extern void *via_chrome9_dev_v4l;
3203 +extern void *via_chrome9_filepriv_v4l;
3204 +extern int via_chrome9_ioctl_wait_chip_idle(struct drm_device *dev,
3205 + void *data, struct drm_file *file_priv);
3206 +extern int via_chrome9_ioctl_init(struct drm_device *dev,
3207 + void *data, struct drm_file *file_priv);
3208 +extern int via_chrome9_ioctl_allocate_event_tag(struct drm_device
3209 + *dev, void *data, struct drm_file *file_priv);
3210 +extern int via_chrome9_ioctl_free_event_tag(struct drm_device *dev,
3211 + void *data, struct drm_file *file_priv);
3212 +extern int via_chrome9_driver_load(struct drm_device *dev,
3213 + unsigned long chipset);
3214 +extern int via_chrome9_driver_unload(struct drm_device *dev);
3215 +extern int via_chrome9_ioctl_process_exit(struct drm_device *dev,
3216 + void *data, struct drm_file *file_priv);
3217 +extern int via_chrome9_ioctl_restore_primary(struct drm_device *dev,
3218 + void *data, struct drm_file *file_priv);
3219 +extern int via_chrome9_drm_resume(struct pci_dev *dev);
3220 +extern int via_chrome9_drm_suspend(struct pci_dev *dev,
3221 + pm_message_t state);
3222 +extern void __via_chrome9ke_udelay(unsigned long usecs);
3223 +extern void via_chrome9_lastclose(struct drm_device *dev);
3224 +extern void via_chrome9_preclose(struct drm_device *dev,
3225 + struct drm_file *file_priv);
3226 +extern int via_chrome9_is_agp(struct drm_device *dev);
3229 +#endif /* _VIA_CHROME9_DRM_H_ */
3231 +++ b/drivers/gpu/drm/via_chrome9/via_chrome9_drv.c
3234 + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3235 + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
3237 + * Permission is hereby granted, free of charge, to any person
3238 + * obtaining a copy of this software and associated documentation
3239 + * files (the "Software"), to deal in the Software without
3240 + * restriction, including without limitation the rights to use,
3241 + * copy, modify, merge, publish, distribute, sub license,
3242 + * and/or sell copies of the Software, and to permit persons to
3243 + * whom the Software is furnished to do so, subject to the
3244 + * following conditions:
3246 + * The above copyright notice and this permission notice
3247 + * (including the next paragraph) shall be included in all
3248 + * copies or substantial portions of the Software.
3250 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
3251 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
3252 + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
3253 + * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
3254 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3255 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
3256 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
3257 + * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
3261 +#include "via_chrome9_drm.h"
3262 +#include "via_chrome9_drv.h"
3263 +#include "via_chrome9_dma.h"
3264 +#include "via_chrome9_mm.h"
3265 +#include "via_chrome9_3d_reg.h"
3267 +#define RING_BUFFER_INIT_FLAG 1
3268 +#define RING_BUFFER_CLEANUP_FLAG 2
3270 +static int dri_library_name(struct drm_device *dev, char *buf)
3272 + return snprintf(buf, PAGE_SIZE, "via_chrome9");
3275 +int via_chrome9_drm_authmagic(struct drm_device *dev, void *data,
3276 + struct drm_file *file_priv)
3281 +int via_chrome9_drm_get_pci_id(struct drm_device *dev,
3282 + void *data, struct drm_file *file_priv)
3284 + unsigned int *reg_val = data;
3285 + outl(0x8000002C, 0xCF8);
3286 + *reg_val = inl(0xCFC);
3287 + outl(0x8000012C, 0xCF8);
3288 + *(reg_val+1) = inl(0xCFC);
3289 + outl(0x8000022C, 0xCF8);
3290 + *(reg_val+2) = inl(0xCFC);
3291 + outl(0x8000052C, 0xCF8);
3292 + *(reg_val+3) = inl(0xCFC);
3296 +int via_chrome9_drm_judge(struct drm_device *dev, void *data,
3297 + struct drm_file *file_priv)
3299 + struct drm_via_chrome9_private *dev_priv =
3300 + (struct drm_via_chrome9_private *) dev->dev_private;
3302 + if (dev_priv->initialized)
3305 + *(int *)data = -1;
3309 +int via_chrome9_dma_init(struct drm_device *dev, void *data,
3310 + struct drm_file *file_priv)
3313 + unsigned char sr6c;
3314 + struct drm_via_chrome9_private *dev_priv =
3315 + (struct drm_via_chrome9_private *)dev->dev_private;
3316 + tmp = *((int *)data);
3319 + case RING_BUFFER_INIT_FLAG:
3320 + via_chrome9_dma_init_inv(dev);
3322 + case RING_BUFFER_CLEANUP_FLAG:
3323 + if (dev_priv->chip_sub_index == CHIP_H6S2) {
3324 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
3325 + sr6c = GetMMIORegisterU8(dev_priv->mmio->handle,
3328 + SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
3337 +struct drm_ioctl_desc via_chrome9_ioctls[] = {
3338 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_INIT, via_chrome9_ioctl_init,
3339 + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),/* via_chrome9_map.c*/
3340 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_FLUSH, via_chrome9_ioctl_flush, DRM_AUTH),
3341 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_FREE, via_chrome9_ioctl_free, DRM_AUTH),
3342 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_ALLOCATE_EVENT_TAG,
3343 + via_chrome9_ioctl_allocate_event_tag, DRM_AUTH),
3344 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_FREE_EVENT_TAG,
3345 + via_chrome9_ioctl_free_event_tag, DRM_AUTH),
3346 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_ALLOCATE_APERTURE,
3347 + via_chrome9_ioctl_allocate_aperture, DRM_AUTH),
3348 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_FREE_APERTURE,
3349 + via_chrome9_ioctl_free_aperture, DRM_AUTH),
3350 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_ALLOCATE_VIDEO_MEM,
3351 + via_chrome9_ioctl_allocate_mem_wrapper, DRM_AUTH),
3352 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_FREE_VIDEO_MEM,
3353 + via_chrome9_ioctl_free_mem_wrapper, DRM_AUTH),
3354 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_WAIT_CHIP_IDLE,
3355 + via_chrome9_ioctl_wait_chip_idle, DRM_AUTH),
3356 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_PROCESS_EXIT,
3357 + via_chrome9_ioctl_process_exit, DRM_AUTH),
3358 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_RESTORE_PRIMARY,
3359 + via_chrome9_ioctl_restore_primary, DRM_AUTH),
3360 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_FLUSH_CACHE,
3361 + via_chrome9_ioctl_flush_cache, DRM_AUTH),
3362 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_ALLOCMEM,
3363 + via_chrome9_ioctl_allocate_mem_base, DRM_AUTH),
3364 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_FREEMEM,
3365 + via_chrome9_ioctl_freemem_base, DRM_AUTH),
3366 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_CHECKVIDMEMSIZE,
3367 + via_chrome9_ioctl_check_vidmem_size, DRM_AUTH),
3368 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_PCIEMEMCTRL,
3369 + via_chrome9_ioctl_pciemem_ctrl, DRM_AUTH),
3370 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_AUTH_MAGIC, via_chrome9_drm_authmagic, 0),
3371 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_GET_PCI_ID,
3372 + via_chrome9_drm_get_pci_id, 0),
3373 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_INIT_JUDGE, via_chrome9_drm_judge, 0),
3374 + DRM_IOCTL_DEF(DRM_VIA_CHROME9_DMA, via_chrome9_dma_init, 0)
3377 +int via_chrome9_max_ioctl = DRM_ARRAY_SIZE(via_chrome9_ioctls);
3379 +static struct pci_device_id pciidlist[] = {
3380 + {0x1106, 0x3225, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
3381 + {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_CHROME9_DX9_0},
3382 + {0x1106, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
3383 + {0x1106, 0x1122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_CHROME9_PCIE_GROUP},
3384 + {0x1106, 0x5122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_CHROME9_PCIE_GROUP},
3388 +int via_chrome9_driver_open(struct drm_device *dev,
3389 + struct drm_file *priv)
3391 + priv->authenticated = 1;
3395 +static struct drm_driver driver = {
3396 + .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
3397 + DRIVER_HAVE_DMA | DRIVER_FB_DMA | DRIVER_USE_MTRR,
3398 + .open = via_chrome9_driver_open,
3399 + .load = via_chrome9_driver_load,
3400 + .unload = via_chrome9_driver_unload,
3401 + .device_is_agp = via_chrome9_is_agp,
3402 + .dri_library_name = dri_library_name,
3403 + .reclaim_buffers = drm_core_reclaim_buffers,
3404 + .reclaim_buffers_locked = NULL,
3405 + .reclaim_buffers_idlelocked = via_chrome9_reclaim_buffers_locked,
3406 + .lastclose = via_chrome9_lastclose,
3407 + .preclose = via_chrome9_preclose,
3408 + .get_map_ofs = drm_core_get_map_ofs,
3409 + .get_reg_ofs = drm_core_get_reg_ofs,
3410 + .ioctls = via_chrome9_ioctls,
3412 + .owner = THIS_MODULE,
3414 + .release = drm_release,
3415 + .ioctl = drm_ioctl,
3418 + .fasync = drm_fasync,
3421 + .name = DRIVER_NAME,
3422 + .id_table = pciidlist,
3423 + .resume = via_chrome9_drm_resume,
3424 + .suspend = via_chrome9_drm_suspend,
3427 + .name = DRIVER_NAME,
3428 + .desc = DRIVER_DESC,
3429 + .date = DRIVER_DATE,
3430 + .major = DRIVER_MAJOR,
3431 + .minor = DRIVER_MINOR,
3432 + .patchlevel = DRIVER_PATCHLEVEL,
3435 +static int __init via_chrome9_init(void)
3437 + driver.num_ioctls = via_chrome9_max_ioctl;
3438 +#if VIA_CHROME9_VERIFY_ENABLE
3439 + via_chrome9_init_command_verifier();
3440 + DRM_INFO("via_chrome9 verify function enabled. \n");
3442 + driver.dev_priv_size = sizeof(struct drm_via_chrome9_private);
3443 + return drm_init(&driver);
3446 +static void __exit via_chrome9_exit(void)
3448 + drm_exit(&driver);
3451 +module_init(via_chrome9_init);
3452 +module_exit(via_chrome9_exit);
3454 +MODULE_AUTHOR(DRIVER_AUTHOR);
3455 +MODULE_DESCRIPTION(DRIVER_DESC);
3456 +MODULE_LICENSE("GPL and additional rights");
3458 +++ b/drivers/gpu/drm/via_chrome9/via_chrome9_drv.h
3461 + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3462 + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
3464 + * Permission is hereby granted, free of charge, to any person
3465 + * obtaining a copy of this software and associated documentation
3466 + * files (the "Software"), to deal in the Software without
3467 + * restriction, including without limitation the rights to use,
3468 + * copy, modify, merge, publish, distribute, sub license,
3469 + * and/or sell copies of the Software, and to permit persons to
3470 + * whom the Software is furnished to do so, subject to the
3471 + * following conditions:
3473 + * The above copyright notice and this permission notice
3474 + * (including the next paragraph) shall be included in all
3475 + * copies or substantial portions of the Software.
3477 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
3478 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
3479 + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
3480 + * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
3481 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3482 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
3483 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
3484 + * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
3486 +#ifndef _VIA_CHROME9_DRV_H_
3487 +#define _VIA_CHROME9_DRV_H_
3489 +#include "drm_sman.h"
3490 +#include "via_chrome9_verifier.h"
3491 +#define DRIVER_AUTHOR "Various"
3493 +#define DRIVER_NAME "via_chrome9"
3494 +#define DRIVER_DESC "VIA_CHROME9 Unichrome / Pro"
3495 +#define DRIVER_DATE "20080415"
3497 +#define DRIVER_MAJOR 2
3498 +#define DRIVER_MINOR 11
3499 +#define DRIVER_PATCHLEVEL 1
3501 +#define via_chrome9_FIRE_BUF_SIZE 1024
3502 +#define via_chrome9_NUM_IRQS 4
3504 +#define MAX_MEMORY_HEAPS 4
3505 +#define NUMBER_OF_APERTURES 32
3507 +/*typedef struct drm_via_chrome9_shadow_map drm_via_chrome9_shadow_map_t;*/
3508 +struct drm_via_chrome9_shadow_map {
3509 + struct drm_map *shadow;
3510 + unsigned int shadow_size;
3511 + unsigned int *shadow_handle;
3514 +/*typedef struct drm_via_chrome9_pagetable_map
3515 + *drm_via_chrome9_pagetable_map_t;
3517 +struct drm_via_chrome9_pagetable_map {
3518 + unsigned int pagetable_offset;
3519 + unsigned int pagetable_size;
3520 + unsigned int *pagetable_handle;
3521 + unsigned int mmt_register;
3524 +/*typedef struct drm_via_chrome9_private drm_via_chrome9_private_t;*/
3525 +struct drm_via_chrome9_private {
3528 + int chip_sub_index;
3530 + unsigned long front_offset;
3531 + unsigned long back_offset;
3532 + unsigned long depth_offset;
3533 + unsigned long fb_base_address;
3534 + unsigned long available_fb_size;
3536 + int max_apertures;
3537 + struct drm_sman sman;
3538 + unsigned int alignment;
3539 + /* bit[31]:0:indicate no alignment needed,1:indicate
3540 + alignment needed and size is bit[0:30]*/
3542 + struct drm_map *sarea;
3543 + struct drm_via_chrome9_sarea *sarea_priv;
3545 + struct drm_map *mmio;
3546 + struct drm_map *hostBlt;
3547 + struct drm_map *fb;
3548 + struct drm_map *front;
3549 + struct drm_map *back;
3550 + struct drm_map *depth;
3551 + struct drm_map *agp_tex;
3552 + unsigned int agp_size;
3553 + unsigned int agp_offset;
3555 + struct semaphore *drm_s3g_sem;
3557 + struct drm_via_chrome9_shadow_map shadow_map;
3558 + struct drm_via_chrome9_pagetable_map pagetable_map;
3562 + int aperture_usage[NUMBER_OF_APERTURES];
3563 + void *event_tag_info;
3565 + /* DMA buffer manager */
3566 + void *dma_manager;
3567 + /* Indicate agp/pcie heap initialization flag */
3568 + int agp_initialized;
3569 + /* Indicate video heap initialization flag */
3570 + int vram_initialized;
3572 + unsigned long pcie_vmalloc_addr;
3574 + /* pointer to device information */
3576 + /* if agp init fail, go ahead and force dri use PCI*/
3578 + DRM_AGP_RING_BUFFER,
3579 + DRM_AGP_DOUBLE_BUFFER,
3583 +#if VIA_CHROME9_VERIFY_ENABLE
3584 + struct drm_via_chrome9_state hc_state;
3586 + unsigned long *bci_buffer;
3587 + unsigned long pcie_vmalloc_nocache;
3588 + unsigned char gti_backup[13];
3594 +enum via_chrome9_family {
3595 + VIA_CHROME9_OTHER = 0, /* Baseline */
3596 + VIA_CHROME9_PRO_GROUP_A,/* Another video engine and DMA commands */
3597 + VIA_CHROME9_DX9_0,
3598 + VIA_CHROME9_PCIE_GROUP
3601 +/* VIA_CHROME9 MMIO register access */
3602 +#define VIA_CHROME9_BASE ((dev_priv->mmio))
3604 +#define VIA_CHROME9_READ(reg) DRM_READ32(VIA_CHROME9_BASE, reg)
3605 +#define VIA_CHROME9_WRITE(reg, val) DRM_WRITE32(VIA_CHROME9_BASE, reg, val)
3606 +#define VIA_CHROME9_READ8(reg) DRM_READ8(VIA_CHROME9_BASE, reg)
3607 +#define VIA_CHROME9_WRITE8(reg, val) DRM_WRITE8(VIA_CHROME9_BASE, reg, val)
3611 +++ b/drivers/gpu/drm/via_chrome9/via_chrome9_mm.c
3614 + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3615 + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
3617 + * Permission is hereby granted, free of charge, to any person
3618 + * obtaining a copy of this software and associated documentation
3619 + * files (the "Software"), to deal in the Software without
3620 + * restriction, including without limitation the rights to use,
3621 + * copy, modify, merge, publish, distribute, sub license,
3622 + * and/or sell copies of the Software, and to permit persons to
3623 + * whom the Software is furnished to do so, subject to the
3624 + * following conditions:
3626 + * The above copyright notice and this permission notice
3627 + * (including the next paragraph) shall be included in all
3628 + * copies or substantial portions of the Software.
3630 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
3631 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
3632 + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
3633 + * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
3634 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3635 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
3636 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
3637 + * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
3641 +#include "via_chrome9_drm.h"
3642 +#include "via_chrome9_drv.h"
3643 +#include "drm_sman.h"
3644 +#include "via_chrome9_mm.h"
3646 +#define VIA_CHROME9_MM_GRANULARITY 4
3647 +#define VIA_CHROME9_MM_GRANULARITY_MASK ((1 << VIA_CHROME9_MM_GRANULARITY) - 1)
3650 +int via_chrome9_map_init(struct drm_device *dev,
3651 + struct drm_via_chrome9_init *init)
3653 + struct drm_via_chrome9_private *dev_priv =
3654 + (struct drm_via_chrome9_private *)dev->dev_private;
3656 + dev_priv->sarea = drm_getsarea(dev);
3657 + if (!dev_priv->sarea) {
3658 + DRM_ERROR("could not find sarea!\n");
3661 + dev_priv->sarea_priv =
3662 + (struct drm_via_chrome9_sarea *)((unsigned char *)dev_priv->
3663 + sarea->handle + init->sarea_priv_offset);
3665 + dev_priv->fb = drm_core_findmap(dev, init->fb_handle);
3666 + if (!dev_priv->fb) {
3667 + DRM_ERROR("could not find framebuffer!\n");
3670 + /* Frame buffer physical base address */
3671 + dev_priv->fb_base_address = init->fb_base_address;
3673 + if (init->shadow_size) {
3674 + /* find apg shadow region mappings */
3675 + dev_priv->shadow_map.shadow = drm_core_findmap(dev, init->
3677 + if (!dev_priv->shadow_map.shadow) {
3678 + DRM_ERROR("could not shadow map!\n");
3681 + dev_priv->shadow_map.shadow_size = init->shadow_size;
3682 + dev_priv->shadow_map.shadow_handle = (unsigned int *)dev_priv->
3683 + shadow_map.shadow->handle;
3684 + init->shadow_handle = dev_priv->shadow_map.shadow->offset;
3686 + if (init->agp_tex_size && init->chip_agp != CHIP_PCIE) {
3687 + /* find apg texture buffer mappings */
3688 + dev_priv->agp_tex = drm_core_findmap(dev, init->agp_tex_handle);
3689 + dev_priv->agp_size = init->agp_tex_size;
3690 + dev_priv->agp_offset = init->agp_tex_handle;
3691 + if (!dev_priv->agp_tex) {
3692 + DRM_ERROR("could not find agp texture map !\n");
3696 + /* find mmio/dma mappings */
3697 + dev_priv->mmio = drm_core_findmap(dev, init->mmio_handle);
3698 + if (!dev_priv->mmio) {
3699 + DRM_ERROR("failed to find mmio region!\n");
3703 + dev_priv->hostBlt = drm_core_findmap(dev, init->hostBlt_handle);
3704 + if (!dev_priv->hostBlt) {
3705 + DRM_ERROR("failed to find host bitblt region!\n");
3709 + dev_priv->drm_agp_type = init->agp_type;
3710 + if (init->agp_type != AGP_DISABLED && init->chip_agp != CHIP_PCIE) {
3711 + dev->agp_buffer_map = drm_core_findmap(dev, init->dma_handle);
3712 + if (!dev->agp_buffer_map) {
3713 + DRM_ERROR("failed to find dma buffer region!\n");
3718 + dev_priv->bci = (char *)dev_priv->mmio->handle + 0x10000;
3723 + /* do cleanup here, refine_later */
3727 +int via_chrome9_heap_management_init(struct drm_device *dev,
3728 + struct drm_via_chrome9_init *init)
3730 + struct drm_via_chrome9_private *dev_priv =
3731 + (struct drm_via_chrome9_private *) dev->dev_private;
3734 + /* video memory management. range: 0 ---- video_whole_size */
3735 + mutex_lock(&dev->struct_mutex);
3736 + ret = drm_sman_set_range(&dev_priv->sman, VIA_CHROME9_MEM_VIDEO,
3737 + 0, dev_priv->available_fb_size >> VIA_CHROME9_MM_GRANULARITY);
3739 + DRM_ERROR("VRAM memory manager initialization ******ERROR\
3741 + mutex_unlock(&dev->struct_mutex);
3744 + dev_priv->vram_initialized = 1;
3745 + /* agp/pcie heap management.
3746 + note:because agp is contradict with pcie, so only one is enough
3747 + for managing both of them.*/
3748 + init->agp_type = dev_priv->drm_agp_type;
3749 + if (init->agp_type != AGP_DISABLED && dev_priv->agp_size) {
3750 + ret = drm_sman_set_range(&dev_priv->sman, VIA_CHROME9_MEM_AGP,
3751 + 0, dev_priv->agp_size >> VIA_CHROME9_MM_GRANULARITY);
3753 + DRM_ERROR("AGP/PCIE memory manager initialization ******ERROR\
3755 + mutex_unlock(&dev->struct_mutex);
3758 + dev_priv->agp_initialized = 1;
3760 + mutex_unlock(&dev->struct_mutex);
3764 + /* Do error recover here, refine_later */
3769 +void via_chrome9_memory_destroy_heap(struct drm_device *dev,
3770 + struct drm_via_chrome9_private *dev_priv)
3772 + mutex_lock(&dev->struct_mutex);
3773 + drm_sman_cleanup(&dev_priv->sman);
3774 + dev_priv->vram_initialized = 0;
3775 + dev_priv->agp_initialized = 0;
3776 + mutex_unlock(&dev->struct_mutex);
3779 +void via_chrome9_reclaim_buffers_locked(struct drm_device *dev,
3780 + struct drm_file *file_priv)
3785 +int via_chrome9_ioctl_allocate_aperture(struct drm_device *dev,
3786 + void *data, struct drm_file *file_priv)
3791 +int via_chrome9_ioctl_free_aperture(struct drm_device *dev,
3792 + void *data, struct drm_file *file_priv)
3798 +/* Allocate memory from DRM module for video playing */
3799 +int via_chrome9_ioctl_allocate_mem_base(struct drm_device *dev,
3800 +void *data, struct drm_file *file_priv)
3802 + struct drm_via_chrome9_mem *mem = data;
3803 + struct drm_memblock_item *item;
3804 + struct drm_via_chrome9_private *dev_priv =
3805 + (struct drm_via_chrome9_private *) dev->dev_private;
3806 + unsigned long tmpSize = 0, offset = 0, alignment = 0;
3807 + /* modify heap_type to agp for pcie, since we treat pcie/agp heap
3808 + no difference in heap management */
3809 + if (mem->type == memory_heap_pcie) {
3810 + if (dev_priv->chip_agp != CHIP_PCIE) {
3811 + DRM_ERROR("User want to alloc memory from pcie heap \
3812 + but via_chrome9.ko has no this heap exist.\n");
3815 + mem->type = memory_heap_agp;
3818 + if (mem->type > VIA_CHROME9_MEM_AGP) {
3819 + DRM_ERROR("Unknown memory type allocation\n");
3822 + mutex_lock(&dev->struct_mutex);
3823 + if (0 == ((mem->type == VIA_CHROME9_MEM_VIDEO) ?
3824 + dev_priv->vram_initialized : dev_priv->agp_initialized)) {
3825 + DRM_ERROR("Attempt to allocate from uninitialized\
3826 + memory manager.\n");
3827 + mutex_unlock(&dev->struct_mutex);
3830 + tmpSize = (mem->size + VIA_CHROME9_MM_GRANULARITY_MASK) >>
3831 + VIA_CHROME9_MM_GRANULARITY;
3832 + mem->size = tmpSize << VIA_CHROME9_MM_GRANULARITY;
3833 + alignment = (dev_priv->alignment & 0x80000000) ? dev_priv->
3834 + alignment & 0x7FFFFFFF : 0;
3835 + alignment /= (1 << VIA_CHROME9_MM_GRANULARITY);
3836 + item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, alignment,
3837 + (unsigned long)file_priv);
3838 + mutex_unlock(&dev->struct_mutex);
3839 + /* alloc failed */
3841 + DRM_ERROR("Allocate memory failed ******ERROR******.\n");
3844 + /* Till here every thing is ok, we check the memory type allocated
3845 + and return appropriate value to user mode Here the value return to
3846 + user is very difficult to operate. BE CAREFULLY!!! */
3847 + /* offset is used by user mode ap to calculate the virtual address
3848 + which is used to access the memory allocated */
3849 + mem->index = item->user_hash.key;
3850 + offset = item->mm->offset(item->mm, item->mm_info) <<
3851 + VIA_CHROME9_MM_GRANULARITY;
3852 + switch (mem->type) {
3853 + case VIA_CHROME9_MEM_VIDEO:
3854 + mem->offset = offset + dev_priv->back_offset;
3856 + case VIA_CHROME9_MEM_AGP:
3857 + /* return different value to user according to the chip type */
3858 + if (dev_priv->chip_agp == CHIP_PCIE) {
3859 + mem->offset = offset +
3860 + ((struct drm_via_chrome9_DMA_manager *)dev_priv->
3861 + dma_manager)->DMASize * sizeof(unsigned long);
3863 + mem->offset = offset;
3867 + /* Strange thing happen! Faint. Code bug! */
3868 + DRM_ERROR("Enter here is impossible ******\
3872 + /*DONE. Need we call function copy_to_user ?NO. We can't even
3873 + touch user's space.But we are lucky, since kernel drm:drm_ioctl
3874 + will to the job for us. */
3878 +/* Allocate video/AGP/PCIE memory from heap management */
3879 +int via_chrome9_ioctl_allocate_mem_wrapper(struct drm_device
3880 + *dev, void *data, struct drm_file *file_priv)
3882 + struct drm_via_chrome9_memory_alloc *memory_alloc =
3883 + (struct drm_via_chrome9_memory_alloc *)data;
3884 + struct drm_via_chrome9_private *dev_priv =
3885 + (struct drm_via_chrome9_private *) dev->dev_private;
3886 + struct drm_via_chrome9_mem mem;
3888 + mem.size = memory_alloc->size;
3889 + mem.type = memory_alloc->heap_type;
3890 + dev_priv->alignment = memory_alloc->align | 0x80000000;
3891 + if (via_chrome9_ioctl_allocate_mem_base(dev, &mem, file_priv)) {
3892 + DRM_ERROR("Allocate memory error!.\n");
3895 + dev_priv->alignment = 0;
3896 + /* Till here every thing is ok, we check the memory type allocated and
3897 + return appropriate value to user mode Here the value return to user is
3898 + very difficult to operate. BE CAREFULLY!!!*/
3899 + /* offset is used by user mode ap to calculate the virtual address
3900 + which is used to access the memory allocated */
3901 + memory_alloc->offset = mem.offset;
3902 + memory_alloc->heap_info.lpL1Node = (void *)mem.index;
3903 + memory_alloc->size = mem.size;
3904 + switch (memory_alloc->heap_type) {
3905 + case VIA_CHROME9_MEM_VIDEO:
3906 + memory_alloc->physaddress = memory_alloc->offset +
3907 + dev_priv->fb_base_address;
3908 + memory_alloc->linearaddress = (void *)memory_alloc->physaddress;
3910 + case VIA_CHROME9_MEM_AGP:
3911 + /* return different value to user according to the chip type */
3912 + if (dev_priv->chip_agp == CHIP_PCIE) {
3913 + memory_alloc->physaddress = memory_alloc->offset;
3914 + memory_alloc->linearaddress = (void *)memory_alloc->
3917 + memory_alloc->physaddress = dev->agp->base +
3918 + memory_alloc->offset +
3919 + ((struct drm_via_chrome9_DMA_manager *)
3920 + dev_priv->dma_manager)->DMASize * sizeof(unsigned long);
3921 + memory_alloc->linearaddress =
3922 + (void *)memory_alloc->physaddress;
3926 + /* Strange thing happen! Faint. Code bug! */
3927 + DRM_ERROR("Enter here is impossible ******ERROR******.\n");
3933 +int via_chrome9_ioctl_free_mem_wrapper(struct drm_device *dev,
3934 + void *data, struct drm_file *file_priv)
3936 + struct drm_via_chrome9_memory_alloc *memory_alloc = data;
3937 + struct drm_via_chrome9_mem mem;
3939 + mem.index = (unsigned long)memory_alloc->heap_info.lpL1Node;
3940 + if (via_chrome9_ioctl_freemem_base(dev, &mem, file_priv)) {
3941 + DRM_ERROR("function free_mem_wrapper error.\n");
3948 +int via_chrome9_ioctl_freemem_base(struct drm_device *dev,
3949 + void *data, struct drm_file *file_priv)
3951 + struct drm_via_chrome9_private *dev_priv = dev->dev_private;
3952 + struct drm_via_chrome9_mem *mem = data;
3955 + mutex_lock(&dev->struct_mutex);
3956 + ret = drm_sman_free_key(&dev_priv->sman, mem->index);
3957 + mutex_unlock(&dev->struct_mutex);
3958 + DRM_DEBUG("free = 0x%lx\n", mem->index);
3963 +int via_chrome9_ioctl_check_vidmem_size(struct drm_device *dev,
3964 + void *data, struct drm_file *file_priv)
3969 +int via_chrome9_ioctl_pciemem_ctrl(struct drm_device *dev,
3970 + void *data, struct drm_file *file_priv)
3973 + struct drm_via_chrome9_private *dev_priv = dev->dev_private;
3974 + struct drm_via_chrome9_pciemem_ctrl *pcie_memory_ctrl = data;
3975 + switch (pcie_memory_ctrl->ctrl_type) {
3976 + case pciemem_copy_from_user:
3977 + result = copy_from_user((void *)(
3978 + dev_priv->pcie_vmalloc_nocache+
3979 + pcie_memory_ctrl->pcieoffset),
3980 + pcie_memory_ctrl->usermode_data,
3981 + pcie_memory_ctrl->size);
3983 + case pciemem_copy_to_user:
3984 + result = copy_to_user(pcie_memory_ctrl->usermode_data,
3985 + (void *)(dev_priv->pcie_vmalloc_nocache+
3986 + pcie_memory_ctrl->pcieoffset),
3987 + pcie_memory_ctrl->size);
3989 + case pciemem_memset:
3990 + memset((void *)(dev_priv->pcie_vmalloc_nocache +
3991 + pcie_memory_ctrl->pcieoffset),
3992 + pcie_memory_ctrl->memsetdata,
3993 + pcie_memory_ctrl->size);
4002 +int via_fb_alloc(struct drm_via_chrome9_mem *mem)
4004 + struct drm_device *dev = (struct drm_device *)via_chrome9_dev_v4l;
4005 + struct drm_via_chrome9_private *dev_priv;
4007 + if (!dev || !dev->dev_private || !via_chrome9_filepriv_v4l) {
4008 + DRM_ERROR("V4L work before X initialize DRM module !!!\n");
4012 + dev_priv = (struct drm_via_chrome9_private *)dev->dev_private;
4013 + if (!dev_priv->vram_initialized ||
4014 + mem->type != VIA_CHROME9_MEM_VIDEO) {
4015 + DRM_ERROR("the memory type from V4L is error !!!\n");
4019 + if (via_chrome9_ioctl_allocate_mem_base(dev,
4020 + mem, via_chrome9_filepriv_v4l)) {
4021 + DRM_ERROR("DRM module allocate memory error for V4L!!!\n");
4027 +EXPORT_SYMBOL(via_fb_alloc);
4029 +int via_fb_free(struct drm_via_chrome9_mem *mem)
4031 + struct drm_device *dev = (struct drm_device *)via_chrome9_dev_v4l;
4032 + struct drm_via_chrome9_private *dev_priv;
4034 + if (!dev || !dev->dev_private || !via_chrome9_filepriv_v4l)
4037 + dev_priv = (struct drm_via_chrome9_private *)dev->dev_private;
4038 + if (!dev_priv->vram_initialized ||
4039 + mem->type != VIA_CHROME9_MEM_VIDEO)
4042 + if (via_chrome9_ioctl_freemem_base(dev, mem, via_chrome9_filepriv_v4l))
4047 +EXPORT_SYMBOL(via_fb_free);
4049 +++ b/drivers/gpu/drm/via_chrome9/via_chrome9_mm.h
4052 + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
4053 + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
4055 + * Permission is hereby granted, free of charge, to any person
4056 + * obtaining a copy of this software and associated documentation
4057 + * files (the "Software"), to deal in the Software without
4058 + * restriction, including without limitation the rights to use,
4059 + * copy, modify, merge, publish, distribute, sub license,
4060 + * and/or sell copies of the Software, and to permit persons to
4061 + * whom the Software is furnished to do so, subject to the
4062 + * following conditions:
4064 + * The above copyright notice and this permission notice
4065 + * (including the next paragraph) shall be included in all
4066 + * copies or substantial portions of the Software.
4068 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
4069 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
4070 + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
4071 + * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
4072 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4073 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
4074 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
4075 + * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
4077 +#ifndef _VIA_CHROME9_MM_H_
4078 +#define _VIA_CHROME9_MM_H_
4079 +struct drm_via_chrome9_pciemem_ctrl {
4081 + pciemem_copy_from_user = 0,
4082 + pciemem_copy_to_user,
4085 + unsigned int pcieoffset;
4086 + unsigned int size;/*in Byte*/
4087 + unsigned char memsetdata;/*for memset*/
4088 + void *usermode_data;/*user mode data pointer*/
4091 +extern int via_chrome9_map_init(struct drm_device *dev,
4092 + struct drm_via_chrome9_init *init);
4093 +extern int via_chrome9_heap_management_init(struct drm_device
4094 + *dev, struct drm_via_chrome9_init *init);
4095 +extern void via_chrome9_memory_destroy_heap(struct drm_device
4096 + *dev, struct drm_via_chrome9_private *dev_priv);
4097 +extern int via_chrome9_ioctl_check_vidmem_size(struct drm_device
4098 + *dev, void *data, struct drm_file *file_priv);
4099 +extern int via_chrome9_ioctl_pciemem_ctrl(struct drm_device *dev,
4100 + void *data, struct drm_file *file_priv);
4101 +extern int via_chrome9_ioctl_allocate_aperture(struct drm_device
4102 + *dev, void *data, struct drm_file *file_priv);
4103 +extern int via_chrome9_ioctl_free_aperture(struct drm_device *dev,
4104 + void *data, struct drm_file *file_priv);
4105 +extern int via_chrome9_ioctl_allocate_mem_base(struct drm_device
4106 + *dev, void *data, struct drm_file *file_priv);
4107 +extern int via_chrome9_ioctl_allocate_mem_wrapper(
4108 + struct drm_device *dev, void *data, struct drm_file *file_priv);
4109 +extern int via_chrome9_ioctl_freemem_base(struct drm_device
4110 + *dev, void *data, struct drm_file *file_priv);
4111 +extern int via_chrome9_ioctl_free_mem_wrapper(struct drm_device
4112 + *dev, void *data, struct drm_file *file_priv);
4113 +extern void via_chrome9_reclaim_buffers_locked(struct drm_device
4114 + *dev, struct drm_file *file_priv);
4119 +++ b/drivers/gpu/drm/via_chrome9/via_chrome9_verifier.c
4122 +* Copyright 2004 The Unichrome Project. All Rights Reserved.
4123 +* Copyright 2005 Thomas Hellstrom. All Rights Reserved.
4125 +* Permission is hereby granted, free of charge, to any person obtaining a
4126 +* copy of this software and associated documentation files (the "Software"),
4127 +* to deal in the Software without restriction, including without limitation
4128 +* the rights to use, copy, modify, merge, publish, distribute, sub license,
4129 +* and/or sell copies of the Software, and to permit persons to whom the
4130 +* Software is furnished to do so, subject to the following conditions:
4132 +* The above copyright notice and this permission notice (including the
4133 +* next paragraph) shall be included in all copies or substantial portions
4136 +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4137 +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4138 +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
4139 +* THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES
4140 +* OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
4141 +* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
4142 +* DEALINGS IN THE SOFTWARE.
4144 +* This code was written using docs obtained under NDA from VIA Inc.
4146 +* Don't run this code directly on an AGP buffer. Due to cache problems it will
4150 +#include "via_chrome9_3d_reg.h"
4153 +#include "via_chrome9_drm.h"
4154 +#include "via_chrome9_verifier.h"
4155 +#include "via_chrome9_drv.h"
4157 +#if VIA_CHROME9_VERIFY_ENABLE
4159 +enum verifier_state {
4174 + check_render_target_addr0,
4175 + check_render_target_addr1,
4176 + check_render_target_addr_mode,
4177 + check_z_buffer_addr0,
4178 + check_z_buffer_addr1,
4179 + check_z_buffer_addr_mode,
4180 + check_zocclusion_addr0,
4181 + check_zocclusion_addr1,
4182 + check_coarse_z_addr0,
4183 + check_coarse_z_addr1,
4184 + check_fvf_addr_mode,
4185 + check_t_level0_facen_addr0,
4186 + check_fence_cmd_addr0,
4187 + check_fence_cmd_addr1,
4188 + check_fence_cmd_addr2,
4193 + * Associates each hazard above with a possible multi-command
4194 + * sequence. For example an address that is split over multiple
4195 + * commands and that needs to be checked at the first command
4196 + * that does not include any part of the address.
4199 +static enum drm_via_chrome9_sequence seqs[] = {
4207 + zocclusion_address,
4208 + zocclusion_address,
4213 + fence_cmd_address,
4214 + fence_cmd_address,
4215 + fence_cmd_address,
4220 + unsigned int code;
4223 +/* for attributes other than context hazard detection */
4224 +static struct hz_init init_table1[] = {
4233 + {0x10, check_z_buffer_addr0},
4234 + {0x11, check_z_buffer_addr1},
4235 + {0x12, check_z_buffer_addr_mode},
4250 + {0x21, check_zocclusion_addr0},
4251 + {0x22, check_zocclusion_addr1},
4259 + {0x29, check_coarse_z_addr0},
4260 + {0x2a, check_coarse_z_addr1},
4274 + /*render target check */
4275 + {0x50, check_render_target_addr0},
4276 + /* H5/H6 different */
4277 + {0x51, check_render_target_addr_mode},
4278 + {0x52, check_render_target_addr1},
4280 + {0x58, check_render_target_addr0},
4281 + {0x59, check_render_target_addr_mode},
4282 + {0x5a, check_render_target_addr1},
4284 + {0x60, check_render_target_addr0},
4285 + {0x61, check_render_target_addr_mode},
4286 + {0x62, check_render_target_addr1},
4288 + {0x68, check_render_target_addr0},
4289 + {0x69, check_render_target_addr_mode},
4290 + {0x6a, check_render_target_addr1},
4327 +/* for texture stage's hazard detect */
4328 +static struct hz_init init_table2[] = {
4341 + /* H5/H6 different */
4342 + {0x18, check_t_level0_facen_addr0},
4352 +/*Check for flexible vertex format */
4353 +static struct hz_init init_table3[] = {
4360 + /* H5/H6 different */
4361 + {0x00, check_fvf_addr_mode},
4425 +/*Check for 364 fence command id*/
4426 +static struct hz_init init_table4[] = {
4434 + {0x01, check_fence_cmd_addr0},
4435 + {0x02, check_fence_cmd_addr1},
4436 + {0x03, check_fence_cmd_addr2},
4452 +/*Check for 353 fence command id*/
4453 +static struct hz_init init_table5[] = {
4464 + {0x04, check_fence_cmd_addr0},
4465 + {0x05, check_fence_cmd_addr1},
4467 + {0x07, check_fence_cmd_addr2},
4493 +static enum hazard init_table_01_00[256];
4494 +static enum hazard init_table_02_0n[256];
4495 +static enum hazard init_table_04_00[256];
4496 +static enum hazard init_table_11_364[256];
4497 +static enum hazard init_table_11_353[256];
4499 +/* Require the fence command id location to reside in the shadow system memory */
4501 +check_fence_cmd_addr_range(struct drm_via_chrome9_state *seq,
4502 + unsigned long fence_cmd_add, unsigned long size, struct drm_device *dev)
4504 + struct drm_via_chrome9_private *dev_priv =
4505 + (struct drm_via_chrome9_private *)dev->dev_private;
4506 + if (!dev_priv->shadow_map.shadow)
4508 + if ((fence_cmd_add < dev_priv->shadow_map.shadow->offset) ||
4509 + (fence_cmd_add + size >
4510 + dev_priv->shadow_map.shadow->offset +
4511 + dev_priv->shadow_map.shadow->size))
4517 + * Currently we only catch the fence cmd's address, which will
4518 + * access system memory inevitably.
4519 + * NOTE: AGP addresses are not checked (we currently assume all AGP accesses are safe).
4522 +static inline int finish_current_sequence(struct drm_via_chrome9_state *cur_seq)
4524 + switch (cur_seq->unfinished) {
4525 + case fence_cmd_address:
4526 + if (cur_seq->fence_need_check)
4527 + if (check_fence_cmd_addr_range(cur_seq,
4528 + cur_seq->fence_cmd_addr, 4, cur_seq->dev))
4534 + cur_seq->unfinished = no_sequence;
4537 +/* Only catch the cmds which potentially access system memory, and treat all
4538 + * the other cmds as safe.
4541 +investigate_hazard(uint32_t cmd, enum hazard hz,
4542 + struct drm_via_chrome9_state *cur_seq)
4544 + register uint32_t tmp;
4546 + if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
4547 + int ret = finish_current_sequence(cur_seq);
4553 + case check_render_target_addr0:
4554 + tmp = ((cmd >> 24) - 0x50) >> 3;
4555 + cur_seq->unfinished = dest_address;
4556 + cur_seq->render_target_addr[tmp] = cmd << 8;
4558 + case check_render_target_addr1:
4559 + cur_seq->unfinished = dest_address;
4560 + tmp = ((cmd >> 24) - 0x50) >> 3;
4561 + cur_seq->render_target_pitch[tmp] = (cmd & 0x000001FF) >> 5;
4563 + case check_render_target_addr_mode:
4564 + cur_seq->unfinished = dest_address;
4565 + if (!cur_seq->agp)
4566 + if (((cmd & 0x00300000) >> 20) == 2) {
4567 + DRM_ERROR("Attempt to place \
4568 + render target in system memory\n");
4572 + case check_z_buffer_addr0:
4573 + cur_seq->unfinished = z_address;
4575 + case check_z_buffer_addr1:
4576 + cur_seq->unfinished = z_address;
4577 + if ((cmd & 0x00000003) == 2) {
4578 + DRM_ERROR("Attempt to place \
4579 + Z buffer in system memory\n");
4583 + case check_z_buffer_addr_mode:
4584 + cur_seq->unfinished = z_address;
4585 + if (((cmd & 0x00000060) >> 5) == 2) {
4586 + DRM_ERROR("Attempt to place \
4587 + stencil buffer in system memory\n");
4591 + case check_zocclusion_addr0:
4592 + cur_seq->unfinished = zocclusion_address;
4594 + case check_zocclusion_addr1:
4595 + cur_seq->unfinished = zocclusion_address;
4596 + if (((cmd & 0x00c00000) >> 22) == 2) {
4597 + DRM_ERROR("Attempt to access system memory\n");
4601 + case check_coarse_z_addr0:
4602 + cur_seq->unfinished = coarse_z_address;
4603 + if (((cmd & 0x00300000) >> 20) == 2)
4606 + case check_coarse_z_addr1:
4607 + cur_seq->unfinished = coarse_z_address;
4609 + case check_fvf_addr_mode:
4610 + cur_seq->unfinished = fvf_address;
4611 + if (!cur_seq->agp)
4612 + if (((cmd & 0x0000c000) >> 14) == 2) {
4613 + DRM_ERROR("Attempt to place \
4614 + fvf buffer in system memory\n");
4618 + case check_t_level0_facen_addr0:
4619 + cur_seq->unfinished = tex_address;
4620 + if (!cur_seq->agp)
4621 + if ((cmd & 0x00000003) == 2 ||
4622 + ((cmd & 0x0000000c) >> 2) == 2 ||
4623 + ((cmd & 0x00000030) >> 4) == 2 ||
4624 + ((cmd & 0x000000c0) >> 6) == 2 ||
4625 + ((cmd & 0x0000c000) >> 14) == 2 ||
4626 + ((cmd & 0x00030000) >> 16) == 2) {
4627 + DRM_ERROR("Attempt to place \
4628 + texture buffer in system memory\n");
4632 + case check_fence_cmd_addr0:
4633 + cur_seq->unfinished = fence_cmd_address;
4635 + cur_seq->fence_cmd_addr =
4636 + (cur_seq->fence_cmd_addr & 0xFF000000) |
4637 + (cmd & 0x00FFFFFF);
4639 + cur_seq->fence_cmd_addr =
4640 + (cur_seq->fence_cmd_addr & 0x00FFFFFF) |
4641 + ((cmd & 0x000000FF) << 24);
4643 + case check_fence_cmd_addr1:
4644 + cur_seq->unfinished = fence_cmd_address;
4645 + if (!cur_seq->agp)
4646 + cur_seq->fence_cmd_addr =
4647 + (cur_seq->fence_cmd_addr & 0xFF000000) |
4648 + (cmd & 0x00FFFFFF);
4650 + case check_fence_cmd_addr2:
4651 + cur_seq->unfinished = fence_cmd_address;
4652 + if (cmd & 0x00040000)
4653 + cur_seq->fence_need_check = 1;
4655 + cur_seq->fence_need_check = 0;
4658 + /* We treat all the other cmds as safe. */
4664 +static inline int verify_mmio_address(uint32_t address)
4666 + if ((address > 0x3FF) && (address < 0xC00)) {
4667 + DRM_ERROR("Invalid VIDEO DMA command. "
4668 + "Attempt to access 3D- or command burst area.\n");
4670 + } else if ((address > 0xDFF) && (address < 0x1200)) {
4671 + DRM_ERROR("Invalid VIDEO DMA command. "
4672 + "Attempt to access PCI DMA area.\n");
4674 + } else if ((address > 0x1DFF) && (address < 0x2200)) {
4675 + DRM_ERROR("Invalid VIDEO DMA command. "
4676 + "Attempt to access CBU ROTATE SPACE registers.\n");
4678 + } else if ((address > 0x23FF) && (address < 0x3200)) {
4679 + DRM_ERROR("Invalid VIDEO DMA command. "
4680 + "Attempt to access PCI DMA2 area..\n");
4682 + } else if (address > 0x33FF) {
4683 + DRM_ERROR("Invalid VIDEO DMA command. "
4684 + "Attempt to access VGA registers.\n");
4690 +static inline int is_dummy_cmd(uint32_t cmd)
4692 + if ((cmd & INV_DUMMY_MASK) == 0xCC000000 ||
4693 + (cmd & INV_DUMMY_MASK) == 0xCD000000 ||
4694 + (cmd & INV_DUMMY_MASK) == 0xCE000000 ||
4695 + (cmd & INV_DUMMY_MASK) == 0xCF000000 ||
4696 + (cmd & INV_DUMMY_MASK) == 0xDD000000)
4702 +verify_2d_tail(uint32_t const **buffer, const uint32_t *buf_end,
4705 + const uint32_t *buf = *buffer;
4707 + if (buf_end - buf < dwords) {
4708 + DRM_ERROR("Illegal termination of 2d command.\n");
4712 + while (dwords--) {
4713 + if (!is_dummy_cmd(*buf++)) {
4714 + DRM_ERROR("Illegal 2d command tail.\n");
4724 +verify_video_tail(uint32_t const **buffer, const uint32_t *buf_end,
4727 + const uint32_t *buf = *buffer;
4729 + if (buf_end - buf < dwords) {
4730 + DRM_ERROR("Illegal termination of video command.\n");
4733 + while (dwords--) {
4734 + if (*buf && !is_dummy_cmd(*buf)) {
4735 + DRM_ERROR("Illegal video command tail.\n");
4744 +static inline enum verifier_state
4745 +via_chrome9_check_header0(uint32_t const **buffer, const uint32_t *buf_end)
4747 + const uint32_t *buf = *buffer;
4748 + uint32_t cmd, qword, dword;
4752 + dword = qword << 1;
4754 + if (buf_end - buf < dword)
4755 + return state_error;
4757 + while (qword-- > 0) {
4759 + /* Is this condition too restrictive? */
4760 + if ((cmd & 0xFFFF) > 0x1FF) {
4761 + DRM_ERROR("Invalid header0 command io address 0x%x \
4762 + Attempt to access non-2D mmio area.\n", cmd);
4763 + return state_error;
4768 + if ((dword & 3) && verify_2d_tail(&buf, buf_end, 4 - (dword & 0x3)))
4769 + return state_error;
4772 + return state_command;
4775 +static inline enum verifier_state
4776 +via_chrome9_check_header1(uint32_t const **buffer, const uint32_t *buf_end)
4779 + const uint32_t *buf = *buffer;
4781 + dword = *(buf + 1);
4784 + if (buf + dword > buf_end)
4785 + return state_error;
4789 + if ((dword & 0x3) && verify_2d_tail(&buf, buf_end, 4 - (dword & 0x3)))
4790 + return state_error;
4793 + return state_command;
4796 +static inline enum verifier_state
4797 +via_chrome9_check_header2(uint32_t const **buffer,
4798 + const uint32_t *buf_end, struct drm_via_chrome9_state *hc_state)
4800 + uint32_t cmd1, cmd2;
4802 + const uint32_t *buf = *buffer;
4803 + const enum hazard *hz_table;
4805 + if ((buf_end - buf) < 4) {
4807 + ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
4808 + return state_error;
4810 + cmd1 = *buf & 0x0000FFFF;
4811 + cmd2 = *++buf & 0x0000FFFF;
4812 + if (((cmd1 != INV_REG_CR_BEGIN) && (cmd1 != INV_REG_3D_BEGIN)) ||
4813 + ((cmd2 != INV_REG_CR_TRANS) && (cmd2 != INV_REG_3D_TRANS))) {
4815 + ("Illegal IO address of DMA HALCYON_HEADER2 sequence.\n");
4816 + return state_error;
4818 + /* Advance to get paratype and subparatype */
4819 + cmd1 = *++buf & 0xFFFF0000;
4822 + case INV_ParaType_Attr:
4824 + hz_table = init_table_01_00;
4826 + case (INV_ParaType_Tex | (INV_SubType_Tex0 << 24)):
4827 + case (INV_ParaType_Tex | (INV_SubType_Tex1 << 24)):
4828 + case (INV_ParaType_Tex | (INV_SubType_Tex2 << 24)):
4829 + case (INV_ParaType_Tex | (INV_SubType_Tex3 << 24)):
4830 + case (INV_ParaType_Tex | (INV_SubType_Tex4 << 24)):
4831 + case (INV_ParaType_Tex | (INV_SubType_Tex5 << 24)):
4832 + case (INV_ParaType_Tex | (INV_SubType_Tex6 << 24)):
4833 + case (INV_ParaType_Tex | (INV_SubType_Tex7 << 24)):
4835 + hc_state->texture_index = (cmd1 & INV_ParaSubType_MASK) >> 24;
4836 + hz_table = init_table_02_0n;
4838 + case INV_ParaType_FVF:
4840 + hz_table = init_table_04_00;
4842 + case INV_ParaType_CR:
4844 + if (hc_state->agp)
4845 + hz_table = init_table_11_364;
4847 + hz_table = init_table_11_353;
4849 + case INV_ParaType_Dummy:
4851 + while ((buf < buf_end) && !is_agp_header(*buf))
4852 + if (!is_dummy_cmd(*buf))
4853 + return state_error;
4857 + if ((buf_end > buf) && ((buf_end - buf) & 0x3))
4858 + return state_error;
4859 + return state_command;
4860 + /* We consider the cases below all safe, so we only report an error
4861 + when one of these cmds has another header embedded.
4863 + case INV_ParaType_Vdata:
4864 + case (INV_ParaType_Tex |
4865 + ((INV_SubType_Tex0 | INV_SubType_TexSample) << 24)):
4866 + case (INV_ParaType_Tex |
4867 + ((INV_SubType_Tex1 | INV_SubType_TexSample) << 24)):
4868 + case (INV_ParaType_Tex |
4869 + ((INV_SubType_Tex2 | INV_SubType_TexSample) << 24)):
4870 + case (INV_ParaType_Tex |
4871 + ((INV_SubType_Tex3 | INV_SubType_TexSample) << 24)):
4872 + case (INV_ParaType_Tex |
4873 + ((INV_SubType_Tex4 | INV_SubType_TexSample) << 24)):
4874 + case (INV_ParaType_Tex |
4875 + ((INV_SubType_Tex5 | INV_SubType_TexSample) << 24)):
4876 + case (INV_ParaType_Tex |
4877 + ((INV_SubType_Tex6 | INV_SubType_TexSample) << 24)):
4878 + case (INV_ParaType_Tex |
4879 + ((INV_SubType_Tex7 | INV_SubType_TexSample) << 24)):
4880 + case (INV_ParaType_Tex | (INV_SubType_General << 24)):
4881 + case INV_ParaType_Pal:
4882 + case INV_ParaType_PreCR:
4883 + case INV_ParaType_Cfg:
4886 + while ((buf < buf_end) && !is_agp_header(*buf))
4889 + return state_command;
4892 + while (buf < buf_end && !is_agp_header(*buf)) {
4894 + hz = hz_table[cmd1 >> 24];
4896 + if (investigate_hazard(cmd1, hz, hc_state))
4897 + return state_error;
4898 + } else if (hc_state->unfinished &&
4899 + finish_current_sequence(hc_state))
4900 + return state_error;
4903 + if (hc_state->unfinished && finish_current_sequence(hc_state))
4904 + return state_error;
4906 + return state_command;
4909 +static inline enum verifier_state
4910 +via_chrome9_check_header3(uint32_t const **buffer,
4911 + const uint32_t *buf_end)
4913 + const uint32_t *buf = *buffer;
4916 + while (buf < buf_end && !is_agp_header(*buf))
4920 + return state_command;
4924 +static inline enum verifier_state
4925 +via_chrome9_check_vheader4(uint32_t const **buffer,
4926 + const uint32_t *buf_end)
4929 + const uint32_t *buf = *buffer;
4931 + if (buf_end - buf < 4) {
4932 + DRM_ERROR("Illegal termination of video header4 command\n");
4933 + return state_error;
4936 + data = *buf++ & ~INV_AGPHeader_MASK;
4937 + if (verify_mmio_address(data))
4938 + return state_error;
4943 + if (*buf++ != 0x00000000) {
4944 + DRM_ERROR("Illegal header4 header data\n");
4945 + return state_error;
4948 + if (buf_end - buf < data)
4949 + return state_error;
4952 + if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
4953 + return state_error;
4955 + return state_command;
4959 +static inline enum verifier_state
4960 +via_chrome9_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end)
4963 + const uint32_t *buf = *buffer;
4966 + if (buf_end - buf < 4) {
4967 + DRM_ERROR("Illegal termination of video header5 command\n");
4968 + return state_error;
4974 + if (*buf++ != 0x00000000) {
4975 + DRM_ERROR("Illegal header5 header data\n");
4976 + return state_error;
4978 + if ((buf_end - buf) < (data << 1)) {
4979 + DRM_ERROR("Illegal termination of video header5 command\n");
4980 + return state_error;
4982 + for (i = 0; i < data; ++i) {
4983 + if (verify_mmio_address(*buf++))
4984 + return state_error;
4988 + if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
4989 + return state_error;
4991 + return state_command;
4995 +via_chrome9_verify_command_stream(const uint32_t *buf,
4996 + unsigned int size, struct drm_device *dev, int agp)
4999 + struct drm_via_chrome9_private *dev_priv =
5000 + (struct drm_via_chrome9_private *) dev->dev_private;
5001 + struct drm_via_chrome9_state *hc_state = &dev_priv->hc_state;
5002 + struct drm_via_chrome9_state saved_state = *hc_state;
5004 + const uint32_t *buf_end = buf + (size >> 2);
5005 + enum verifier_state state = state_command;
5007 + hc_state->dev = dev;
5008 + hc_state->unfinished = no_sequence;
5009 + hc_state->agp = agp;
5011 + while (buf < buf_end) {
5014 + case state_header0:
5015 + state = via_chrome9_check_header0(&buf, buf_end);
5017 + case state_header1:
5018 + state = via_chrome9_check_header1(&buf, buf_end);
5020 + case state_header2:
5021 + state = via_chrome9_check_header2(&buf,
5022 + buf_end, hc_state);
5024 + case state_header3:
5025 + state = via_chrome9_check_header3(&buf, buf_end);
5027 + case state_header4:
5028 + state = via_chrome9_check_vheader4(&buf, buf_end);
5030 + case state_header5:
5031 + state = via_chrome9_check_vheader5(&buf, buf_end);
5033 + case state_header6:
5034 + case state_header7:
5035 + DRM_ERROR("Unimplemented Header 6/7 command.\n");
5036 + state = state_error;
5038 + case state_command:
5040 + if (INV_AGPHeader2 == (cmd & INV_AGPHeader_MASK))
5041 + state = state_header2;
5042 + else if (INV_AGPHeader1 == (cmd & INV_AGPHeader_MASK))
5043 + state = state_header1;
5044 + else if (INV_AGPHeader5 == (cmd & INV_AGPHeader_MASK))
5045 + state = state_header5;
5046 + else if (INV_AGPHeader6 == (cmd & INV_AGPHeader_MASK))
5047 + state = state_header6;
5048 + else if (INV_AGPHeader3 == (cmd & INV_AGPHeader_MASK))
5049 + state = state_header3;
5050 + else if (INV_AGPHeader4 == (cmd & INV_AGPHeader_MASK))
5051 + state = state_header4;
5052 + else if (INV_AGPHeader7 == (cmd & INV_AGPHeader_MASK))
5053 + state = state_header7;
5054 + else if (INV_AGPHeader0 == (cmd & INV_AGPHeader_MASK))
5055 + state = state_header0;
5057 + DRM_ERROR("Invalid command sequence\n");
5058 + state = state_error;
5063 + *hc_state = saved_state;
5067 + if (state == state_error) {
5068 + *hc_state = saved_state;
5076 +setup_hazard_table(struct hz_init init_table[],
5077 +enum hazard table[], int size)
5081 + for (i = 0; i < 256; ++i)
5082 + table[i] = forbidden_command;
5084 + for (i = 0; i < size; ++i)
5085 + table[init_table[i].code] = init_table[i].hz;
5088 +void via_chrome9_init_command_verifier(void)
5090 + setup_hazard_table(init_table1, init_table_01_00,
5091 + sizeof(init_table1) / sizeof(struct hz_init));
5092 + setup_hazard_table(init_table2, init_table_02_0n,
5093 + sizeof(init_table2) / sizeof(struct hz_init));
5094 + setup_hazard_table(init_table3, init_table_04_00,
5095 + sizeof(init_table3) / sizeof(struct hz_init));
5096 + setup_hazard_table(init_table4, init_table_11_364,
5097 + sizeof(init_table4) / sizeof(struct hz_init));
5098 + setup_hazard_table(init_table5, init_table_11_353,
5099 + sizeof(init_table5) / sizeof(struct hz_init));
5104 +++ b/drivers/gpu/drm/via_chrome9/via_chrome9_verifier.h
5107 +* Copyright 2004 The Unichrome Project. All Rights Reserved.
5109 +* Permission is hereby granted, free of charge, to any person obtaining a
5110 +* copy of this software and associated documentation files (the "Software"),
5111 +* to deal in the Software without restriction, including without limitation
5112 +* the rights to use, copy, modify, merge, publish, distribute, sub license,
5113 +* and/or sell copies of the Software, and to permit persons to whom the
5114 +* Software is furnished to do so, subject to the following conditions:
5116 +* The above copyright notice and this permission notice (including the
5117 +* next paragraph) shall be included in all copies or substantial portions
5120 +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
5121 +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
5122 +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
5123 +* THE UNICHROME PROJECT, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
5124 +* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
5125 +* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
5126 +* THE USE OR OTHER DEALINGS IN THE SOFTWARE.
5128 +* Author: Scott Fang 2008.
5131 +#ifndef _via_chrome9_VERIFIER_H_
5132 +#define _via_chrome9_VERIFIER_H_
5134 +#define VIA_CHROME9_VERIFY_ENABLE 1
5136 +enum drm_via_chrome9_sequence {
5141 + zocclusion_address,
5147 +struct drm_via_chrome9_state {
5148 + uint32_t texture_index;
5149 + uint32_t render_target_addr[4];
5150 + uint32_t render_target_pitch[4];
5152 + uint32_t fence_cmd_addr;
5153 + uint32_t fence_need_check;
5154 + enum drm_via_chrome9_sequence unfinished;
5157 + struct drm_device *dev;
5159 + const uint32_t *buf_start;
5162 +extern int via_chrome9_verify_command_stream(const uint32_t *buf,
5163 + unsigned int size, struct drm_device *dev, int agp);
5164 +void via_chrome9_init_command_verifier(void);