1From: Bruce Chang <BruceChang@via.com.tw>
2Subject: add Via chrome9 drm support
3Patch-mainline: 2.6.29
4
5Signed-off-by: Bruce Chang <BruceChang@via.com.tw>
6Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
7
8---
9 drivers/gpu/drm/Kconfig | 7
10 drivers/gpu/drm/Makefile | 1
11 drivers/gpu/drm/via_chrome9/Makefile | 8
12 drivers/gpu/drm/via_chrome9/via_chrome9_3d_reg.h | 407 ++++++
13 drivers/gpu/drm/via_chrome9/via_chrome9_dma.c | 1285 +++++++++++++++++++++
14 drivers/gpu/drm/via_chrome9/via_chrome9_dma.h | 69 +
15 drivers/gpu/drm/via_chrome9/via_chrome9_drm.c | 950 +++++++++++++++
16 drivers/gpu/drm/via_chrome9/via_chrome9_drm.h | 443 +++++++
17 drivers/gpu/drm/via_chrome9/via_chrome9_drv.c | 224 +++
18 drivers/gpu/drm/via_chrome9/via_chrome9_drv.h | 150 ++
19 drivers/gpu/drm/via_chrome9/via_chrome9_mm.c | 435 +++++++
20 drivers/gpu/drm/via_chrome9/via_chrome9_mm.h | 67 +
21 drivers/gpu/drm/via_chrome9/via_chrome9_verifier.c | 982 ++++++++++++++++
22 drivers/gpu/drm/via_chrome9/via_chrome9_verifier.h | 61
23 14 files changed, 5089 insertions(+)
24
25--- a/drivers/gpu/drm/Kconfig
26+++ b/drivers/gpu/drm/Kconfig
27@@ -99,6 +99,13 @@ config DRM_VIA
28 Choose this option if you have a Via unichrome or compatible video
29 chipset. If M is selected the module will be called via.
30
31+config DRM_VIA_CHROME9
32+ tristate "Via chrome9 video cards"
33+ depends on DRM
34+ help
35+ Choose this option if you have a Via chrome9 or compatible video
36+ chipset. If M is selected the module will be called via_chrome9.
37+
38 config DRM_SAVAGE
39 tristate "Savage video cards"
40 depends on DRM
41--- a/drivers/gpu/drm/Makefile
42+++ b/drivers/gpu/drm/Makefile
43@@ -23,4 +23,5 @@ obj-$(CONFIG_DRM_I915) += i915/
44 obj-$(CONFIG_DRM_SIS) += sis/
45 obj-$(CONFIG_DRM_SAVAGE)+= savage/
46 obj-$(CONFIG_DRM_VIA) +=via/
47+obj-$(CONFIG_DRM_VIA_CHROME9) +=via_chrome9/
48
49--- /dev/null
50+++ b/drivers/gpu/drm/via_chrome9/Makefile
51@@ -0,0 +1,8 @@
52+#
53+# Makefile for the drm device driver. This driver provides support for the
54+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
55+
56+ccflags-y := -Iinclude/drm
57+via_chrome9-y := via_chrome9_drv.o via_chrome9_drm.o via_chrome9_mm.o via_chrome9_dma.o via_chrome9_verifier.o
58+
59+obj-$(CONFIG_DRM_VIA_CHROME9) += via_chrome9.o
60\ No newline at end of file
61--- /dev/null
62+++ b/drivers/gpu/drm/via_chrome9/via_chrome9_3d_reg.h
63@@ -0,0 +1,407 @@
64+/*
65+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
66+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
67+ *
68+ * Permission is hereby granted, free of charge, to any person
69+ * obtaining a copy of this software and associated documentation
70+ * files (the "Software"), to deal in the Software without
71+ * restriction, including without limitation the rights to use,
72+ * copy, modify, merge, publish, distribute, sub license,
73+ * and/or sell copies of the Software, and to permit persons to
74+ * whom the Software is furnished to do so, subject to the
75+ * following conditions:
76+ *
77+ * The above copyright notice and this permission notice
78+ * (including the next paragraph) shall be included in all
79+ * copies or substantial portions of the Software.
80+ *
81+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
82+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
83+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
84+ * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
85+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
86+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
87+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
88+ * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
89+ */
90+
91+#ifndef VIA_CHROME9_3D_REG_H
92+#define VIA_CHROME9_3D_REG_H
93+#define GetMMIORegister(base, offset) \
94+ (*(__volatile__ unsigned int *)(void *)(((unsigned char *)(base)) + \
95+ (offset)))
96+#define SetMMIORegister(base, offset, val) \
97+ (*(__volatile__ unsigned int *)(void *)(((unsigned char *)(base)) + \
98+ (offset)) = (val))
99+
100+#define GetMMIORegisterU8(base, offset) \
101+ (*(__volatile__ unsigned char *)(void *)(((unsigned char *)(base)) + \
102+ (offset)))
103+#define SetMMIORegisterU8(base, offset, val) \
104+ (*(__volatile__ unsigned char *)(void *)(((unsigned char *)(base)) + \
105+ (offset)) = (val))
106+
107+#define BCI_SEND(bci, value) (*(bci)++ = (unsigned long)(value))
108+#define BCI_SET_STREAM_REGISTER(bci_base, bci_index, reg_value) \
109+do { \
110+ unsigned long cmd; \
111+ \
112+ cmd = (0x90000000 \
113+ | (1<<16) /* stream processor register */ \
114+ | (bci_index & 0x3FFC)); /* MMIO register address */ \
115+ BCI_SEND(bci_base, cmd); \
116+ BCI_SEND(bci_base, reg_value); \
117+ } while (0)
118+
119+/* Command Header Type */
120+
121+#define INV_AGPHeader0 0xFE000000
122+#define INV_AGPHeader1 0xFE010000
123+#define INV_AGPHeader2 0xFE020000
124+#define INV_AGPHeader3 0xFE030000
125+#define INV_AGPHeader4 0xFE040000
126+#define INV_AGPHeader5 0xFE050000
127+#define INV_AGPHeader6 0xFE060000
128+#define INV_AGPHeader7 0xFE070000
129+#define INV_AGPHeader82 0xFE820000
130+#define INV_AGPHeader_MASK 0xFFFF0000
131+#define INV_DUMMY_MASK 0xFF000000
132+
133+/*send pause address of AGP ring command buffer via this IO port*/
134+#define INV_REG_PCIPAUSE 0x294
135+#define INV_REG_PCIPAUSE_ENABLE 0x4
136+
137+#define INV_CMDBUF_THRESHOLD (8)
138+#define INV_QW_PAUSE_ALIGN 0x40
139+
140+/* Transmission IO Space*/
141+#define INV_REG_CR_TRANS 0x041C
142+#define INV_REG_CR_BEGIN 0x0420
143+#define INV_REG_CR_END 0x0438
144+
145+#define INV_REG_3D_TRANS 0x043C
146+#define INV_REG_3D_BEGIN 0x0440
147+#define INV_REG_3D_END 0x06FC
148+#define INV_REG_23D_WAIT 0x326C
149+/*3D / 2D ID Control (Only For Group A)*/
150+#define INV_REG_2D3D_ID_CTRL 0x060
151+
152+
153+/* Engine Status */
154+
155+#define INV_RB_ENG_STATUS 0x0400
156+#define INV_ENG_BUSY_HQV0 0x00040000
157+#define INV_ENG_BUSY_HQV1 0x00020000
158+#define INV_ENG_BUSY_CR 0x00000010
159+#define INV_ENG_BUSY_MPEG 0x00000008
160+#define INV_ENG_BUSY_VQ 0x00000004
161+#define INV_ENG_BUSY_2D 0x00000002
162+#define INV_ENG_BUSY_3D 0x00001FE1
163+#define INV_ENG_BUSY_ALL \
164+ (INV_ENG_BUSY_2D | INV_ENG_BUSY_3D | INV_ENG_BUSY_CR)
165+
166+/* Command Queue Status*/
167+#define INV_RB_VQ_STATUS 0x0448
168+#define INV_VQ_FULL 0x40000000
169+
170+/* AGP command buffer pointer current position*/
171+#define INV_RB_AGPCMD_CURRADDR 0x043C
172+
173+/* AGP command buffer status*/
174+#define INV_RB_AGPCMD_STATUS 0x0444
175+#define INV_AGPCMD_InPause 0x80000000
176+
177+/*AGP command buffer pause address*/
178+#define INV_RB_AGPCMD_PAUSEADDR 0x045C
179+
180+/*AGP command buffer jump address*/
181+#define INV_RB_AGPCMD_JUMPADDR 0x0460
182+
183+/*AGP command buffer start address*/
184+#define INV_RB_AGPCMD_STARTADDR 0x0464
185+
186+
187+/* Constants */
188+#define NUMBER_OF_EVENT_TAGS 1024
189+#define NUMBER_OF_APERTURES_CLB 16
190+
191+/* Register definition */
192+#define HW_SHADOW_ADDR 0x8520
193+#define HW_GARTTABLE_ADDR 0x8540
194+
195+#define INV_HSWFlag_DBGMASK 0x00000FFF
196+#define INV_HSWFlag_ENCODEMASK 0x007FFFF0
197+#define INV_HSWFlag_ADDRSHFT 8
198+#define INV_HSWFlag_DECODEMASK \
199+ (INV_HSWFlag_ENCODEMASK << INV_HSWFlag_ADDRSHFT)
200+#define INV_HSWFlag_ADDR_ENCODE(x) 0xCC000000
201+#define INV_HSWFlag_ADDR_DECODE(x) \
202+ (((unsigned int)x & INV_HSWFlag_DECODEMASK) >> INV_HSWFlag_ADDRSHFT)
203+
204+
205+#define INV_SubA_HAGPBstL 0x60000000
206+#define INV_SubA_HAGPBstH 0x61000000
207+#define INV_SubA_HAGPBendL 0x62000000
208+#define INV_SubA_HAGPBendH 0x63000000
209+#define INV_SubA_HAGPBpL 0x64000000
210+#define INV_SubA_HAGPBpID 0x65000000
211+#define INV_HAGPBpID_PAUSE 0x00000000
212+#define INV_HAGPBpID_JUMP 0x00000100
213+#define INV_HAGPBpID_STOP 0x00000200
214+
215+#define INV_HAGPBpH_MASK 0x000000FF
216+#define INV_HAGPBpH_SHFT 0
217+
218+#define INV_SubA_HAGPBjumpL 0x66000000
219+#define INV_SubA_HAGPBjumpH 0x67000000
220+#define INV_HAGPBjumpH_MASK 0x000000FF
221+#define INV_HAGPBjumpH_SHFT 0
222+
223+#define INV_SubA_HFthRCM 0x68000000
224+#define INV_HFthRCM_MASK 0x003F0000
225+#define INV_HFthRCM_SHFT 16
226+#define INV_HFthRCM_8 0x00080000
227+#define INV_HFthRCM_10 0x000A0000
228+#define INV_HFthRCM_18 0x00120000
229+#define INV_HFthRCM_24 0x00180000
230+#define INV_HFthRCM_32 0x00200000
231+
232+#define INV_HAGPBClear 0x00000008
233+
234+#define INV_HRSTTrig_RestoreAGP 0x00000004
235+#define INV_HRSTTrig_RestoreAll 0x00000002
236+#define INV_HAGPBTrig 0x00000001
237+
238+#define INV_ParaSubType_MASK 0xff000000
239+#define INV_ParaType_MASK 0x00ff0000
240+#define INV_ParaOS_MASK 0x0000ff00
241+#define INV_ParaAdr_MASK 0x000000ff
242+#define INV_ParaSubType_SHIFT 24
243+#define INV_ParaType_SHIFT 16
244+#define INV_ParaOS_SHIFT 8
245+#define INV_ParaAdr_SHIFT 0
246+
247+#define INV_ParaType_Vdata 0x00000000
248+#define INV_ParaType_Attr 0x00010000
249+#define INV_ParaType_Tex 0x00020000
250+#define INV_ParaType_Pal 0x00030000
251+#define INV_ParaType_FVF 0x00040000
252+#define INV_ParaType_PreCR 0x00100000
253+#define INV_ParaType_CR 0x00110000
254+#define INV_ParaType_Cfg 0x00fe0000
255+#define INV_ParaType_Dummy 0x00300000
256+
257+#define INV_SubType_Tex0 0x00000000
258+#define INV_SubType_Tex1 0x00000001
259+#define INV_SubType_Tex2 0x00000002
260+#define INV_SubType_Tex3 0x00000003
261+#define INV_SubType_Tex4 0x00000004
262+#define INV_SubType_Tex5 0x00000005
263+#define INV_SubType_Tex6 0x00000006
264+#define INV_SubType_Tex7 0x00000007
265+#define INV_SubType_General 0x000000fe
266+#define INV_SubType_TexSample 0x00000020
267+
268+#define INV_HWBasL_MASK 0x00FFFFFF
269+#define INV_HWBasH_MASK 0xFF000000
270+#define INV_HWBasH_SHFT 24
271+#define INV_HWBasL(x) ((unsigned int)(x) & INV_HWBasL_MASK)
272+#define INV_HWBasH(x) ((unsigned int)(x) >> INV_HWBasH_SHFT)
273+#define INV_HWBas256(x) ((unsigned int)(x) >> 8)
274+#define INV_HWPit32(x) ((unsigned int)(x) >> 5)
275+
276+/* Read Back Register Setting */
277+#define INV_SubA_HSetRBGID 0x02000000
278+#define INV_HSetRBGID_CR 0x00000000
279+#define INV_HSetRBGID_FE 0x00000001
280+#define INV_HSetRBGID_PE 0x00000002
281+#define INV_HSetRBGID_RC 0x00000003
282+#define INV_HSetRBGID_PS 0x00000004
283+#define INV_HSetRBGID_XE 0x00000005
284+#define INV_HSetRBGID_BE 0x00000006
285+
286+
287+struct drm_clb_event_tag_info {
288+ unsigned int *linear_address;
289+ unsigned int *event_tag_linear_address;
290+ int usage[NUMBER_OF_EVENT_TAGS];
291+ unsigned int pid[NUMBER_OF_EVENT_TAGS];
292+};
293+
294+static inline int is_agp_header(unsigned int data)
295+{
296+ switch (data & INV_AGPHeader_MASK) {
297+ case INV_AGPHeader0:
298+ case INV_AGPHeader1:
299+ case INV_AGPHeader2:
300+ case INV_AGPHeader3:
301+ case INV_AGPHeader4:
302+ case INV_AGPHeader5:
303+ case INV_AGPHeader6:
304+ case INV_AGPHeader7:
305+ return 1;
306+ default:
307+ return 0;
308+ }
309+}
310+
311+/* Header0: 2D */
312+#define ADDCmdHeader0_INVI(pCmd, dwCount) \
313+{ \
314+ /* 4 unsigned int align, insert NULL Command for padding */ \
315+	while (((unsigned long)(pCmd)) & 0xF) { \
316+ *(pCmd)++ = 0xCC000000; \
317+ } \
318+ *(pCmd)++ = INV_AGPHeader0; \
319+ *(pCmd)++ = (dwCount); \
320+ *(pCmd)++ = 0; \
321+ *(pCmd)++ = (unsigned int)INV_HSWFlag_ADDR_ENCODE(pCmd); \
322+}
323+
324+/* Header1: 2D */
325+#define ADDCmdHeader1_INVI(pCmd, dwAddr, dwCount) \
326+{ \
327+ /* 4 unsigned int align, insert NULL Command for padding */ \
328+	while (((unsigned long)(pCmd)) & 0xF) { \
329+ *(pCmd)++ = 0xCC000000; \
330+ } \
331+ *(pCmd)++ = INV_AGPHeader1 | (dwAddr); \
332+ *(pCmd)++ = (dwCount); \
333+ *(pCmd)++ = 0; \
334+ *(pCmd)++ = (unsigned int)INV_HSWFlag_ADDR_ENCODE(pCmd); \
335+}
336+
337+/* Header2: CR/3D */
338+#define ADDCmdHeader2_INVI(pCmd, dwAddr, dwType) \
339+{ \
340+ /* 4 unsigned int align, insert NULL Command for padding */ \
341+ while (((unsigned int)(pCmd)) & 0xF) { \
342+ *(pCmd)++ = 0xCC000000; \
343+ } \
344+ *(pCmd)++ = INV_AGPHeader2 | ((dwAddr)+4); \
345+ *(pCmd)++ = (dwAddr); \
346+ *(pCmd)++ = (dwType); \
347+ *(pCmd)++ = (unsigned int)INV_HSWFlag_ADDR_ENCODE(pCmd); \
348+}
349+
350+/* Header2: CR/3D with SW Flag */
351+#define ADDCmdHeader2_SWFlag_INVI(pCmd, dwAddr, dwType, dwSWFlag) \
352+{ \
353+ /* 4 unsigned int align, insert NULL Command for padding */ \
354+	while (((unsigned long)(pCmd)) & 0xF) { \
355+ *(pCmd)++ = 0xCC000000; \
356+ } \
357+ *(pCmd)++ = INV_AGPHeader2 | ((dwAddr)+4); \
358+ *(pCmd)++ = (dwAddr); \
359+ *(pCmd)++ = (dwType); \
360+ *(pCmd)++ = (dwSWFlag); \
361+}
362+
363+
364+/* Header3: 3D */
365+#define ADDCmdHeader3_INVI(pCmd, dwType, dwStart, dwCount) \
366+{ \
367+ /* 4 unsigned int align, insert NULL Command for padding */ \
368+	while (((unsigned long)(pCmd)) & 0xF) { \
369+ *(pCmd)++ = 0xCC000000; \
370+ } \
371+ *(pCmd)++ = INV_AGPHeader3 | INV_REG_3D_TRANS; \
372+ *(pCmd)++ = (dwCount); \
373+ *(pCmd)++ = (dwType) | ((dwStart) & 0xFFFF); \
374+ *(pCmd)++ = (unsigned int)INV_HSWFlag_ADDR_ENCODE(pCmd); \
375+}
376+
377+/* Header3: 3D with SW Flag */
378+#define ADDCmdHeader3_SWFlag_INVI(pCmd, dwType, dwStart, dwSWFlag, dwCount) \
379+{ \
380+ /* 4 unsigned int align, insert NULL Command for padding */ \
381+	while (((unsigned long)(pCmd)) & 0xF) { \
382+ *(pCmd)++ = 0xCC000000; \
383+ } \
384+ *(pCmd)++ = INV_AGPHeader3 | INV_REG_3D_TRANS; \
385+ *(pCmd)++ = (dwCount); \
386+ *(pCmd)++ = (dwType) | ((dwStart) & 0xFFFF); \
387+ *(pCmd)++ = (dwSWFlag); \
388+}
389+
390+/* Header4: DVD */
391+#define ADDCmdHeader4_INVI(pCmd, dwAddr, dwCount, id) \
392+{ \
393+ /* 4 unsigned int align, insert NULL Command for padding */ \
394+	while (((unsigned long)(pCmd)) & 0xF) { \
395+ *(pCmd)++ = 0xCC000000; \
396+ } \
397+ *(pCmd)++ = INV_AGPHeader4 | (dwAddr); \
398+ *(pCmd)++ = (dwCount); \
399+ *(pCmd)++ = (id); \
400+ *(pCmd)++ = 0; \
401+}
402+
403+/* Header5: DVD */
404+#define ADDCmdHeader5_INVI(pCmd, dwQWcount, id) \
405+{ \
406+ /* 4 unsigned int align, insert NULL Command for padding */ \
407+	while (((unsigned long)(pCmd)) & 0xF) { \
408+ *(pCmd)++ = 0xCC000000; \
409+ } \
410+ *(pCmd)++ = INV_AGPHeader5; \
411+ *(pCmd)++ = (dwQWcount); \
412+ *(pCmd)++ = (id); \
413+ *(pCmd)++ = 0; \
414+}
415+
416+/* Header6: DEBUG */
417+#define ADDCmdHeader6_INVI(pCmd) \
418+{ \
419+ /* 4 unsigned int align, insert NULL Command for padding */ \
420+	while (((unsigned long)(pCmd)) & 0xF) { \
421+ *(pCmd)++ = 0xCC000000; \
422+ } \
423+ *(pCmd)++ = INV_AGPHeader6; \
424+ *(pCmd)++ = 0; \
425+ *(pCmd)++ = 0; \
426+ *(pCmd)++ = 0; \
427+}
428+
429+/* Header7: DMA */
430+#define ADDCmdHeader7_INVI(pCmd, dwQWcount, id) \
431+{ \
432+ /* 4 unsigned int align, insert NULL Command for padding */ \
433+	while (((unsigned long)(pCmd)) & 0xF) { \
434+ *(pCmd)++ = 0xCC000000; \
435+ } \
436+ *(pCmd)++ = INV_AGPHeader7; \
437+ *(pCmd)++ = (dwQWcount); \
438+ *(pCmd)++ = (id); \
439+ *(pCmd)++ = 0; \
440+}
441+
442+/* Header82: Branch buffer */
443+#define ADDCmdHeader82_INVI(pCmd, dwAddr, dwType) \
444+{ \
445+ /* 4 unsigned int align, insert NULL Command for padding */ \
446+	while (((unsigned long)(pCmd)) & 0xF) { \
447+ *(pCmd)++ = 0xCC000000; \
448+ } \
449+ *(pCmd)++ = INV_AGPHeader82 | ((dwAddr)+4); \
450+ *(pCmd)++ = (dwAddr); \
451+ *(pCmd)++ = (dwType); \
452+ *(pCmd)++ = 0xCC000000; \
453+}
454+
455+
456+#define ADD2DCmd_INVI(pCmd, dwAddr, dwCmd) \
457+{ \
458+ *(pCmd)++ = (dwAddr); \
459+ *(pCmd)++ = (dwCmd); \
460+}
461+
462+#define ADDCmdData_INVI(pCmd, dwCmd) (*(pCmd)++ = (dwCmd))
463+
464+#define ADDCmdDataStream_INVI(pCmdBuf, pCmd, dwCount) \
465+{ \
466+ memcpy((pCmdBuf), (pCmd), ((dwCount)<<2)); \
467+ (pCmdBuf) += (dwCount); \
468+}
469+
470+#endif
471--- /dev/null
472+++ b/drivers/gpu/drm/via_chrome9/via_chrome9_dma.c
473@@ -0,0 +1,1285 @@
474+/*
475+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
476+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
477+ *
478+ * Permission is hereby granted, free of charge, to any person
479+ * obtaining a copy of this software and associated documentation
480+ * files (the "Software"), to deal in the Software without
481+ * restriction, including without limitation the rights to use,
482+ * copy, modify, merge, publish, distribute, sub license,
483+ * and/or sell copies of the Software, and to permit persons to
484+ * whom the Software is furnished to do so, subject to the
485+ * following conditions:
486+ *
487+ * The above copyright notice and this permission notice
488+ * (including the next paragraph) shall be included in all
489+ * copies or substantial portions of the Software.
490+ *
491+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
492+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
493+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
494+ * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
495+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
496+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
497+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
498+ * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
499+ */
500+
501+#include "drmP.h"
502+#include "drm.h"
503+#include "via_chrome9_drm.h"
504+#include "via_chrome9_drv.h"
505+#include "via_chrome9_3d_reg.h"
506+#include "via_chrome9_dma.h"
507+
508+#define NULLCOMMANDNUMBER 256
509+unsigned int NULL_COMMAND_INV[4] =
510+ { 0xCC000000, 0xCD000000, 0xCE000000, 0xCF000000 };
511+
512+void
513+via_chrome9ke_assert(int a)
514+{
515+}
516+
517+unsigned int
518+ProtectSizeValue(unsigned int size)
519+{
520+ unsigned int i;
521+ for (i = 0; i < 8; i++)
522+ if ((size > (1 << (i + 12)))
523+ && (size <= (1 << (i + 13))))
524+ return i + 1;
525+ return 0;
526+}
527+
528+void via_chrome9_dma_init_inv(struct drm_device *dev)
529+{
530+ struct drm_via_chrome9_private *dev_priv =
531+ (struct drm_via_chrome9_private *)dev->dev_private;
532+ struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
533+ dev_priv->dma_manager;
534+
535+ if (dev_priv->chip_sub_index == CHIP_H6S2) {
536+ unsigned int *pGARTTable;
537+ unsigned int i, entries, GARTOffset;
538+ unsigned char sr6a, sr6b, sr6c, sr6f, sr7b;
539+ unsigned int *addrlinear;
540+ unsigned int size, alignedoffset;
541+
542+ entries = dev_priv->pagetable_map.pagetable_size /
543+ sizeof(unsigned int);
544+ pGARTTable = dev_priv->pagetable_map.pagetable_handle;
545+
546+ GARTOffset = dev_priv->pagetable_map.pagetable_offset;
547+
548+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
549+ sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
550+ sr6c &= (~0x80);
551+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
552+
553+ sr6a = (unsigned char)((GARTOffset & 0xff000) >> 12);
554+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6a);
555+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6a);
556+
557+ sr6b = (unsigned char)((GARTOffset & 0xff00000) >> 20);
558+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6b);
559+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6b);
560+
561+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
562+ sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
563+ sr6c |= ((unsigned char)((GARTOffset >> 28) & 0x01));
564+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
565+
566+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x7b);
567+ sr7b = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
568+ sr7b &= (~0x0f);
569+ sr7b |= ProtectSizeValue(dev_priv->
570+ pagetable_map.pagetable_size);
571+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr7b);
572+
573+ for (i = 0; i < entries; i++)
574+ writel(0x80000000, pGARTTable+i);
575+
576+ /*flush*/
577+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6f);
578+ do {
579+ sr6f = GetMMIORegisterU8(dev_priv->mmio->handle,
580+ 0x83c5);
581+ } while (sr6f & 0x80);
582+
583+ sr6f |= 0x80;
584+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6f);
585+
586+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
587+ sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
588+ sr6c |= 0x80;
589+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
590+
591+ if (dev_priv->drm_agp_type != DRM_AGP_DISABLED) {
592+ size = lpcmDMAManager->DMASize * sizeof(unsigned int) +
593+ dev_priv->agp_size;
594+ alignedoffset = 0;
595+ entries = (size + PAGE_SIZE - 1) / PAGE_SIZE;
596+ addrlinear =
597+ (unsigned int *)dev_priv->pcie_vmalloc_nocache;
598+
599+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
600+ sr6c =
601+ GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
602+ sr6c &= (~0x80);
603+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
604+
605+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6f);
606+ do {
607+ sr6f = GetMMIORegisterU8(dev_priv->mmio->handle,
608+ 0x83c5);
609+ } while (sr6f & 0x80);
610+
611+ for (i = 0; i < entries; i++)
612+ writel(page_to_pfn(vmalloc_to_page(
613+ (void *)addrlinear + PAGE_SIZE * i)) &
614+ 0x3fffffff, pGARTTable + i + alignedoffset);
615+
616+ sr6f |= 0x80;
617+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6f);
618+
619+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
620+ sr6c =
621+ GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
622+ sr6c |= 0x80;
623+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
624+ }
625+
626+ }
627+
628+ if (dev_priv->drm_agp_type == DRM_AGP_DOUBLE_BUFFER)
629+ SetAGPDoubleCmd_inv(dev);
630+ else if (dev_priv->drm_agp_type == DRM_AGP_RING_BUFFER)
631+ SetAGPRingCmdRegs_inv(dev);
632+
633+ return ;
634+}
635+
636+static unsigned int
637+InitPCIEGART(struct drm_via_chrome9_private *dev_priv)
638+{
639+ unsigned int *pGARTTable;
640+ unsigned int i, entries, GARTOffset;
641+ unsigned char sr6a, sr6b, sr6c, sr6f, sr7b;
642+
643+ if (!dev_priv->pagetable_map.pagetable_size)
644+ return 0;
645+
646+ entries = dev_priv->pagetable_map.pagetable_size / sizeof(unsigned int);
647+
648+ pGARTTable =
649+ ioremap_nocache(dev_priv->fb_base_address +
650+ dev_priv->pagetable_map.pagetable_offset,
651+ dev_priv->pagetable_map.pagetable_size);
652+ if (pGARTTable)
653+ dev_priv->pagetable_map.pagetable_handle = pGARTTable;
654+ else
655+ return 0;
656+
657+ /*set gart table base */
658+ GARTOffset = dev_priv->pagetable_map.pagetable_offset;
659+
660+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
661+ sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
662+ sr6c &= (~0x80);
663+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
664+
665+ sr6a = (unsigned char) ((GARTOffset & 0xff000) >> 12);
666+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6a);
667+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6a);
668+
669+ sr6b = (unsigned char) ((GARTOffset & 0xff00000) >> 20);
670+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6b);
671+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6b);
672+
673+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
674+ sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
675+ sr6c |= ((unsigned char) ((GARTOffset >> 28) & 0x01));
676+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
677+
678+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x7b);
679+ sr7b = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
680+ sr7b &= (~0x0f);
681+ sr7b |= ProtectSizeValue(dev_priv->pagetable_map.pagetable_size);
682+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr7b);
683+
684+ for (i = 0; i < entries; i++)
685+ writel(0x80000000, pGARTTable + i);
686+ /*flush */
687+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6f);
688+ do {
689+ sr6f = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
690+ }
691+ while (sr6f & 0x80)
692+ ;
693+
694+ sr6f |= 0x80;
695+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6f);
696+
697+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
698+ sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
699+ sr6c |= 0x80;
700+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
701+
702+ return 1;
703+}
704+
705+
706+static unsigned int *
707+AllocAndBindPCIEMemory(struct drm_via_chrome9_private *dev_priv,
708+ unsigned int size, unsigned int offset)
709+{
710+ unsigned int *addrlinear;
711+ unsigned int *pGARTTable;
712+ unsigned int entries, alignedoffset, i;
713+ unsigned char sr6c, sr6f;
714+
715+ if (!size)
716+ return NULL;
717+
718+ entries = (size + PAGE_SIZE - 1) / PAGE_SIZE;
719+ alignedoffset = (offset + PAGE_SIZE - 1) / PAGE_SIZE;
720+
721+ if ((entries + alignedoffset) >
722+ (dev_priv->pagetable_map.pagetable_size / sizeof(unsigned int)))
723+ return NULL;
724+
725+ addrlinear =
726+ __vmalloc(entries * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM,
727+ PAGE_KERNEL_NOCACHE);
728+
729+ if (!addrlinear)
730+ return NULL;
731+
732+ pGARTTable = dev_priv->pagetable_map.pagetable_handle;
733+
734+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
735+ sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
736+ sr6c &= (~0x80);
737+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
738+
739+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6f);
740+ do {
741+ sr6f = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
742+ }
743+ while (sr6f & 0x80)
744+ ;
745+
746+ for (i = 0; i < entries; i++)
747+ writel(page_to_pfn
748+ (vmalloc_to_page((void *) addrlinear + PAGE_SIZE * i)) &
749+ 0x3fffffff, pGARTTable + i + alignedoffset);
750+
751+ sr6f |= 0x80;
752+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6f);
753+
754+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
755+ sr6c = GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
756+ sr6c |= 0x80;
757+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
758+
759+ return addrlinear;
760+
761+}
762+
763+void
764+SetAGPDoubleCmd_inv(struct drm_device *dev)
765+{
766+ /* we now don't use double buffer */
767+ return;
768+}
769+
770+void
771+SetAGPRingCmdRegs_inv(struct drm_device *dev)
772+{
773+ struct drm_via_chrome9_private *dev_priv =
774+ (struct drm_via_chrome9_private *) dev->dev_private;
775+ struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
776+ (struct drm_via_chrome9_DMA_manager *) dev_priv->dma_manager;
777+ unsigned int AGPBufLinearBase = 0, AGPBufPhysicalBase = 0;
778+ unsigned long *pFree;
779+ unsigned int dwStart, dwEnd, dwPause, AGPCurrAddr, AGPCurStat, CurrAGP;
780+ unsigned int dwReg60, dwReg61, dwReg62, dwReg63,
781+ dwReg64, dwReg65, dwJump;
782+
783+ lpcmDMAManager->pFree = lpcmDMAManager->pBeg;
784+
785+ AGPBufLinearBase = (unsigned int) lpcmDMAManager->addr_linear;
786+ AGPBufPhysicalBase =
787+ (dev_priv->chip_agp ==
788+ CHIP_PCIE) ? 0 : (unsigned int) dev->agp->base +
789+ lpcmDMAManager->pPhysical;
790+ /*add shadow offset */
791+
792+ CurrAGP =
793+ GetMMIORegister(dev_priv->mmio->handle, INV_RB_AGPCMD_CURRADDR);
794+ AGPCurStat =
795+ GetMMIORegister(dev_priv->mmio->handle, INV_RB_AGPCMD_STATUS);
796+
797+ if (AGPCurStat & INV_AGPCMD_InPause) {
798+ AGPCurrAddr =
799+ GetMMIORegister(dev_priv->mmio->handle,
800+ INV_RB_AGPCMD_CURRADDR);
801+ pFree = (unsigned long *) (AGPBufLinearBase + AGPCurrAddr -
802+ AGPBufPhysicalBase);
803+ ADDCmdHeader2_INVI(pFree, INV_REG_CR_TRANS, INV_ParaType_Dummy);
804+ if (dev_priv->chip_sub_index == CHIP_H6S2)
805+ do {
806+ ADDCmdData_INVI(pFree, 0xCCCCCCC0);
807+ ADDCmdData_INVI(pFree, 0xDDD00000);
808+ }
809+ while ((u32)((unsigned int) pFree) & 0x7f)
810+ ;
811+ /*for 8*128bit aligned */
812+ else
813+ do {
814+ ADDCmdData_INVI(pFree, 0xCCCCCCC0);
815+ ADDCmdData_INVI(pFree, 0xDDD00000);
816+ }
817+ while ((u32) ((unsigned int) pFree) & 0x1f)
818+ ;
819+ /*for 256bit aligned */
820+ dwPause =
821+ (u32) (((unsigned int) pFree) - AGPBufLinearBase +
822+ AGPBufPhysicalBase - 16);
823+
824+ dwReg64 = INV_SubA_HAGPBpL | INV_HWBasL(dwPause);
825+ dwReg65 =
826+ INV_SubA_HAGPBpID | INV_HWBasH(dwPause) |
827+ INV_HAGPBpID_STOP;
828+
829+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS,
830+ INV_ParaType_PreCR);
831+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
832+ dwReg64);
833+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
834+ dwReg65);
835+
836+ while (GetMMIORegister(dev_priv->mmio->handle,
837+ INV_RB_ENG_STATUS) & INV_ENG_BUSY_ALL)
838+ ;
839+ }
840+ dwStart =
841+ (u32) ((unsigned int) lpcmDMAManager->pBeg - AGPBufLinearBase +
842+ AGPBufPhysicalBase);
843+ dwEnd = (u32) ((unsigned int) lpcmDMAManager->pEnd - AGPBufLinearBase +
844+ AGPBufPhysicalBase);
845+
846+ lpcmDMAManager->pFree = lpcmDMAManager->pBeg;
847+ if (dev_priv->chip_sub_index == CHIP_H6S2) {
848+ ADDCmdHeader2_INVI(lpcmDMAManager->pFree, INV_REG_CR_TRANS,
849+ INV_ParaType_Dummy);
850+ do {
851+ ADDCmdData_INVI(lpcmDMAManager->pFree, 0xCCCCCCC0);
852+ ADDCmdData_INVI(lpcmDMAManager->pFree, 0xDDD00000);
853+ }
854+ while ((u32)((unsigned long *) lpcmDMAManager->pFree) & 0x7f)
855+ ;
856+ }
857+ dwJump = 0xFFFFFFF0;
858+ dwPause =
859+ (u32)(((unsigned int) lpcmDMAManager->pFree) -
860+ 16 - AGPBufLinearBase + AGPBufPhysicalBase);
861+
862+ DRM_DEBUG("dwStart = %08x, dwEnd = %08x, dwPause = %08x\n", dwStart,
863+ dwEnd, dwPause);
864+
865+ dwReg60 = INV_SubA_HAGPBstL | INV_HWBasL(dwStart);
866+ dwReg61 = INV_SubA_HAGPBstH | INV_HWBasH(dwStart);
867+ dwReg62 = INV_SubA_HAGPBendL | INV_HWBasL(dwEnd);
868+ dwReg63 = INV_SubA_HAGPBendH | INV_HWBasH(dwEnd);
869+ dwReg64 = INV_SubA_HAGPBpL | INV_HWBasL(dwPause);
870+ dwReg65 = INV_SubA_HAGPBpID | INV_HWBasH(dwPause) | INV_HAGPBpID_PAUSE;
871+
872+ if (dev_priv->chip_sub_index == CHIP_H6S2)
873+ dwReg60 |= 0x01;
874+
875+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS,
876+ INV_ParaType_PreCR);
877+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg60);
878+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg61);
879+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg62);
880+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg63);
881+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg64);
882+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg65);
883+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
884+ INV_SubA_HAGPBjumpL | INV_HWBasL(dwJump));
885+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
886+ INV_SubA_HAGPBjumpH | INV_HWBasH(dwJump));
887+
888+ /* Trigger AGP cycle */
889+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
890+ INV_SubA_HFthRCM | INV_HFthRCM_10 | INV_HAGPBTrig);
891+
892+ /*for debug */
893+ CurrAGP =
894+ GetMMIORegister(dev_priv->mmio->handle, INV_RB_AGPCMD_CURRADDR);
895+
896+ lpcmDMAManager->pInUseBySW = lpcmDMAManager->pFree;
897+}
898+
899+/* Do hw initialization and determine whether to use dma or mmio to
900+talk with hw */
901+int
902+via_chrome9_hw_init(struct drm_device *dev,
903+ struct drm_via_chrome9_init *init)
904+{
905+ struct drm_via_chrome9_private *dev_priv =
906+ (struct drm_via_chrome9_private *) dev->dev_private;
907+ unsigned retval = 0;
908+ unsigned int *pGARTTable, *addrlinear = NULL;
909+ int pages;
910+ struct drm_clb_event_tag_info *event_tag_info;
911+ struct drm_via_chrome9_DMA_manager *lpcmDMAManager = NULL;
912+
913+ if (init->chip_agp == CHIP_PCIE) {
914+ dev_priv->pagetable_map.pagetable_offset =
915+ init->garttable_offset;
916+ dev_priv->pagetable_map.pagetable_size = init->garttable_size;
917+ dev_priv->agp_size = init->agp_tex_size;
918+ /*Henry :prepare for PCIE texture buffer */
919+ } else {
920+ dev_priv->pagetable_map.pagetable_offset = 0;
921+ dev_priv->pagetable_map.pagetable_size = 0;
922+ }
923+
924+ dev_priv->dma_manager =
925+ kmalloc(sizeof(struct drm_via_chrome9_DMA_manager), GFP_KERNEL);
926+ if (!dev_priv->dma_manager) {
927+		DRM_ERROR("could not allocate system memory for dma_manager!\n");
928+ return -ENOMEM;
929+ }
930+
931+ lpcmDMAManager =
932+ (struct drm_via_chrome9_DMA_manager *) dev_priv->dma_manager;
933+ ((struct drm_via_chrome9_DMA_manager *)
934+ dev_priv->dma_manager)->DMASize = init->DMA_size;
935+ ((struct drm_via_chrome9_DMA_manager *)
936+ dev_priv->dma_manager)->pPhysical = init->DMA_phys_address;
937+
938+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS, 0x00110000);
939+ if (dev_priv->chip_sub_index == CHIP_H6S2) {
940+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
941+ 0x06000000);
942+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
943+ 0x07100000);
944+ } else {
945+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
946+ 0x02000000);
947+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
948+ 0x03100000);
949+ }
950+
951+ /* Specify fence command read back ID */
952+ /* Default the read back ID is CR */
953+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS,
954+ INV_ParaType_PreCR);
955+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
956+ INV_SubA_HSetRBGID | INV_HSetRBGID_CR);
957+
958+ DRM_DEBUG("begin to init\n");
959+
960+ if (dev_priv->chip_sub_index == CHIP_H6S2) {
961+ dev_priv->pcie_vmalloc_nocache = 0;
962+ if (dev_priv->pagetable_map.pagetable_size)
963+ retval = InitPCIEGART(dev_priv);
964+
965+ if (retval && dev_priv->drm_agp_type != DRM_AGP_DISABLED) {
966+ addrlinear =
967+ AllocAndBindPCIEMemory(dev_priv,
968+ lpcmDMAManager->DMASize +
969+ dev_priv->agp_size, 0);
970+ if (addrlinear) {
971+ dev_priv->pcie_vmalloc_nocache = (unsigned long)
972+ addrlinear;
973+ } else {
974+ dev_priv->bci_buffer =
975+ vmalloc(MAX_BCI_BUFFER_SIZE);
976+ dev_priv->drm_agp_type = DRM_AGP_DISABLED;
977+ }
978+ } else {
979+ dev_priv->bci_buffer = vmalloc(MAX_BCI_BUFFER_SIZE);
980+ dev_priv->drm_agp_type = DRM_AGP_DISABLED;
981+ }
982+ } else {
983+ if (dev_priv->drm_agp_type != DRM_AGP_DISABLED) {
984+ pGARTTable = NULL;
985+ addrlinear = (unsigned int *)
986+ ioremap(dev->agp->base +
987+ lpcmDMAManager->pPhysical,
988+ lpcmDMAManager->DMASize);
989+ dev_priv->bci_buffer = NULL;
990+ } else {
991+ dev_priv->bci_buffer = vmalloc(MAX_BCI_BUFFER_SIZE);
992+			/*Homer, BCI path always uses this block of memory */
993+ }
994+ }
995+
996+ /*till here we have known whether support dma or not */
997+ pages = dev->sg->pages;
998+ event_tag_info = vmalloc(sizeof(struct drm_clb_event_tag_info));
999+	if (!event_tag_info)
1000+		return -ENOMEM;
1001+	memset(event_tag_info, 0, sizeof(struct drm_clb_event_tag_info));
1002+
1003+ /* aligned to 16k alignment */
1004+ event_tag_info->linear_address =
1005+ (int
1006+ *) (((unsigned int) dev_priv->shadow_map.shadow_handle +
1007+ 0x3fff) & 0xffffc000);
1008+ event_tag_info->event_tag_linear_address =
1009+ event_tag_info->linear_address + 3;
1010+ dev_priv->event_tag_info = (void *) event_tag_info;
1011+ dev_priv->max_apertures = NUMBER_OF_APERTURES_CLB;
1012+
1013+ /* Initialize DMA data structure */
1014+ lpcmDMAManager->DMASize /= sizeof(unsigned int);
1015+ lpcmDMAManager->pBeg = addrlinear;
1016+ lpcmDMAManager->pFree = lpcmDMAManager->pBeg;
1017+ lpcmDMAManager->pInUseBySW = lpcmDMAManager->pBeg;
1018+ lpcmDMAManager->pInUseByHW = lpcmDMAManager->pBeg;
1019+ lpcmDMAManager->LastIssuedEventTag = (unsigned int) (unsigned long *)
1020+ lpcmDMAManager->pBeg;
1021+ lpcmDMAManager->ppInUseByHW =
1022+ (unsigned int **) ((char *) (dev_priv->mmio->handle) +
1023+ INV_RB_AGPCMD_CURRADDR);
1024+ lpcmDMAManager->bDMAAgp = dev_priv->chip_agp;
1025+ lpcmDMAManager->addr_linear = (unsigned int *) addrlinear;
1026+
1027+ if (dev_priv->drm_agp_type == DRM_AGP_DOUBLE_BUFFER) {
1028+ lpcmDMAManager->MaxKickoffSize = lpcmDMAManager->DMASize >> 1;
1029+ lpcmDMAManager->pEnd =
1030+ lpcmDMAManager->addr_linear +
1031+ (lpcmDMAManager->DMASize >> 1) - 1;
1032+ SetAGPDoubleCmd_inv(dev);
1033+ if (dev_priv->chip_sub_index == CHIP_H6S2) {
1034+			DRM_INFO("DMA buffer initialization finished. ");
1035+ DRM_INFO("Use PCIE Double Buffer type!\n");
1036+ DRM_INFO("Total PCIE DMA buffer size = %8d bytes. \n",
1037+ lpcmDMAManager->DMASize << 2);
1038+ } else {
1039+			DRM_INFO("DMA buffer initialization finished. ");
1040+ DRM_INFO("Use AGP Double Buffer type!\n");
1041+ DRM_INFO("Total AGP DMA buffer size = %8d bytes. \n",
1042+ lpcmDMAManager->DMASize << 2);
1043+ }
1044+ } else if (dev_priv->drm_agp_type == DRM_AGP_RING_BUFFER) {
1045+ lpcmDMAManager->MaxKickoffSize = lpcmDMAManager->DMASize;
1046+ lpcmDMAManager->pEnd =
1047+ lpcmDMAManager->addr_linear + lpcmDMAManager->DMASize;
1048+ SetAGPRingCmdRegs_inv(dev);
1049+ if (dev_priv->chip_sub_index == CHIP_H6S2) {
1050+			DRM_INFO("DMA buffer initialization finished. ");
1051+			DRM_INFO("Use PCIE Ring Buffer type!\n");
1052+ DRM_INFO("Total PCIE DMA buffer size = %8d bytes. \n",
1053+ lpcmDMAManager->DMASize << 2);
1054+ } else {
1055+			DRM_INFO("DMA buffer initialization finished. ");
1056+ DRM_INFO("Use AGP Ring Buffer type!\n");
1057+ DRM_INFO("Total AGP DMA buffer size = %8d bytes. \n",
1058+ lpcmDMAManager->DMASize << 2);
1059+ }
1060+ } else if (dev_priv->drm_agp_type == DRM_AGP_DISABLED) {
1061+ lpcmDMAManager->MaxKickoffSize = 0x0;
1062+ if (dev_priv->chip_sub_index == CHIP_H6S2)
1063+ DRM_INFO("PCIE init failed! Use PCI\n");
1064+ else
1065+ DRM_INFO("AGP init failed! Use PCI\n");
1066+ }
1067+ return 0;
1068+}
1069+
1070+static void
1071+kickoff_bci_inv(struct drm_device *dev,
1072+ struct drm_via_chrome9_flush *dma_info)
1073+{
1074+ u32 HdType, dwQWCount, i, dwCount, Addr1, Addr2, SWPointer,
1075+ SWPointerEnd;
1076+ unsigned long *pCmdData;
1077+ int result;
1078+
1079+ struct drm_via_chrome9_private *dev_priv =
1080+ (struct drm_via_chrome9_private *) dev->dev_private;
1081+ /*pCmdData = __s3gke_vmalloc(dma_info->cmd_size<<2); */
1082+ pCmdData = dev_priv->bci_buffer;
1083+
1084+ if (!pCmdData)
1085+ return;
1086+ result = copy_from_user((int *) pCmdData, dma_info->usermode_dma_buf,
1087+ dma_info->cmd_size << 2);
1088+ if (result) {
1089+		DRM_ERROR("In function kickoff_bci_inv, "
1090+			"copy_from_user failed.\n");
1091+ return ;
1092+ }
1093+#if VIA_CHROME9_VERIFY_ENABLE
1094+ result = via_chrome9_verify_command_stream(
1095+ (const uint32_t *)pCmdData, dma_info->cmd_size << 2,
1096+ dev, dev_priv->chip_sub_index == CHIP_H6S2 ? 0 : 1);
1097+ if (result) {
1098+		DRM_ERROR("The command has a security issue.\n");
1099+ return ;
1100+ }
1101+#endif
1102+ SWPointer = 0;
1103+ SWPointerEnd = (u32) dma_info->cmd_size;
1104+ while (SWPointer < SWPointerEnd) {
1105+ HdType = pCmdData[SWPointer] & INV_AGPHeader_MASK;
1106+ switch (HdType) {
1107+ case INV_AGPHeader0:
1108+ case INV_AGPHeader5:
1109+ dwQWCount = pCmdData[SWPointer + 1];
1110+ SWPointer += 4;
1111+
1112+ for (i = 0; i < dwQWCount; i++) {
1113+ SetMMIORegister(dev_priv->mmio->handle,
1114+ pCmdData[SWPointer],
1115+ pCmdData[SWPointer + 1]);
1116+ SWPointer += 2;
1117+ }
1118+ break;
1119+
1120+ case INV_AGPHeader1:
1121+ dwCount = pCmdData[SWPointer + 1];
1122+ Addr1 = 0x0;
1123+ SWPointer += 4; /* skip 128-bit. */
1124+
1125+ for (; dwCount > 0; dwCount--, SWPointer++,
1126+ Addr1 += 4) {
1127+ SetMMIORegister(dev_priv->hostBlt->handle,
1128+ Addr1, pCmdData[SWPointer]);
1129+ }
1130+ break;
1131+
1132+ case INV_AGPHeader4:
1133+ dwCount = pCmdData[SWPointer + 1];
1134+ Addr1 = pCmdData[SWPointer] & 0x0000FFFF;
1135+ SWPointer += 4; /* skip 128-bit. */
1136+
1137+ for (; dwCount > 0; dwCount--, SWPointer++)
1138+ SetMMIORegister(dev_priv->mmio->handle, Addr1,
1139+ pCmdData[SWPointer]);
1140+ break;
1141+
1142+ case INV_AGPHeader2:
1143+ Addr1 = pCmdData[SWPointer + 1] & 0xFFFF;
1144+ Addr2 = pCmdData[SWPointer] & 0xFFFF;
1145+
1146+ /* Write first data (either ParaType or whatever) to
1147+ Addr1 */
1148+ SetMMIORegister(dev_priv->mmio->handle, Addr1,
1149+ pCmdData[SWPointer + 2]);
1150+ SWPointer += 4;
1151+
1152+ /* The following data are all written to Addr2,
1153+ until another header is met */
1154+ while (!is_agp_header(pCmdData[SWPointer])
1155+ && (SWPointer < SWPointerEnd)) {
1156+ SetMMIORegister(dev_priv->mmio->handle, Addr2,
1157+ pCmdData[SWPointer]);
1158+ SWPointer++;
1159+ }
1160+ break;
1161+
1162+ case INV_AGPHeader3:
1163+ Addr1 = pCmdData[SWPointer] & 0xFFFF;
1164+ Addr2 = Addr1 + 4;
1165+ dwCount = pCmdData[SWPointer + 1];
1166+
1167+ /* Write first data (either ParaType or whatever) to
1168+ Addr1 */
1169+ SetMMIORegister(dev_priv->mmio->handle, Addr1,
1170+ pCmdData[SWPointer + 2]);
1171+ SWPointer += 4;
1172+
1173+ for (i = 0; i < dwCount; i++) {
1174+ SetMMIORegister(dev_priv->mmio->handle, Addr2,
1175+ pCmdData[SWPointer]);
1176+ SWPointer++;
1177+ }
1178+ break;
1179+
1180+ case INV_AGPHeader6:
1181+ break;
1182+
1183+ case INV_AGPHeader7:
1184+ break;
1185+
1186+ default:
1187+ SWPointer += 4; /* Advance to next header */
1188+ }
1189+
1190+ SWPointer = (SWPointer + 3) & ~3;
1191+ }
1192+}
1193+
1194+void
1195+kickoff_dma_db_inv(struct drm_device *dev)
1196+{
1197+ struct drm_via_chrome9_private *dev_priv =
1198+ (struct drm_via_chrome9_private *) dev->dev_private;
1199+ struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
1200+ dev_priv->dma_manager;
1201+
1202+ u32 BufferSize = (u32) (lpcmDMAManager->pFree - lpcmDMAManager->pBeg);
1203+
1204+ unsigned int AGPBufLinearBase =
1205+ (unsigned int) lpcmDMAManager->addr_linear;
1206+ unsigned int AGPBufPhysicalBase =
1207+ (unsigned int) dev->agp->base + lpcmDMAManager->pPhysical;
1208+ /*add shadow offset */
1209+
1210+ unsigned int dwStart, dwEnd, dwPause;
1211+ unsigned int dwReg60, dwReg61, dwReg62, dwReg63, dwReg64, dwReg65;
1212+ unsigned int CR_Status;
1213+
1214+ if (BufferSize == 0)
1215+ return;
1216+
1217+ /* 256-bit alignment of AGP pause address */
1218+ if ((u32) ((unsigned long *) lpcmDMAManager->pFree) & 0x1f) {
1219+ ADDCmdHeader2_INVI(lpcmDMAManager->pFree, INV_REG_CR_TRANS,
1220+ INV_ParaType_Dummy);
1221+ do {
1222+ ADDCmdData_INVI(lpcmDMAManager->pFree, 0xCCCCCCC0);
1223+ ADDCmdData_INVI(lpcmDMAManager->pFree, 0xDDD00000);
1224+ }
1225+ while (((unsigned int) lpcmDMAManager->pFree) & 0x1f)
1226+ ;
1227+ }
1228+
1229+ dwStart =
1230+ (u32) (unsigned long *)lpcmDMAManager->pBeg -
1231+ AGPBufLinearBase + AGPBufPhysicalBase;
1232+ dwEnd = (u32) (unsigned long *)lpcmDMAManager->pEnd -
1233+ AGPBufLinearBase + AGPBufPhysicalBase;
1234+ dwPause =
1235+ (u32)(unsigned long *)lpcmDMAManager->pFree -
1236+ AGPBufLinearBase + AGPBufPhysicalBase - 4;
1237+
1238+ dwReg60 = INV_SubA_HAGPBstL | INV_HWBasL(dwStart);
1239+ dwReg61 = INV_SubA_HAGPBstH | INV_HWBasH(dwStart);
1240+ dwReg62 = INV_SubA_HAGPBendL | INV_HWBasL(dwEnd);
1241+ dwReg63 = INV_SubA_HAGPBendH | INV_HWBasH(dwEnd);
1242+ dwReg64 = INV_SubA_HAGPBpL | INV_HWBasL(dwPause);
1243+ dwReg65 = INV_SubA_HAGPBpID | INV_HWBasH(dwPause) | INV_HAGPBpID_STOP;
1244+
1245+ /* wait CR idle */
1246+ CR_Status = GetMMIORegister(dev_priv->mmio->handle, INV_RB_ENG_STATUS);
1247+ while (CR_Status & INV_ENG_BUSY_CR)
1248+ CR_Status =
1249+ GetMMIORegister(dev_priv->mmio->handle,
1250+ INV_RB_ENG_STATUS);
1251+
1252+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS,
1253+ INV_ParaType_PreCR);
1254+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg60);
1255+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg61);
1256+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg62);
1257+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg63);
1258+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg64);
1259+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg65);
1260+
1261+ /* Trigger AGP cycle */
1262+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
1263+ INV_SubA_HFthRCM | INV_HFthRCM_10 | INV_HAGPBTrig);
1264+
1265+ if (lpcmDMAManager->pBeg == lpcmDMAManager->addr_linear) {
1266+ /* The second AGP command buffer */
1267+ lpcmDMAManager->pBeg =
1268+ lpcmDMAManager->addr_linear +
1269+ (lpcmDMAManager->DMASize >> 2);
1270+ lpcmDMAManager->pEnd =
1271+ lpcmDMAManager->addr_linear + lpcmDMAManager->DMASize;
1272+ lpcmDMAManager->pFree = lpcmDMAManager->pBeg;
1273+ } else {
1274+ /* The first AGP command buffer */
1275+ lpcmDMAManager->pBeg = lpcmDMAManager->addr_linear;
1276+ lpcmDMAManager->pEnd =
1277+ lpcmDMAManager->addr_linear +
1278+ (lpcmDMAManager->DMASize / 2) - 1;
1279+ lpcmDMAManager->pFree = lpcmDMAManager->pBeg;
1280+ }
1281+ CR_Status = GetMMIORegister(dev_priv->mmio->handle, INV_RB_ENG_STATUS);
1282+}
1283+
1284+
1285+void
1286+kickoff_dma_ring_inv(struct drm_device *dev)
1287+{
1288+ unsigned int dwPause, dwReg64, dwReg65;
1289+
1290+ struct drm_via_chrome9_private *dev_priv =
1291+ (struct drm_via_chrome9_private *) dev->dev_private;
1292+ struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
1293+ dev_priv->dma_manager;
1294+
1295+ unsigned int AGPBufLinearBase =
1296+ (unsigned int) lpcmDMAManager->addr_linear;
1297+ unsigned int AGPBufPhysicalBase =
1298+ (dev_priv->chip_agp ==
1299+ CHIP_PCIE) ? 0 : (unsigned int) dev->agp->base +
1300+ lpcmDMAManager->pPhysical;
1301+ /*add shadow offset */
1302+
1303+ /* 256-bit alignment of AGP pause address */
1304+ if (dev_priv->chip_sub_index == CHIP_H6S2) {
1305+ if ((u32)
1306+ ((unsigned long *) lpcmDMAManager->pFree) & 0x7f) {
1307+ ADDCmdHeader2_INVI(lpcmDMAManager->pFree,
1308+ INV_REG_CR_TRANS,
1309+ INV_ParaType_Dummy);
1310+ do {
1311+ ADDCmdData_INVI(lpcmDMAManager->pFree,
1312+ 0xCCCCCCC0);
1313+ ADDCmdData_INVI(lpcmDMAManager->pFree,
1314+ 0xDDD00000);
1315+ }
1316+ while ((u32)((unsigned long *) lpcmDMAManager->pFree) &
1317+ 0x7f)
1318+ ;
1319+ }
1320+ } else {
1321+ if ((u32)
1322+ ((unsigned long *) lpcmDMAManager->pFree) & 0x1f) {
1323+ ADDCmdHeader2_INVI(lpcmDMAManager->pFree,
1324+ INV_REG_CR_TRANS,
1325+ INV_ParaType_Dummy);
1326+ do {
1327+ ADDCmdData_INVI(lpcmDMAManager->pFree,
1328+ 0xCCCCCCC0);
1329+ ADDCmdData_INVI(lpcmDMAManager->pFree,
1330+ 0xDDD00000);
1331+ }
1332+ while ((u32)((unsigned long *) lpcmDMAManager->pFree) &
1333+ 0x1f)
1334+ ;
1335+ }
1336+ }
1337+
1338+
1339+ dwPause = (u32) ((unsigned long *) lpcmDMAManager->pFree)
1340+ - AGPBufLinearBase + AGPBufPhysicalBase - 16;
1341+
1342+ dwReg64 = INV_SubA_HAGPBpL | INV_HWBasL(dwPause);
1343+ dwReg65 = INV_SubA_HAGPBpID | INV_HWBasH(dwPause) | INV_HAGPBpID_PAUSE;
1344+
1345+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS,
1346+ INV_ParaType_PreCR);
1347+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg64);
1348+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg65);
1349+
1350+ lpcmDMAManager->pInUseBySW = lpcmDMAManager->pFree;
1351+}
1352+
1353+static int
1354+waitchipidle_inv(struct drm_via_chrome9_private *dev_priv)
1355+{
1356+ unsigned int count = 50000;
1357+ unsigned int eng_status;
1358+ unsigned int engine_busy;
1359+
1360+ do {
1361+ eng_status =
1362+ GetMMIORegister(dev_priv->mmio->handle,
1363+ INV_RB_ENG_STATUS);
1364+ engine_busy = eng_status & INV_ENG_BUSY_ALL;
1365+ count--;
1366+ }
1367+ while (engine_busy && count)
1368+ ;
1369+ if (count && engine_busy == 0)
1370+ return 0;
1371+ return -1;
1372+}
1373+
1374+void
1375+get_space_db_inv(struct drm_device *dev,
1376+ struct cmd_get_space *lpcmGetSpaceData)
1377+{
1378+ struct drm_via_chrome9_private *dev_priv =
1379+ (struct drm_via_chrome9_private *) dev->dev_private;
1380+ struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
1381+ dev_priv->dma_manager;
1382+
1383+ unsigned int dwRequestSize = lpcmGetSpaceData->dwRequestSize;
1384+ if (dwRequestSize > lpcmDMAManager->MaxKickoffSize) {
1385+ DRM_INFO("too big DMA buffer request!!!\n");
1386+ via_chrome9ke_assert(0);
1387+ *lpcmGetSpaceData->pCmdData = (unsigned int) NULL;
1388+ return;
1389+ }
1390+
1391+ if ((lpcmDMAManager->pFree + dwRequestSize) >
1392+ (lpcmDMAManager->pEnd - INV_CMDBUF_THRESHOLD * 2))
1393+ kickoff_dma_db_inv(dev);
1394+
1395+ *lpcmGetSpaceData->pCmdData = (unsigned int) lpcmDMAManager->pFree;
1396+}
1397+
1398+void
1399+RewindRingAGP_inv(struct drm_device *dev)
1400+{
1401+ struct drm_via_chrome9_private *dev_priv =
1402+ (struct drm_via_chrome9_private *) dev->dev_private;
1403+ struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
1404+ dev_priv->dma_manager;
1405+
1406+ unsigned int AGPBufLinearBase =
1407+ (unsigned int) lpcmDMAManager->addr_linear;
1408+ unsigned int AGPBufPhysicalBase =
1409+ (dev_priv->chip_agp ==
1410+ CHIP_PCIE) ? 0 : (unsigned int) dev->agp->base +
1411+ lpcmDMAManager->pPhysical;
1412+ /*add shadow offset */
1413+
1414+ unsigned int dwPause, dwJump;
1415+ unsigned int dwReg66, dwReg67;
1416+ unsigned int dwReg64, dwReg65;
1417+
1418+ ADDCmdHeader2_INVI(lpcmDMAManager->pFree, INV_REG_CR_TRANS,
1419+ INV_ParaType_Dummy);
1420+ ADDCmdData_INVI(lpcmDMAManager->pFree, 0xCCCCCCC7);
1421+ if (dev_priv->chip_sub_index == CHIP_H6S2)
1422+ while ((unsigned int) lpcmDMAManager->pFree & 0x7F)
1423+ ADDCmdData_INVI(lpcmDMAManager->pFree, 0xCCCCCCC7);
1424+ else
1425+ while ((unsigned int) lpcmDMAManager->pFree & 0x1F)
1426+ ADDCmdData_INVI(lpcmDMAManager->pFree, 0xCCCCCCC7);
1427+ dwJump = ((u32) ((unsigned long *) lpcmDMAManager->pFree))
1428+ - AGPBufLinearBase + AGPBufPhysicalBase - 16;
1429+
1430+ lpcmDMAManager->pFree = lpcmDMAManager->pBeg;
1431+
1432+ dwPause = ((u32) ((unsigned long *) lpcmDMAManager->pFree))
1433+ - AGPBufLinearBase + AGPBufPhysicalBase - 16;
1434+
1435+ dwReg64 = INV_SubA_HAGPBpL | INV_HWBasL(dwPause);
1436+ dwReg65 = INV_SubA_HAGPBpID | INV_HWBasH(dwPause) | INV_HAGPBpID_PAUSE;
1437+
1438+ dwReg66 = INV_SubA_HAGPBjumpL | INV_HWBasL(dwJump);
1439+ dwReg67 = INV_SubA_HAGPBjumpH | INV_HWBasH(dwJump);
1440+
1441+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS,
1442+ INV_ParaType_PreCR);
1443+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg66);
1444+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg67);
1445+
1446+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg64);
1447+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN, dwReg65);
1448+ lpcmDMAManager->pInUseBySW = lpcmDMAManager->pFree;
1449+}
1450+
1451+
1452+void
1453+get_space_ring_inv(struct drm_device *dev,
1454+ struct cmd_get_space *lpcmGetSpaceData)
1455+{
1456+ struct drm_via_chrome9_private *dev_priv =
1457+ (struct drm_via_chrome9_private *) dev->dev_private;
1458+ struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
1459+ dev_priv->dma_manager;
1460+ unsigned int dwUnFlushed;
1461+ unsigned int dwRequestSize = lpcmGetSpaceData->dwRequestSize;
1462+
1463+ unsigned int AGPBufLinearBase =
1464+ (unsigned int) lpcmDMAManager->addr_linear;
1465+ unsigned int AGPBufPhysicalBase =
1466+ (dev_priv->chip_agp ==
1467+ CHIP_PCIE) ? 0 : (unsigned int) dev->agp->base +
1468+ lpcmDMAManager->pPhysical;
1469+ /*add shadow offset */
1470+ u32 BufStart, BufEnd, CurSW, CurHW, NextSW, BoundaryCheck;
1471+
1472+ dwUnFlushed =
1473+ (unsigned int) (lpcmDMAManager->pFree - lpcmDMAManager->pBeg);
1474+	/*default bEnableModuleSwitch is on for metro, off for the rest */
1475+	/*cmHW_Module_Switch is a context-wide variable, which is enough for the
1476+	2d/3d switch within a context. */
1477+	/*But we must keep the dma buffer wrapped head and tail by 3d cmds
1478+	when it is kicked off to kernel mode. */
1479+	/*Get DMA Space (if requested, or no BCI space and BCI not forced). */
1480+
1481+ if (dwRequestSize > lpcmDMAManager->MaxKickoffSize) {
1482+ DRM_INFO("too big DMA buffer request!!!\n");
1483+ via_chrome9ke_assert(0);
1484+ *lpcmGetSpaceData->pCmdData = 0;
1485+ return;
1486+ }
1487+
1488+ if (dwUnFlushed + dwRequestSize > lpcmDMAManager->MaxKickoffSize)
1489+ kickoff_dma_ring_inv(dev);
1490+
1491+ BufStart =
1492+ (u32)((unsigned int) lpcmDMAManager->pBeg) - AGPBufLinearBase +
1493+ AGPBufPhysicalBase;
1494+ BufEnd = (u32)((unsigned int) lpcmDMAManager->pEnd) - AGPBufLinearBase +
1495+ AGPBufPhysicalBase;
1496+ dwRequestSize = lpcmGetSpaceData->dwRequestSize << 2;
1497+ NextSW = (u32) ((unsigned int) lpcmDMAManager->pFree) + dwRequestSize +
1498+ INV_CMDBUF_THRESHOLD * 8 - AGPBufLinearBase +
1499+ AGPBufPhysicalBase;
1500+
1501+ CurSW = (u32)((unsigned int) lpcmDMAManager->pFree) - AGPBufLinearBase +
1502+ AGPBufPhysicalBase;
1503+ CurHW = GetMMIORegister(dev_priv->mmio->handle, INV_RB_AGPCMD_CURRADDR);
1504+
1505+ if (NextSW >= BufEnd) {
1506+ kickoff_dma_ring_inv(dev);
1507+ CurSW = (u32) ((unsigned int) lpcmDMAManager->pFree) -
1508+ AGPBufLinearBase + AGPBufPhysicalBase;
1509+ /* make sure the last rewind is completed */
1510+ CurHW = GetMMIORegister(dev_priv->mmio->handle,
1511+ INV_RB_AGPCMD_CURRADDR);
1512+ while (CurHW > CurSW)
1513+ CurHW = GetMMIORegister(dev_priv->mmio->handle,
1514+ INV_RB_AGPCMD_CURRADDR);
1515+		/* Sometimes the value read from HW is unreliable,
1516+		   so we need to double-confirm. */
1517+ CurHW = GetMMIORegister(dev_priv->mmio->handle,
1518+ INV_RB_AGPCMD_CURRADDR);
1519+ while (CurHW > CurSW)
1520+ CurHW = GetMMIORegister(dev_priv->mmio->handle,
1521+ INV_RB_AGPCMD_CURRADDR);
1522+ BoundaryCheck =
1523+ BufStart + dwRequestSize + INV_QW_PAUSE_ALIGN * 16;
1524+ if (BoundaryCheck >= BufEnd)
1525+ /* If an empty command buffer can't hold
1526+ the request data. */
1527+ via_chrome9ke_assert(0);
1528+ else {
1529+			/* We need to guarantee the new commands have no chance
1530+			   to overwrite the unexecuted commands, or wait until
1531+			   there are no unexecuted commands in the agp buffer */
1532+ if (CurSW <= BoundaryCheck) {
1533+ CurHW = GetMMIORegister(dev_priv->mmio->handle,
1534+ INV_RB_AGPCMD_CURRADDR);
1535+ while (CurHW < CurSW)
1536+ CurHW = GetMMIORegister(
1537+ dev_priv->mmio->handle,
1538+ INV_RB_AGPCMD_CURRADDR);
1539+				/*Sometimes the value read from HW is unreliable,
1540+				  so we need to double-confirm. */
1541+ CurHW = GetMMIORegister(dev_priv->mmio->handle,
1542+ INV_RB_AGPCMD_CURRADDR);
1543+ while (CurHW < CurSW) {
1544+ CurHW = GetMMIORegister(
1545+ dev_priv->mmio->handle,
1546+ INV_RB_AGPCMD_CURRADDR);
1547+ }
1548+ RewindRingAGP_inv(dev);
1549+ CurSW = (u32) ((unsigned long *)
1550+ lpcmDMAManager->pFree) -
1551+ AGPBufLinearBase + AGPBufPhysicalBase;
1552+ CurHW = GetMMIORegister(dev_priv->mmio->handle,
1553+ INV_RB_AGPCMD_CURRADDR);
1554+				/* Wait until the hw pointer jumps to
1555+				   the start and the hw pointer is
1556+				   equal to the sw pointer */
1557+ while (CurHW != CurSW) {
1558+ CurHW = GetMMIORegister(
1559+ dev_priv->mmio->handle,
1560+ INV_RB_AGPCMD_CURRADDR);
1561+ }
1562+ } else {
1563+ CurHW = GetMMIORegister(dev_priv->mmio->handle,
1564+ INV_RB_AGPCMD_CURRADDR);
1565+
1566+ while (CurHW <= BoundaryCheck) {
1567+ CurHW = GetMMIORegister(
1568+ dev_priv->mmio->handle,
1569+ INV_RB_AGPCMD_CURRADDR);
1570+ }
1571+ CurHW = GetMMIORegister(dev_priv->mmio->handle,
1572+ INV_RB_AGPCMD_CURRADDR);
1573+				/* Sometimes the value read from HW is
1574+				   unreliable, so we need to double-confirm. */
1575+ while (CurHW <= BoundaryCheck) {
1576+ CurHW = GetMMIORegister(
1577+ dev_priv->mmio->handle,
1578+ INV_RB_AGPCMD_CURRADDR);
1579+ }
1580+ RewindRingAGP_inv(dev);
1581+ }
1582+ }
1583+ } else {
1584+		/* No need to rewind. Ensure unexecuted agp commands will
1585+		   not be overwritten by new
1586+		   agp commands */
1587+ CurSW = (u32) ((unsigned int) lpcmDMAManager->pFree) -
1588+ AGPBufLinearBase + AGPBufPhysicalBase;
1589+ CurHW = GetMMIORegister(dev_priv->mmio->handle,
1590+ INV_RB_AGPCMD_CURRADDR);
1591+
1592+ while ((CurHW > CurSW) && (CurHW <= NextSW))
1593+ CurHW = GetMMIORegister(dev_priv->mmio->handle,
1594+ INV_RB_AGPCMD_CURRADDR);
1595+
1596+		/* Sometimes the value read from HW is unreliable,
1597+		   so we need to double-confirm. */
1598+ CurHW = GetMMIORegister(dev_priv->mmio->handle,
1599+ INV_RB_AGPCMD_CURRADDR);
1600+ while ((CurHW > CurSW) && (CurHW <= NextSW))
1601+ CurHW = GetMMIORegister(dev_priv->mmio->handle,
1602+ INV_RB_AGPCMD_CURRADDR);
1603+ }
1604+ /*return the space handle */
1605+ *lpcmGetSpaceData->pCmdData = (unsigned int) lpcmDMAManager->pFree;
1606+}
1607+
1608+void
1609+release_space_inv(struct drm_device *dev,
1610+ struct cmd_release_space *lpcmReleaseSpaceData)
1611+{
1612+ struct drm_via_chrome9_private *dev_priv =
1613+ (struct drm_via_chrome9_private *) dev->dev_private;
1614+ struct drm_via_chrome9_DMA_manager *lpcmDMAManager =
1615+ dev_priv->dma_manager;
1616+ unsigned int dwReleaseSize = lpcmReleaseSpaceData->dwReleaseSize;
1617+ int i = 0;
1618+
1619+ lpcmDMAManager->pFree += dwReleaseSize;
1620+
1621+ /* aligned address */
1622+ while (((unsigned int) lpcmDMAManager->pFree) & 0xF) {
1623+ /* not in 4 unsigned ints (16 Bytes) align address,
1624+ insert NULL Commands */
1625+ *lpcmDMAManager->pFree++ = NULL_COMMAND_INV[i & 0x3];
1626+ i++;
1627+ }
1628+
1629+ if ((dev_priv->chip_sub_index == CHIP_H5 ||
1630+ dev_priv->chip_sub_index == CHIP_H6S2) &&
1631+ (dev_priv->drm_agp_type == DRM_AGP_RING_BUFFER)) {
1632+ ADDCmdHeader2_INVI(lpcmDMAManager->pFree, INV_REG_CR_TRANS,
1633+ INV_ParaType_Dummy);
1634+ for (i = 0; i < NULLCOMMANDNUMBER; i++)
1635+ ADDCmdData_INVI(lpcmDMAManager->pFree, 0xCC000000);
1636+ }
1637+}
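
The alignment loop in release_space_inv() above pads the DMA free pointer with NULL commands until it lands on a 16-byte (4-dword) boundary. The following is a standalone editorial sketch of that arithmetic, not part of the patch; the null_cmd values are stand-ins for NULL_COMMAND_INV, whose real contents are defined elsewhere in the driver.

/* Illustration of the 16-byte padding done in release_space_inv():
 * after the released dwords, NULL commands are written until the free
 * pointer sits on a 4-dword boundary.  null_cmd values are assumptions. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t buf[32] = {0};
	uint32_t *pfree = buf + 5;   /* pretend 5 dwords were just released */
	static const uint32_t null_cmd[4] = {
		0xcc000000, 0xcd000000, 0xce000000, 0xcf000000
	};
	int i = 0;

	while (((uintptr_t)pfree) & 0xF)   /* same alignment test the driver uses */
		*pfree++ = null_cmd[i++ & 0x3];

	printf("padded %d dwords, free pointer now at dword offset %td\n",
	       i, pfree - buf);
	return 0;
}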
1638+
1639+int
1640+via_chrome9_ioctl_flush(struct drm_device *dev, void *data,
1641+ struct drm_file *file_priv)
1642+{
1643+ struct drm_via_chrome9_flush *dma_info = data;
1644+ struct drm_via_chrome9_private *dev_priv =
1645+ (struct drm_via_chrome9_private *) dev->dev_private;
1646+ int ret = 0;
1647+ int result = 0;
1648+ struct cmd_get_space getspace;
1649+ struct cmd_release_space releasespace;
1650+ unsigned long *pCmdData = NULL;
1651+
1652+ switch (dma_info->dma_cmd_type) {
1653+ /* Copy DMA buffer to BCI command buffer */
1654+ case flush_bci:
1655+ case flush_bci_and_wait:
1656+ if (dma_info->cmd_size <= 0)
1657+ return 0;
1658+ if (dma_info->cmd_size > MAX_BCI_BUFFER_SIZE) {
1659+			DRM_INFO("BCI space request too large!\n");
1660+ return 0;
1661+ }
1662+
1663+ kickoff_bci_inv(dev, dma_info);
1664+ waitchipidle_inv(dev_priv);
1665+ break;
1666+ /* Use DRM DMA buffer manager to kick off DMA directly */
1667+ case dma_kickoff:
1668+ break;
1669+
1670+ /* Copy user mode DMA buffer to kernel DMA buffer,
1671+ then kick off DMA */
1672+ case flush_dma_buffer:
1673+ case flush_dma_and_wait:
1674+ if (dma_info->cmd_size <= 0)
1675+ return 0;
1676+
1677+ getspace.dwRequestSize = dma_info->cmd_size;
1678+ if ((dev_priv->chip_sub_index == CHIP_H5 ||
1679+ dev_priv->chip_sub_index == CHIP_H6S2) &&
1680+ (dev_priv->drm_agp_type == DRM_AGP_RING_BUFFER))
1681+ getspace.dwRequestSize += (NULLCOMMANDNUMBER + 4);
1682+		/* henry: patch for VT3293 AGP ring buffer stability */
1683+ getspace.pCmdData = (unsigned int *) &pCmdData;
1684+
1685+ if (dev_priv->drm_agp_type == DRM_AGP_DOUBLE_BUFFER)
1686+ get_space_db_inv(dev, &getspace);
1687+ else if (dev_priv->drm_agp_type == DRM_AGP_RING_BUFFER)
1688+ get_space_ring_inv(dev, &getspace);
1689+ if (pCmdData) {
1690+			/* Copy data from userspace to the kernel DMA/AGP buffer */
1691+ result = copy_from_user((int *)
1692+ pCmdData,
1693+ dma_info->usermode_dma_buf,
1694+ dma_info->cmd_size << 2);
1695+ if (result) {
1696+				DRM_ERROR("via_chrome9_ioctl_flush: "
1697+					"copy_from_user failed.\n");
1698+ return -EINVAL;
1699+ }
1700+
1701+#if VIA_CHROME9_VERIFY_ENABLE
1702+ result = via_chrome9_verify_command_stream(
1703+ (const uint32_t *)pCmdData, dma_info->cmd_size << 2,
1704+ dev, dev_priv->chip_sub_index == CHIP_H6S2 ? 0 : 1);
1705+ if (result) {
1706+				DRM_ERROR("The user command has a security issue.\n");
1707+ return -EINVAL;
1708+ }
1709+#endif
1710+
1711+ releasespace.dwReleaseSize = dma_info->cmd_size;
1712+ release_space_inv(dev, &releasespace);
1713+ if (dev_priv->drm_agp_type == DRM_AGP_DOUBLE_BUFFER)
1714+ kickoff_dma_db_inv(dev);
1715+ else if (dev_priv->drm_agp_type == DRM_AGP_RING_BUFFER)
1716+ kickoff_dma_ring_inv(dev);
1717+
1718+ if (dma_info->dma_cmd_type == flush_dma_and_wait)
1719+ waitchipidle_inv(dev_priv);
1720+ } else {
1721+			DRM_INFO("Not enough DMA space\n");
1722+ ret = -ENOMEM;
1723+ }
1724+ break;
1725+
1726+ default:
1727+ DRM_INFO("Invalid DMA buffer type");
1728+ ret = -EINVAL;
1729+ break;
1730+ }
1731+ return ret;
1732+}
1733+
1734+int
1735+via_chrome9_ioctl_free(struct drm_device *dev, void *data,
1736+ struct drm_file *file_priv)
1737+{
1738+ return 0;
1739+}
1740+
1741+int
1742+via_chrome9_ioctl_wait_chip_idle(struct drm_device *dev, void *data,
1743+ struct drm_file *file_priv)
1744+{
1745+ struct drm_via_chrome9_private *dev_priv =
1746+ (struct drm_via_chrome9_private *) dev->dev_private;
1747+
1748+ waitchipidle_inv(dev_priv);
1749+	/* FIXME: should this always return 0? */
1750+ return 0;
1751+}
1752+
1753+int
1754+via_chrome9_ioctl_flush_cache(struct drm_device *dev, void *data,
1755+ struct drm_file *file_priv)
1756+{
1757+ return 0;
1758+}
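
via_chrome9_ioctl_flush() above is the entry point that copies a user-mode command buffer into the kernel AGP/PCIe DMA buffer and kicks it off. The sketch below is a hedged editorial example of driving that path from userspace; the /dev/dri/card0 node path and the assumption that via_chrome9_drm.h is usable from userspace are mine, while the struct fields mirror struct drm_via_chrome9_flush as declared later in this patch.

/* Hypothetical userspace sketch (not part of the patch): submit a
 * command buffer through the flush_dma_and_wait path. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include "via_chrome9_drm.h"

static int submit_cmds(int fd, unsigned int *cmds, int ndwords)
{
	struct drm_via_chrome9_flush req = {
		.dma_cmd_type     = flush_dma_and_wait, /* copy, kick off, wait idle */
		.cmd_size         = ndwords,            /* size in dwords */
		.usermode_dma_buf = cmds,
	};

	return ioctl(fd, DRM_IOCTL_VIA_CHROME9_FLUSH, &req);
}

int main(void)
{
	unsigned int cmds[4] = { 0 };   /* placeholder command dwords */
	int fd = open("/dev/dri/card0", O_RDWR);   /* assumed node path */

	return fd < 0 ? 1 : submit_cmds(fd, cmds, 4);
}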
1759--- /dev/null
1760+++ b/drivers/gpu/drm/via_chrome9/via_chrome9_dma.h
1761@@ -0,0 +1,69 @@
1762+/*
1763+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
1764+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
1765+ *
1766+ * Permission is hereby granted, free of charge, to any person
1767+ * obtaining a copy of this software and associated documentation
1768+ * files (the "Software"), to deal in the Software without
1769+ * restriction, including without limitation the rights to use,
1770+ * copy, modify, merge, publish, distribute, sub license,
1771+ * and/or sell copies of the Software, and to permit persons to
1772+ * whom the Software is furnished to do so, subject to the
1773+ * following conditions:
1774+ *
1775+ * The above copyright notice and this permission notice
1776+ * (including the next paragraph) shall be included in all
1777+ * copies or substantial portions of the Software.
1778+ *
1779+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1780+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
1781+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1782+ * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
1783+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1784+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1785+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
1786+ * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1787+ */
1788+#ifndef _VIA_CHROME9_DMA_H_
1789+#define _VIA_CHROME9_DMA_H_
1790+
1791+#define MAX_BCI_BUFFER_SIZE (16 * 1024 * 1024)
1792+
1793+enum cmd_request_type {
1794+ CM_REQUEST_BCI,
1795+ CM_REQUEST_DMA,
1796+ CM_REQUEST_RB,
1797+ CM_REQUEST_RB_FORCED_DMA,
1798+ CM_REQUEST_NOTAVAILABLE
1799+};
1800+
1801+struct cmd_get_space {
1802+ unsigned int dwRequestSize;
1803+ enum cmd_request_type hint;
1804+ __volatile__ unsigned int *pCmdData;
1805+};
1806+
1807+struct cmd_release_space {
1808+ unsigned int dwReleaseSize;
1809+};
1810+
1811+extern int via_chrome9_hw_init(struct drm_device *dev,
1812+ struct drm_via_chrome9_init *init);
1813+extern int via_chrome9_ioctl_flush(struct drm_device *dev, void *data,
1814+ struct drm_file *file_priv);
1815+extern int via_chrome9_ioctl_free(struct drm_device *dev, void *data,
1816+ struct drm_file *file_prev);
1817+extern int via_chrome9_ioctl_wait_chip_idle(struct drm_device *dev,
1818+ void *data, struct drm_file *file_priv);
1819+extern int via_chrome9_ioctl_flush_cache(struct drm_device *dev,
1820+ void *data, struct drm_file *file_priv);
1821+extern int via_chrome9_ioctl_flush(struct drm_device *dev, void *data,
1822+ struct drm_file *file_priv);
1823+extern int via_chrome9_ioctl_free(struct drm_device *dev, void *data,
1824+ struct drm_file *file_priv);
1825+extern unsigned int ProtectSizeValue(unsigned int size);
1826+extern void SetAGPDoubleCmd_inv(struct drm_device *dev);
1827+extern void SetAGPRingCmdRegs_inv(struct drm_device *dev);
1828+extern void via_chrome9_dma_init_inv(struct drm_device *dev);
1829+
1830+#endif
1831--- /dev/null
1832+++ b/drivers/gpu/drm/via_chrome9/via_chrome9_drm.c
1833@@ -0,0 +1,950 @@
1834+/*
1835+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
1836+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
1837+ *
1838+ * Permission is hereby granted, free of charge, to any person
1839+ * obtaining a copy of this software and associated documentation
1840+ * files (the "Software"), to deal in the Software without
1841+ * restriction, including without limitation the rights to use,
1842+ * copy, modify, merge, publish, distribute, sub license,
1843+ * and/or sell copies of the Software, and to permit persons to
1844+ * whom the Software is furnished to do so, subject to the
1845+ * following conditions:
1846+ *
1847+ * The above copyright notice and this permission notice
1848+ * (including the next paragraph) shall be included in all
1849+ * copies or substantial portions of the Software.
1850+ *
1851+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1852+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
1853+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1854+ * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
1855+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1856+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1857+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
1858+ * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1859+ */
1860+#include "drmP.h"
1861+#include "via_chrome9_drm.h"
1862+#include "via_chrome9_drv.h"
1863+#include "via_chrome9_mm.h"
1864+#include "via_chrome9_dma.h"
1865+#include "via_chrome9_3d_reg.h"
1866+
1867+#define VIA_CHROME9DRM_VIDEO_STARTADDRESS_ALIGNMENT 10
1868+
1869+void *via_chrome9_dev_v4l;
1870+void *via_chrome9_filepriv_v4l;
1871+
1872+void __via_chrome9ke_udelay(unsigned long usecs)
1873+{
1874+ unsigned long start;
1875+ unsigned long stop;
1876+ unsigned long period;
1877+ unsigned long wait_period;
1878+ struct timespec tval;
1879+
1880+#ifdef NDELAY_LIMIT
1881+#define UDELAY_LIMIT (NDELAY_LIMIT/1000) /* supposed to be 10 msec */
1882+#else
1883+#define UDELAY_LIMIT (10000) /* 10 msec */
1884+#endif
1885+
1886+ if (usecs > UDELAY_LIMIT) {
1887+ start = jiffies;
1888+ tval.tv_sec = usecs / 1000000;
1889+ tval.tv_nsec = (usecs - tval.tv_sec * 1000000) * 1000;
1890+ wait_period = timespec_to_jiffies(&tval);
1891+ do {
1892+ stop = jiffies;
1893+
1894+ if (stop < start)
1895+ period = ((unsigned long)-1 - start) + stop + 1;
1896+ else
1897+ period = stop - start;
1898+
1899+ } while (period < wait_period);
1900+ } else
1901+ udelay(usecs); /* delay value might get checked once again */
1902+}
1903+
1904+int via_chrome9_ioctl_process_exit(struct drm_device *dev, void *data,
1905+ struct drm_file *file_priv)
1906+{
1907+ return 0;
1908+}
1909+
1910+int via_chrome9_ioctl_restore_primary(struct drm_device *dev,
1911+ void *data, struct drm_file *file_priv)
1912+{
1913+ return 0;
1914+}
1915+
1916+void Initialize3DEngine(struct drm_via_chrome9_private *dev_priv)
1917+{
1918+ int i;
1919+ unsigned int StageOfTexture;
1920+
1921+ if (dev_priv->chip_sub_index == CHIP_H5 ||
1922+ dev_priv->chip_sub_index == CHIP_H5S1) {
1923+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
1924+ 0x00010000);
1925+
1926+ for (i = 0; i <= 0x8A; i++) {
1927+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
1928+ (unsigned int) i << 24);
1929+ }
1930+
1931+ /* Initial Texture Stage Setting*/
1932+ for (StageOfTexture = 0; StageOfTexture < 0xf;
1933+ StageOfTexture++) {
1934+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
1935+ (0x00020000 | 0x00000000 |
1936+ (StageOfTexture & 0xf)<<24));
1937+ /* *((unsigned int volatile*)(pMapIOPort+HC_REG_TRANS_SET)) =
1938+ (0x00020000 | HC_ParaSubType_Tex0 | (StageOfTexture &
1939+ 0xf)<<24);*/
1940+ for (i = 0 ; i <= 0x30 ; i++) {
1941+ SetMMIORegister(dev_priv->mmio->handle,
1942+ 0x440, (unsigned int) i << 24);
1943+ }
1944+ }
1945+
1946+ /* Initial Texture Sampler Setting*/
1947+ for (StageOfTexture = 0; StageOfTexture < 0xf;
1948+ StageOfTexture++) {
1949+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
1950+ (0x00020000 | 0x00020000 |
1951+ (StageOfTexture & 0xf)<<24));
1952+ /* *((unsigned int volatile*)(pMapIOPort+
1953+ HC_REG_TRANS_SET)) = (0x00020000 | 0x00020000 |
1954+ ( StageOfTexture & 0xf)<<24);*/
1955+ for (i = 0 ; i <= 0x30 ; i++) {
1956+ SetMMIORegister(dev_priv->mmio->handle,
1957+ 0x440, (unsigned int) i << 24);
1958+ }
1959+ }
1960+
1961+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
1962+ (0x00020000 | 0xfe000000));
1963+ /* *((unsigned int volatile*)(pMapIOPort+HC_REG_TRANS_SET)) =
1964+ (0x00020000 | HC_ParaSubType_TexGen);*/
1965+ for (i = 0 ; i <= 0x13 ; i++) {
1966+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
1967+ (unsigned int) i << 24);
1968+ /* *((unsigned int volatile*)(pMapIOPort+
1969+ HC_REG_Hpara0)) = ((unsigned int) i << 24);*/
1970+ }
1971+
1972+		/* Initial Gamma Table Setting */
1973+		/* (covers both the degamma and the gamma tables below) */
1974+		/* 5 + 4 = 9 (12) dwords */
1975+		/* sRGB textures are not directly supported by H3 hardware,
1976+		   so we have to set the deGamma table for texture sampling. */
1977+
1978+ /* degamma table*/
1979+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
1980+ (0x00030000 | 0x15000000));
1981+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
1982+ (0x40000000 | (30 << 20) | (15 << 10) | (5)));
1983+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
1984+ ((119 << 20) | (81 << 10) | (52)));
1985+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
1986+ ((283 << 20) | (219 << 10) | (165)));
1987+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
1988+ ((535 << 20) | (441 << 10) | (357)));
1989+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
1990+ ((119 << 20) | (884 << 20) | (757 << 10) |
1991+ (640)));
1992+
1993+ /* gamma table*/
1994+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
1995+ (0x00030000 | 0x17000000));
1996+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
1997+ (0x40000000 | (13 << 20) | (13 << 10) | (13)));
1998+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
1999+ (0x40000000 | (26 << 20) | (26 << 10) | (26)));
2000+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2001+ (0x40000000 | (39 << 20) | (39 << 10) | (39)));
2002+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2003+ ((51 << 20) | (51 << 10) | (51)));
2004+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2005+ ((71 << 20) | (71 << 10) | (71)));
2006+ SetMMIORegister(dev_priv->mmio->handle,
2007+ 0x440, (87 << 20) | (87 << 10) | (87));
2008+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2009+ (113 << 20) | (113 << 10) | (113));
2010+ SetMMIORegister(dev_priv->mmio->handle,
2011+ 0x440, (135 << 20) | (135 << 10) | (135));
2012+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2013+ (170 << 20) | (170 << 10) | (170));
2014+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2015+ (199 << 20) | (199 << 10) | (199));
2016+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2017+ (246 << 20) | (246 << 10) | (246));
2018+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2019+ (284 << 20) | (284 << 10) | (284));
2020+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2021+ (317 << 20) | (317 << 10) | (317));
2022+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2023+ (347 << 20) | (347 << 10) | (347));
2024+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2025+ (373 << 20) | (373 << 10) | (373));
2026+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2027+ (398 << 20) | (398 << 10) | (398));
2028+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2029+ (442 << 20) | (442 << 10) | (442));
2030+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2031+ (481 << 20) | (481 << 10) | (481));
2032+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2033+ (517 << 20) | (517 << 10) | (517));
2034+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2035+ (550 << 20) | (550 << 10) | (550));
2036+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2037+ (609 << 20) | (609 << 10) | (609));
2038+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2039+ (662 << 20) | (662 << 10) | (662));
2040+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2041+ (709 << 20) | (709 << 10) | (709));
2042+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2043+ (753 << 20) | (753 << 10) | (753));
2044+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2045+ (794 << 20) | (794 << 10) | (794));
2046+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2047+ (832 << 20) | (832 << 10) | (832));
2048+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2049+ (868 << 20) | (868 << 10) | (868));
2050+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2051+ (902 << 20) | (902 << 10) | (902));
2052+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2053+ (934 << 20) | (934 << 10) | (934));
2054+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2055+ (966 << 20) | (966 << 10) | (966));
2056+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2057+ (996 << 20) | (996 << 10) | (996));
2058+
2059+
2060+		/*
2061+		 For Interrupt Restore only: all types of write-through
2062+		 registers must have their header data written to hardware
2063+		 at least once before they can be restored.  H/W will
2064+		 automatically record the header into the write-through
2065+		 state buffer for restore usage.
2066+ By Jaren:
2067+ HParaType = 8'h03, HParaSubType = 8'h00
2068+ 8'h11
2069+ 8'h12
2070+ 8'h14
2071+ 8'h15
2072+ 8'h17
2073+ HParaSubType 8'h12, 8'h15 is initialized.
2074+ [HWLimit]
2075+		 1. None of these write-through registers can be partially
2076+		 updated.
2077+		 2. All of these write-through updates must be AGP commands.
2078+		 16 entries : 4 128-bit data */
2079+
2080+ /* Initialize INV_ParaSubType_TexPal */
2081+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2082+ (0x00030000 | 0x00000000));
2083+ for (i = 0; i < 16; i++) {
2084+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2085+ 0x00000000);
2086+ }
2087+
2088+ /* Initialize INV_ParaSubType_4X4Cof */
2089+ /* 32 entries : 8 128-bit data */
2090+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2091+ (0x00030000 | 0x11000000));
2092+ for (i = 0; i < 32; i++) {
2093+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2094+ 0x00000000);
2095+ }
2096+
2097+ /* Initialize INV_ParaSubType_StipPal */
2098+ /* 5 entries : 2 128-bit data */
2099+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2100+ (0x00030000 | 0x14000000));
2101+ for (i = 0; i < (5+3); i++) {
2102+ SetMMIORegister(dev_priv->mmio->handle,
2103+ 0x440, 0x00000000);
2104+ }
2105+
2106+ /* primitive setting & vertex format*/
2107+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2108+ (0x00040000 | 0x14000000));
2109+ for (i = 0; i < 52; i++) {
2110+ SetMMIORegister(dev_priv->mmio->handle,
2111+ 0x440, ((unsigned int) i << 24));
2112+ }
2113+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2114+ 0x00fe0000);
2115+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2116+ 0x4000840f);
2117+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2118+ 0x47000400);
2119+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2120+ 0x44000000);
2121+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2122+ 0x46000000);
2123+
2124+ /* setting Misconfig*/
2125+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2126+ 0x00fe0000);
2127+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2128+ 0x00001004);
2129+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2130+ 0x0800004b);
2131+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2132+ 0x0a000049);
2133+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2134+ 0x0b0000fb);
2135+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2136+ 0x0c000001);
2137+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2138+ 0x0d0000cb);
2139+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2140+ 0x0e000009);
2141+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2142+ 0x10000000);
2143+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2144+ 0x110000ff);
2145+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2146+ 0x12000000);
2147+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2148+ 0x130000db);
2149+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2150+ 0x14000000);
2151+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2152+ 0x15000000);
2153+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2154+ 0x16000000);
2155+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2156+ 0x17000000);
2157+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2158+ 0x18000000);
2159+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2160+ 0x19000000);
2161+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2162+ 0x20000000);
2163+ } else if (dev_priv->chip_sub_index == CHIP_H6S2) {
2164+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2165+ 0x00010000);
2166+ for (i = 0; i <= 0x9A; i++) {
2167+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2168+ (unsigned int) i << 24);
2169+ }
2170+
2171+ /* Initial Texture Stage Setting*/
2172+ for (StageOfTexture = 0; StageOfTexture <= 0xf;
2173+ StageOfTexture++) {
2174+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2175+ (0x00020000 | 0x00000000 |
2176+ (StageOfTexture & 0xf)<<24));
2177+ for (i = 0 ; i <= 0x30 ; i++) {
2178+ SetMMIORegister(dev_priv->mmio->handle,
2179+ 0x440, (unsigned int) i << 24);
2180+ }
2181+ }
2182+
2183+ /* Initial Texture Sampler Setting*/
2184+ for (StageOfTexture = 0; StageOfTexture <= 0xf;
2185+ StageOfTexture++) {
2186+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2187+ (0x00020000 | 0x20000000 |
2188+ (StageOfTexture & 0xf)<<24));
2189+ for (i = 0 ; i <= 0x36 ; i++) {
2190+ SetMMIORegister(dev_priv->mmio->handle,
2191+ 0x440, (unsigned int) i << 24);
2192+ }
2193+ }
2194+
2195+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2196+ (0x00020000 | 0xfe000000));
2197+ for (i = 0 ; i <= 0x13 ; i++) {
2198+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2199+ (unsigned int) i << 24);
2200+ /* *((unsigned int volatile*)(pMapIOPort+
2201+ HC_REG_Hpara0)) =((unsigned int) i << 24);*/
2202+ }
2203+
2204+			/* Initial Gamma Table Setting */
2205+			/* (covers both the degamma and the gamma tables below) */
2206+			/* 5 + 4 = 9 (12) dwords */
2207+			/* sRGB textures are not directly supported by
2208+			   H3 hardware. */
2209+			/* We have to set the deGamma table for texture
2210+			   sampling. */
2211+
2212+ /* degamma table*/
2213+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2214+ (0x00030000 | 0x15000000));
2215+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2216+ (0x40000000 | (30 << 20) | (15 << 10) | (5)));
2217+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2218+ ((119 << 20) | (81 << 10) | (52)));
2219+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2220+ ((283 << 20) | (219 << 10) | (165)));
2221+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2222+ ((535 << 20) | (441 << 10) | (357)));
2223+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2224+ ((119 << 20) | (884 << 20) | (757 << 10)
2225+ | (640)));
2226+
2227+ /* gamma table*/
2228+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2229+ (0x00030000 | 0x17000000));
2230+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2231+ (0x40000000 | (13 << 20) | (13 << 10) | (13)));
2232+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2233+ (0x40000000 | (26 << 20) | (26 << 10) | (26)));
2234+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2235+ (0x40000000 | (39 << 20) | (39 << 10) | (39)));
2236+ SetMMIORegister(dev_priv->mmio->handle,
2237+ 0x440, ((51 << 20) | (51 << 10) | (51)));
2238+ SetMMIORegister(dev_priv->mmio->handle,
2239+ 0x440, ((71 << 20) | (71 << 10) | (71)));
2240+ SetMMIORegister(dev_priv->mmio->handle,
2241+ 0x440, (87 << 20) | (87 << 10) | (87));
2242+ SetMMIORegister(dev_priv->mmio->handle,
2243+ 0x440, (113 << 20) | (113 << 10) | (113));
2244+ SetMMIORegister(dev_priv->mmio->handle,
2245+ 0x440, (135 << 20) | (135 << 10) | (135));
2246+ SetMMIORegister(dev_priv->mmio->handle,
2247+ 0x440, (170 << 20) | (170 << 10) | (170));
2248+ SetMMIORegister(dev_priv->mmio->handle,
2249+ 0x440, (199 << 20) | (199 << 10) | (199));
2250+ SetMMIORegister(dev_priv->mmio->handle,
2251+ 0x440, (246 << 20) | (246 << 10) | (246));
2252+ SetMMIORegister(dev_priv->mmio->handle,
2253+ 0x440, (284 << 20) | (284 << 10) | (284));
2254+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2255+ (317 << 20) | (317 << 10) | (317));
2256+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2257+ (347 << 20) | (347 << 10) | (347));
2258+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2259+ (373 << 20) | (373 << 10) | (373));
2260+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2261+ (398 << 20) | (398 << 10) | (398));
2262+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2263+ (442 << 20) | (442 << 10) | (442));
2264+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2265+ (481 << 20) | (481 << 10) | (481));
2266+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2267+ (517 << 20) | (517 << 10) | (517));
2268+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2269+ (550 << 20) | (550 << 10) | (550));
2270+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2271+ (609 << 20) | (609 << 10) | (609));
2272+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2273+ (662 << 20) | (662 << 10) | (662));
2274+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2275+ (709 << 20) | (709 << 10) | (709));
2276+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2277+ (753 << 20) | (753 << 10) | (753));
2278+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2279+ (794 << 20) | (794 << 10) | (794));
2280+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2281+ (832 << 20) | (832 << 10) | (832));
2282+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2283+ (868 << 20) | (868 << 10) | (868));
2284+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2285+ (902 << 20) | (902 << 10) | (902));
2286+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2287+ (934 << 20) | (934 << 10) | (934));
2288+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2289+ (966 << 20) | (966 << 10) | (966));
2290+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2291+ (996 << 20) | (996 << 10) | (996));
2292+
2293+
2294+		/* For Interrupt Restore only:
2295+		all types of write-through registers must have their
2296+		header data written to hardware at least once before they
2297+		can be restored.  H/W will automatically record the header
2298+		into the write-through state buffer for restore usage.
2299+ By Jaren:
2300+ HParaType = 8'h03, HParaSubType = 8'h00
2301+ 8'h11
2302+ 8'h12
2303+ 8'h14
2304+ 8'h15
2305+ 8'h17
2306+ HParaSubType 8'h12, 8'h15 is initialized.
2307+ [HWLimit]
2308+		1. None of these write-through registers can be partially
2309+		updated.
2310+		2. All of these write-through updates must be AGP commands.
2311+		16 entries : 4 128-bit data */
2312+
2313+ /* Initialize INV_ParaSubType_TexPal */
2314+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2315+ (0x00030000 | 0x00000000));
2316+ for (i = 0; i < 16; i++) {
2317+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2318+ 0x00000000);
2319+ }
2320+
2321+ /* Initialize INV_ParaSubType_4X4Cof */
2322+ /* 32 entries : 8 128-bit data */
2323+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2324+ (0x00030000 | 0x11000000));
2325+ for (i = 0; i < 32; i++) {
2326+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2327+ 0x00000000);
2328+ }
2329+
2330+ /* Initialize INV_ParaSubType_StipPal */
2331+ /* 5 entries : 2 128-bit data */
2332+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2333+ (0x00030000 | 0x14000000));
2334+ for (i = 0; i < (5+3); i++) {
2335+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2336+ 0x00000000);
2337+ }
2338+
2339+ /* primitive setting & vertex format*/
2340+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2341+ (0x00040000));
2342+ for (i = 0; i <= 0x62; i++) {
2343+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2344+ ((unsigned int) i << 24));
2345+ }
2346+
2347+ /*ParaType 0xFE - Configure and Misc Setting*/
2348+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2349+ (0x00fe0000));
2350+ for (i = 0; i <= 0x47; i++) {
2351+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2352+ ((unsigned int) i << 24));
2353+ }
2354+ /*ParaType 0x11 - Frame Buffer Auto-Swapping and
2355+ Command Regulator Misc*/
2356+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2357+ (0x00110000));
2358+ for (i = 0; i <= 0x20; i++) {
2359+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2360+ ((unsigned int) i << 24));
2361+ }
2362+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2363+ 0x00fe0000);
2364+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2365+ 0x4000840f);
2366+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2367+ 0x47000404);
2368+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2369+ 0x44000000);
2370+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2371+ 0x46000005);
2372+
2373+ /* setting Misconfig*/
2374+ SetMMIORegister(dev_priv->mmio->handle, 0x43C,
2375+ 0x00fe0000);
2376+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2377+ 0x00001004);
2378+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2379+ 0x08000249);
2380+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2381+ 0x0a0002c9);
2382+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2383+ 0x0b0002fb);
2384+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2385+ 0x0c000000);
2386+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2387+ 0x0d0002cb);
2388+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2389+ 0x0e000009);
2390+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2391+ 0x10000049);
2392+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2393+ 0x110002ff);
2394+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2395+ 0x12000008);
2396+ SetMMIORegister(dev_priv->mmio->handle, 0x440,
2397+ 0x130002db);
2398+ }
2399+}
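
The long runs of 0x440 writes in Initialize3DEngine() above appear to pack three gamma/degamma table entries into each dword, using 10-bit fields at bit offsets 20, 10 and 0, with 0x40000000 acting as a header bit on some entries; that reading is an editorial inference from the shift pattern, not something stated in the patch. A tiny standalone check of the assumed packing:

/* Assumed interpretation of the table writes above: three 10-bit
 * entries per dword, packed as (a << 20) | (b << 10) | c. */
#include <stdio.h>

static unsigned int pack3(unsigned int a, unsigned int b, unsigned int c)
{
	return (a << 20) | (b << 10) | c;
}

int main(void)
{
	/* First degamma dword minus the 0x40000000 header bit: 30, 15, 5 */
	printf("0x%08x\n", pack3(30, 15, 5));   /* prints 0x01e03c05 */
	return 0;
}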
2400+
2401+int via_chrome9_drm_resume(struct pci_dev *pci)
2402+{
2403+ struct drm_device *dev = (struct drm_device *)pci_get_drvdata(pci);
2404+ struct drm_via_chrome9_private *dev_priv =
2405+ (struct drm_via_chrome9_private *)dev->dev_private;
2406+
2407+ if (!dev_priv->initialized)
2408+ return 0;
2409+
2410+ Initialize3DEngine(dev_priv);
2411+
2412+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS, 0x00110000);
2413+ if (dev_priv->chip_sub_index == CHIP_H6S2) {
2414+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
2415+ 0x06000000);
2416+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
2417+ 0x07100000);
2418+ } else{
2419+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
2420+ 0x02000000);
2421+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
2422+ 0x03100000);
2423+ }
2424+
2425+
2426+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_TRANS,
2427+ INV_ParaType_PreCR);
2428+ SetMMIORegister(dev_priv->mmio->handle, INV_REG_CR_BEGIN,
2429+ INV_SubA_HSetRBGID | INV_HSetRBGID_CR);
2430+
2431+ if (dev_priv->chip_sub_index == CHIP_H6S2) {
2432+ unsigned int i;
2433+ /* Here restore SR66~SR6F SR79~SR7B */
2434+ for (i = 0; i < 10; i++) {
2435+ SetMMIORegisterU8(dev_priv->mmio->handle,
2436+ 0x83c4, 0x66 + i);
2437+ SetMMIORegisterU8(dev_priv->mmio->handle,
2438+ 0x83c5, dev_priv->gti_backup[i]);
2439+ }
2440+
2441+ for (i = 0; i < 3; i++) {
2442+ SetMMIORegisterU8(dev_priv->mmio->handle,
2443+ 0x83c4, 0x79 + i);
2444+ SetMMIORegisterU8(dev_priv->mmio->handle,
2445+ 0x83c5, dev_priv->gti_backup[10 + i]);
2446+ }
2447+ }
2448+
2449+ via_chrome9_dma_init_inv(dev);
2450+
2451+ return 0;
2452+}
2453+
2454+int via_chrome9_drm_suspend(struct pci_dev *pci,
2455+ pm_message_t state)
2456+{
2457+ int i;
2458+ struct drm_device *dev = (struct drm_device *)pci_get_drvdata(pci);
2459+ struct drm_via_chrome9_private *dev_priv =
2460+ (struct drm_via_chrome9_private *)dev->dev_private;
2461+
2462+ if (!dev_priv->initialized)
2463+ return 0;
2464+
2465+ if (dev_priv->chip_sub_index != CHIP_H6S2)
2466+ return 0;
2467+
2468+ /* Save registers from SR66~SR6F */
2469+ for (i = 0; i < 10; i++) {
2470+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x66 + i);
2471+ dev_priv->gti_backup[i] =
2472+ GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
2473+ }
2474+
2475+ /* Save registers from SR79~SR7B */
2476+ for (i = 0; i < 3; i++) {
2477+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x79 + i);
2478+ dev_priv->gti_backup[10 + i] =
2479+ GetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5);
2480+ }
2481+
2482+ return 0;
2483+}
2484+
2485+int via_chrome9_driver_load(struct drm_device *dev,
2486+ unsigned long chipset)
2487+{
2488+ struct drm_via_chrome9_private *dev_priv;
2489+ int ret = 0;
2490+ static int associate;
2491+
2492+ if (!associate) {
2493+ pci_set_drvdata(dev->pdev, dev);
2494+ dev->pdev->driver = &dev->driver->pci_driver;
2495+ associate = 1;
2496+ }
2497+
2498+ dev->counters += 4;
2499+ dev->types[6] = _DRM_STAT_IRQ;
2500+ dev->types[7] = _DRM_STAT_PRIMARY;
2501+ dev->types[8] = _DRM_STAT_SECONDARY;
2502+ dev->types[9] = _DRM_STAT_DMA;
2503+
2504+ dev_priv = drm_calloc(1, sizeof(struct drm_via_chrome9_private),
2505+ DRM_MEM_DRIVER);
2506+ if (dev_priv == NULL)
2507+ return -ENOMEM;
2508+
2509+ /* Clear */
2510+ memset(dev_priv, 0, sizeof(struct drm_via_chrome9_private));
2511+
2512+ dev_priv->dev = dev;
2513+ dev->dev_private = (void *)dev_priv;
2514+
2515+ dev_priv->chip_index = chipset;
2516+
2517+ ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
2518+ if (ret)
2519+ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
2520+ return ret;
2521+}
2522+
2523+int via_chrome9_driver_unload(struct drm_device *dev)
2524+{
2525+ struct drm_via_chrome9_private *dev_priv = dev->dev_private;
2526+
2527+ drm_sman_takedown(&dev_priv->sman);
2528+
2529+ drm_free(dev_priv, sizeof(struct drm_via_chrome9_private),
2530+ DRM_MEM_DRIVER);
2531+
2532+ dev->dev_private = 0;
2533+
2534+ return 0;
2535+}
2536+
2537+static int via_chrome9_initialize(struct drm_device *dev,
2538+ struct drm_via_chrome9_init *init)
2539+{
2540+ struct drm_via_chrome9_private *dev_priv =
2541+ (struct drm_via_chrome9_private *)dev->dev_private;
2542+
2543+ dev_priv->chip_agp = init->chip_agp;
2544+ dev_priv->chip_index = init->chip_index;
2545+ dev_priv->chip_sub_index = init->chip_sub_index;
2546+
2547+ dev_priv->usec_timeout = init->usec_timeout;
2548+ dev_priv->front_offset = init->front_offset;
2549+ dev_priv->back_offset = init->back_offset >>
2550+ VIA_CHROME9DRM_VIDEO_STARTADDRESS_ALIGNMENT <<
2551+ VIA_CHROME9DRM_VIDEO_STARTADDRESS_ALIGNMENT;
2552+ dev_priv->available_fb_size = init->available_fb_size -
2553+ (init->available_fb_size %
2554+ (1 << VIA_CHROME9DRM_VIDEO_STARTADDRESS_ALIGNMENT));
2555+ dev_priv->depth_offset = init->depth_offset;
2556+
2557+	/* Find all the maps added first; doing this is necessary to
2558+	initialize the hw */
2559+ if (via_chrome9_map_init(dev, init)) {
2560+ DRM_ERROR("function via_chrome9_map_init ERROR !\n");
2561+ goto error;
2562+ }
2563+
2564+	/* Necessary information has been gathered for initializing the hw */
2565+ if (via_chrome9_hw_init(dev, init)) {
2566+ DRM_ERROR("function via_chrome9_hw_init ERROR !\n");
2567+ goto error;
2568+ }
2569+
2570+	/* After hw initialization, we know whether to use AGP
2571+	or PCIe for textures */
2572+ if (via_chrome9_heap_management_init(dev, init)) {
2573+		DRM_ERROR("function "
2574+			"via_chrome9_heap_management_init ERROR!\n");
2575+ goto error;
2576+ }
2577+
2578+ dev_priv->initialized = 1;
2579+
2580+ return 0;
2581+
2582+error:
2583+	/* All error recovery has been handled in the relevant functions,
2584+	so just return an error here */
2585+ return -EINVAL;
2586+}
2587+
2588+static void via_chrome9_cleanup(struct drm_device *dev,
2589+ struct drm_via_chrome9_init *init)
2590+{
2591+ struct drm_via_chrome9_DMA_manager *lpcmDMAManager = NULL;
2592+ struct drm_via_chrome9_private *dev_priv =
2593+ (struct drm_via_chrome9_private *)dev->dev_private;
2594+ DRM_DEBUG("function via_chrome9_cleanup run!\n");
2595+
2596+ if (!dev_priv)
2597+ return ;
2598+
2599+ lpcmDMAManager =
2600+ (struct drm_via_chrome9_DMA_manager *)dev_priv->dma_manager;
2601+ if (dev_priv->pcie_vmalloc_nocache) {
2602+ vfree((void *)dev_priv->pcie_vmalloc_nocache);
2603+ dev_priv->pcie_vmalloc_nocache = 0;
2604+ if (lpcmDMAManager)
2605+ lpcmDMAManager->addr_linear = NULL;
2606+ }
2607+
2608+ if (dev_priv->pagetable_map.pagetable_handle) {
2609+ iounmap(dev_priv->pagetable_map.pagetable_handle);
2610+ dev_priv->pagetable_map.pagetable_handle = NULL;
2611+ }
2612+
2613+ if (lpcmDMAManager && lpcmDMAManager->addr_linear) {
2614+ iounmap(lpcmDMAManager->addr_linear);
2615+ lpcmDMAManager->addr_linear = NULL;
2616+ }
2617+
2618+ kfree(lpcmDMAManager);
2619+ dev_priv->dma_manager = NULL;
2620+
2621+ if (dev_priv->event_tag_info) {
2622+ vfree(dev_priv->event_tag_info);
2623+ dev_priv->event_tag_info = NULL;
2624+ }
2625+
2626+ if (dev_priv->bci_buffer) {
2627+ vfree(dev_priv->bci_buffer);
2628+ dev_priv->bci_buffer = NULL;
2629+ }
2630+
2631+ via_chrome9_memory_destroy_heap(dev, dev_priv);
2632+
2633+	/* After cleanup, reset these values to NULL */
2634+ dev_priv->sarea = dev_priv->mmio = dev_priv->hostBlt =
2635+ dev_priv->fb = dev_priv->front = dev_priv->back =
2636+ dev_priv->depth = dev_priv->agp_tex =
2637+ dev_priv->shadow_map.shadow = 0;
2638+ dev_priv->sarea_priv = 0;
2639+ dev_priv->initialized = 0;
2640+}
2641+
2642+/*
2643+Do almost all of the initialization here, including:
2644+1. initialize all addmaps in the private data structure
2645+2. initialize memory heap management for video/AGP/PCIe
2646+3. initialize hw for the DMA (PCIe/AGP) function
2647+
2648+Note: this function dispatches all of this work into the relevant functions
2649+*/
2650+int via_chrome9_ioctl_init(struct drm_device *dev, void *data,
2651+ struct drm_file *file_priv)
2652+{
2653+ struct drm_via_chrome9_init *init = (struct drm_via_chrome9_init *)data;
2654+
2655+ switch (init->func) {
2656+ case VIA_CHROME9_INIT:
2657+ if (via_chrome9_initialize(dev, init)) {
2658+ DRM_ERROR("function via_chrome9_initialize error\n");
2659+ return -1;
2660+ }
2661+ via_chrome9_filepriv_v4l = (void *)file_priv;
2662+ via_chrome9_dev_v4l = (void *)dev;
2663+ break;
2664+
2665+ case VIA_CHROME9_CLEANUP:
2666+ via_chrome9_cleanup(dev, init);
2667+ via_chrome9_filepriv_v4l = 0;
2668+ via_chrome9_dev_v4l = 0;
2669+ break;
2670+
2671+ default:
2672+ return -1;
2673+ }
2674+
2675+ return 0;
2676+}
2677+
2678+int via_chrome9_ioctl_allocate_event_tag(struct drm_device *dev,
2679+ void *data, struct drm_file *file_priv)
2680+{
2681+ struct drm_via_chrome9_event_tag *event_tag = data;
2682+ struct drm_via_chrome9_private *dev_priv =
2683+ (struct drm_via_chrome9_private *)dev->dev_private;
2684+ struct drm_clb_event_tag_info *event_tag_info =
2685+ dev_priv->event_tag_info;
2686+ unsigned int *event_addr = 0, i = 0;
2687+
2688+ for (i = 0; i < NUMBER_OF_EVENT_TAGS; i++) {
2689+ if (!event_tag_info->usage[i])
2690+ break;
2691+ }
2692+
2693+ if (i < NUMBER_OF_EVENT_TAGS) {
2694+ event_tag_info->usage[i] = 1;
2695+ event_tag->event_offset = i;
2696+ event_tag->last_sent_event_value.event_low = 0;
2697+ event_tag->current_event_value.event_low = 0;
2698+ event_addr = event_tag_info->linear_address +
2699+ event_tag->event_offset * 4;
2700+ *event_addr = 0;
2701+ return 0;
2702+ } else {
2703+ return -7;
2704+ }
2705+
2706+ return 0;
2707+}
2708+
2709+int via_chrome9_ioctl_free_event_tag(struct drm_device *dev,
2710+ void *data, struct drm_file *file_priv)
2711+{
2712+ struct drm_via_chrome9_private *dev_priv =
2713+ (struct drm_via_chrome9_private *)dev->dev_private;
2714+ struct drm_clb_event_tag_info *event_tag_info =
2715+ dev_priv->event_tag_info;
2716+ struct drm_via_chrome9_event_tag *event_tag = data;
2717+
2718+ event_tag_info->usage[event_tag->event_offset] = 0;
2719+ return 0;
2720+}
2721+
2722+void via_chrome9_lastclose(struct drm_device *dev)
2723+{
2724+ via_chrome9_cleanup(dev, 0);
2725+ return ;
2726+}
2727+
2728+static int via_chrome9_do_wait_vblank(struct drm_via_chrome9_private
2729+ *dev_priv)
2730+{
2731+ int i;
2732+
2733+ for (i = 0; i < dev_priv->usec_timeout; i++) {
2734+ VIA_CHROME9_WRITE8(0x83d4, 0x34);
2735+ if ((VIA_CHROME9_READ8(0x83d5)) & 0x8)
2736+ return 0;
2737+ __via_chrome9ke_udelay(1);
2738+ }
2739+
2740+ return -1;
2741+}
2742+
2743+void via_chrome9_preclose(struct drm_device *dev, struct drm_file *file_priv)
2744+{
2745+ struct drm_via_chrome9_private *dev_priv =
2746+ (struct drm_via_chrome9_private *) dev->dev_private;
2747+ struct drm_via_chrome9_sarea *sarea_priv = NULL;
2748+
2749+ if (!dev_priv)
2750+ return ;
2751+
2752+ sarea_priv = dev_priv->sarea_priv;
2753+ if (!sarea_priv)
2754+ return ;
2755+
2756+ if ((sarea_priv->page_flip == 1) &&
2757+ (sarea_priv->current_page != VIA_CHROME9_FRONT)) {
2758+ __volatile__ unsigned long *bci_base;
2759+ if (via_chrome9_do_wait_vblank(dev_priv))
2760+ return;
2761+
2762+ bci_base = (__volatile__ unsigned long *)(dev_priv->bci);
2763+
2764+ BCI_SET_STREAM_REGISTER(bci_base, 0x81c4, 0xc0000000);
2765+ BCI_SET_STREAM_REGISTER(bci_base, 0x81c0,
2766+ dev_priv->front_offset);
2767+ BCI_SEND(bci_base, 0x64000000);/* wait vsync */
2768+
2769+ sarea_priv->current_page = VIA_CHROME9_FRONT;
2770+ }
2771+}
2772+
2773+int via_chrome9_is_agp(struct drm_device *dev)
2774+{
2775+ /* filter out pcie group which has no AGP device */
2776+ if (dev->pci_device == 0x1122 || dev->pci_device == 0x5122) {
2777+ dev->driver->driver_features &=
2778+ ~(DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_REQUIRE_AGP);
2779+ return 0;
2780+ }
2781+ return 1;
2782+}
2783+
2784--- /dev/null
2785+++ b/drivers/gpu/drm/via_chrome9/via_chrome9_drm.h
2786@@ -0,0 +1,443 @@
2787+/*
2788+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
2789+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
2790+ *
2791+ * Permission is hereby granted, free of charge, to any person
2792+ * obtaining a copy of this software and associated documentation
2793+ * files (the "Software"), to deal in the Software without
2794+ * restriction, including without limitation the rights to use,
2795+ * copy, modify, merge, publish, distribute, sub license,
2796+ * and/or sell copies of the Software, and to permit persons to
2797+ * whom the Software is furnished to do so, subject to the
2798+ * following conditions:
2799+ *
2800+ * The above copyright notice and this permission notice
2801+ * (including the next paragraph) shall be included in all
2802+ * copies or substantial portions of the Software.
2803+ *
2804+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
2805+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
2806+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
2807+ * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
2808+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2809+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
2810+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
2811+ * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
2812+ */
2813+#ifndef _VIA_CHROME9_DRM_H_
2814+#define _VIA_CHROME9_DRM_H_
2815+
2816+/* WARNING: These defines must be the same as what the Xserver uses.
2817+ * if you change them, you must change the defines in the Xserver.
2818+ */
2819+
2820+#ifndef _VIA_CHROME9_DEFINES_
2821+#define _VIA_CHROME9_DEFINES_
2822+
2823+#ifndef __KERNEL__
2824+#include "via_drmclient.h"
2825+#endif
2826+
2827+#define VIA_CHROME9_NR_SAREA_CLIPRECTS 8
2828+#define VIA_CHROME9_NR_XVMC_PORTS 10
2829+#define VIA_CHROME9_NR_XVMC_LOCKS 5
2830+#define VIA_CHROME9_MAX_CACHELINE_SIZE 64
2831+#define XVMCLOCKPTR(saPriv,lockNo) \
2832+ ((__volatile__ struct drm_hw_lock *) \
2833+ (((((unsigned long) (saPriv)->XvMCLockArea) + \
2834+ (VIA_CHROME9_MAX_CACHELINE_SIZE - 1)) & \
2835+ ~(VIA_CHROME9_MAX_CACHELINE_SIZE - 1)) + \
2836+ VIA_CHROME9_MAX_CACHELINE_SIZE*(lockNo)))
2837+
2838+/* Each region is a minimum of 64k, and there are at most 64 of them.
2839+ */
2840+#define VIA_CHROME9_NR_TEX_REGIONS 64
2841+#define VIA_CHROME9_LOG_MIN_TEX_REGION_SIZE 16
2842+#endif
2843+
2844+#define VIA_CHROME9_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
2845+#define VIA_CHROME9_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
2846+#define VIA_CHROME9_UPLOAD_CTX 0x4
2847+#define VIA_CHROME9_UPLOAD_BUFFERS 0x8
2848+#define VIA_CHROME9_UPLOAD_TEX0 0x10
2849+#define VIA_CHROME9_UPLOAD_TEX1 0x20
2850+#define VIA_CHROME9_UPLOAD_CLIPRECTS 0x40
2851+#define VIA_CHROME9_UPLOAD_ALL 0xff
2852+
2853+/* VIA_CHROME9 specific ioctls */
2854+#define DRM_VIA_CHROME9_ALLOCMEM 0x00
2855+#define DRM_VIA_CHROME9_FREEMEM 0x01
2856+#define DRM_VIA_CHROME9_FREE 0x02
2857+#define DRM_VIA_CHROME9_ALLOCATE_EVENT_TAG 0x03
2858+#define DRM_VIA_CHROME9_FREE_EVENT_TAG 0x04
2859+#define DRM_VIA_CHROME9_ALLOCATE_APERTURE 0x05
2860+#define DRM_VIA_CHROME9_FREE_APERTURE 0x06
2861+#define DRM_VIA_CHROME9_ALLOCATE_VIDEO_MEM 0x07
2862+#define DRM_VIA_CHROME9_FREE_VIDEO_MEM 0x08
2863+#define DRM_VIA_CHROME9_WAIT_CHIP_IDLE 0x09
2864+#define DRM_VIA_CHROME9_PROCESS_EXIT 0x0A
2865+#define DRM_VIA_CHROME9_RESTORE_PRIMARY 0x0B
2866+#define DRM_VIA_CHROME9_FLUSH_CACHE 0x0C
2867+#define DRM_VIA_CHROME9_INIT 0x0D
2868+#define DRM_VIA_CHROME9_FLUSH 0x0E
2869+#define DRM_VIA_CHROME9_CHECKVIDMEMSIZE 0x0F
2870+#define DRM_VIA_CHROME9_PCIEMEMCTRL 0x10
2871+#define DRM_VIA_CHROME9_AUTH_MAGIC 0x11
2872+#define DRM_VIA_CHROME9_GET_PCI_ID 0x12
2873+#define DRM_VIA_CHROME9_INIT_JUDGE 0x16
2874+#define DRM_VIA_CHROME9_DMA 0x17
2875+
2876+#define DRM_IOCTL_VIA_CHROME9_INIT \
2877+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_INIT, \
2878+ struct drm_via_chrome9_init)
2879+#define DRM_IOCTL_VIA_CHROME9_FLUSH \
2880+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_FLUSH, \
2881+ struct drm_via_chrome9_flush)
2882+#define DRM_IOCTL_VIA_CHROME9_FREE \
2883+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_FREE, int)
2884+#define DRM_IOCTL_VIA_CHROME9_ALLOCATE_EVENT_TAG \
2885+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_ALLOCATE_EVENT_TAG, \
2886+ struct drm_event_via_chrome9_tag)
2887+#define DRM_IOCTL_VIA_CHROME9_FREE_EVENT_TAG \
2888+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_FREE_EVENT_TAG, \
2889+ struct drm_event_via_chrome9_tag)
2890+#define DRM_IOCTL_VIA_CHROME9_ALLOCATE_APERTURE \
2891+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_ALLOCATE_APERTURE, \
2892+ struct drm_via_chrome9_aperture)
2893+#define DRM_IOCTL_VIA_CHROME9_FREE_APERTURE \
2894+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_FREE_APERTURE, \
2895+ struct drm_via_chrome9_aperture)
2896+#define DRM_IOCTL_VIA_CHROME9_ALLOCATE_VIDEO_MEM \
2897+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_ALLOCATE_VIDEO_MEM, \
2898+ struct drm_via_chrome9_memory_alloc)
2899+#define DRM_IOCTL_VIA_CHROME9_FREE_VIDEO_MEM \
2900+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_FREE_VIDEO_MEM, \
2901+ struct drm_via_chrome9_memory_alloc)
2902+#define DRM_IOCTL_VIA_CHROME9_WAIT_CHIP_IDLE \
2903+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_WAIT_CHIP_IDLE, int)
2904+#define DRM_IOCTL_VIA_CHROME9_PROCESS_EXIT \
2905+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_PROCESS_EXIT, int)
2906+#define DRM_IOCTL_VIA_CHROME9_RESTORE_PRIMARY \
2907+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_RESTORE_PRIMARY, int)
2908+#define DRM_IOCTL_VIA_CHROME9_FLUSH_CACHE \
2909+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_FLUSH_CACHE, int)
2910+#define DRM_IOCTL_VIA_CHROME9_ALLOCMEM \
2911+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_ALLOCMEM, int)
2912+#define DRM_IOCTL_VIA_CHROME9_FREEMEM \
2913+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_FREEMEM, int)
2914+#define DRM_IOCTL_VIA_CHROME9_CHECK_VIDMEM_SIZE \
2915+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_CHECKVIDMEMSIZE, \
2916+ struct drm_via_chrome9_memory_alloc)
2917+#define DRM_IOCTL_VIA_CHROME9_PCIEMEMCTRL \
2918+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_PCIEMEMCTRL,\
2919+ drm_via_chrome9_pciemem_ctrl_t)
2920+#define DRM_IOCTL_VIA_CHROME9_AUTH_MAGIC \
2921+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_AUTH_MAGIC, drm_auth_t)
2922+#define DRM_IOCTL_VIA_CHROME9_GET_PCI_ID \
2923+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_CHROME9_GET_PCI_ID, \
2924+ struct get_pci_id_struct)
2925+#define DRM_IOCTL_VIA_CHROME9_INIT_JUDGE \
2926+ DRM_IOR(DRM_COMMAND_BASE + DRM_VIA_CHROME9_INIT_JUDGE, int)
2927+#define DRM_IOCTL_VIA_CHROME9_DMA \
2928+ DRM_IO(DRM_COMMAND_BASE + DRM_VIA_CHROME9_DMA, int)
2929+
2930+enum S3GCHIPIDS {
2931+ CHIP_UNKNOWN = -1,
2932+ CHIP_CMODEL, /*Model for any chip. */
2933+ CHIP_CLB, /*Columbia */
2934+ CHIP_DST, /*Destination */
2935+ CHIP_CSR, /*Castlerock */
2936+ CHIP_INV, /*Innovation (H3) */
2937+ CHIP_H5, /*Innovation (H5) */
2938+ CHIP_H5S1, /*Innovation (H5S1) */
2939+ CHIP_H6S2, /*Innovation (H6S2) */
2940+ CHIP_CMS, /*Columbia MS */
2941+ CHIP_METRO, /*Metropolis */
2942+ CHIP_MANHATTAN, /*manhattan */
2943+ CHIP_MATRIX, /*matrix */
2944+ CHIP_EVO, /*change for GCC 4.1 -add- 07.02.12*/
2945+ CHIP_H6S1, /*Innovation (H6S1)*/
2946+ CHIP_DST2, /*Destination-2 */
2947+ CHIP_LAST /*Maximum number of chips supported. */
2948+};
2949+
2950+enum VIA_CHROME9CHIPBUS {
2951+ CHIP_PCI,
2952+ CHIP_AGP,
2953+ CHIP_PCIE
2954+};
2955+
2956+struct drm_via_chrome9_init {
2957+ enum {
2958+ VIA_CHROME9_INIT = 0x01,
2959+ VIA_CHROME9_CLEANUP = 0x02
2960+ } func;
2961+ int chip_agp;
2962+ int chip_index;
2963+ int chip_sub_index;
2964+ int usec_timeout;
2965+ unsigned int sarea_priv_offset;
2966+ unsigned int fb_cpp;
2967+ unsigned int front_offset;
2968+ unsigned int back_offset;
2969+ unsigned int depth_offset;
2970+ unsigned int mmio_handle;
2971+ unsigned int dma_handle;
2972+ unsigned int fb_handle;
2973+ unsigned int front_handle;
2974+ unsigned int back_handle;
2975+ unsigned int depth_handle;
2976+
2977+ unsigned int fb_tex_offset;
2978+ unsigned int fb_tex_size;
2979+
2980+ unsigned int agp_tex_size;
2981+ unsigned int agp_tex_handle;
2982+ unsigned int shadow_size;
2983+ unsigned int shadow_handle;
2984+ unsigned int garttable_size;
2985+ unsigned int garttable_offset;
2986+ unsigned long available_fb_size;
2987+ unsigned long fb_base_address;
2988+ unsigned int DMA_size;
2989+ unsigned long DMA_phys_address;
2990+ enum {
2991+ AGP_RING_BUFFER,
2992+ AGP_DOUBLE_BUFFER,
2993+ AGP_DISABLED
2994+ } agp_type;
2995+ unsigned int hostBlt_handle;
2996+};
2997+
2998+enum dma_cmd_type {
2999+ flush_bci = 0,
3000+ flush_bci_and_wait,
3001+ dma_kickoff,
3002+ flush_dma_buffer,
3003+ flush_dma_and_wait
3004+};
3005+
3006+struct drm_via_chrome9_flush {
3007+ enum dma_cmd_type dma_cmd_type;
3008+ /* command buffer index */
3009+ int cmd_idx;
3010+ /* command buffer offset */
3011+ int cmd_offset;
3012+ /* command dword size,command always from beginning */
3013+ int cmd_size;
3014+ /* if use dma kick off,it is dma kick off command */
3015+ unsigned long dma_kickoff[2];
3016+ /* user mode DMA buffer pointer */
3017+ unsigned int *usermode_dma_buf;
3018+};
3019+
3020+struct event_value {
3021+ int event_low;
3022+ int event_high;
3023+};
3024+
3025+struct drm_via_chrome9_event_tag {
3026+ unsigned int event_size; /* event tag size */
3027+ int event_offset; /* event tag id */
3028+ struct event_value last_sent_event_value;
3029+ struct event_value current_event_value;
3030+ int query_mask0;
3031+ int query_mask1;
3032+ int query_Id1;
3033+};
3034+
3035+/* Indices into buf.Setup where various bits of state are mirrored per
3036+ * context and per buffer. These can be fired at the card as a unit,
3037+ * or in a piecewise fashion as required.
3038+ */
3039+
3040+#define VIA_CHROME9_TEX_SETUP_SIZE 8
3041+
3042+/* Flags for clear ioctl
3043+ */
3044+#define VIA_CHROME9_FRONT 0x1
3045+#define VIA_CHROME9_BACK 0x2
3046+#define VIA_CHROME9_DEPTH 0x4
3047+#define VIA_CHROME9_STENCIL 0x8
3048+#define VIA_CHROME9_MEM_VIDEO 0 /* matches drm constant */
3049+#define VIA_CHROME9_MEM_AGP 1 /* matches drm constant */
3050+#define VIA_CHROME9_MEM_SYSTEM 2
3051+#define VIA_CHROME9_MEM_MIXED 3
3052+#define VIA_CHROME9_MEM_UNKNOWN 4
3053+
3054+struct drm_via_chrome9_agp {
3055+ uint32_t offset;
3056+ uint32_t size;
3057+};
3058+
3059+struct drm_via_chrome9_fb {
3060+ uint32_t offset;
3061+ uint32_t size;
3062+};
3063+
3064+struct drm_via_chrome9_mem {
3065+ uint32_t context;
3066+ uint32_t type;
3067+ uint32_t size;
3068+ unsigned long index;
3069+ unsigned long offset;
3070+};
3071+
3072+struct drm_via_chrome9_aperture {
3073+ /*IN: The frame buffer offset of the surface. */
3074+ int surface_offset;
3075+ /*IN: Surface pitch in byte, */
3076+ int pitch;
3077+ /*IN: Surface width in pixel */
3078+ int width;
3079+ /*IN: Surface height in pixel */
3080+ int height;
3081+ /*IN: Surface color format, Columbia has more color formats */
3082+ int color_format;
3083+ /*IN: Rotation degrees, only for Columbia */
3084+ int rotation_degree;
3085+ /*IN Is the PCIE Video, for MATRIX support NONLOCAL Aperture */
3086+ int isPCIEVIDEO;
3087+	/*IN: Is the surface tiled, only for Columbia */
3088+ int is_tiled;
3089+	/*IN: Only allocate the aperture, no hardware setup. */
3090+ int allocate_only;
3091+ /* OUT: linear address for aperture */
3092+ unsigned int *aperture_linear_address;
3093+ /*OUT: The pitch of the aperture,for CPU write not for GE */
3094+ int aperture_pitch;
3095+ /*OUT: The index of the aperture */
3096+ int aperture_handle;
3097+ int apertureID;
3098+ /* always =0xAAAAAAAA */
3099+ /* Aligned surface's width(in pixel) */
3100+ int width_aligned;
3101+ /* Aligned surface's height(in pixel) */
3102+ int height_aligned;
3103+};
3104+
3105+/*
3106+	Some fields of this data structure have no meaning now, since
3107+	the heap is managed via the mechanism provided by DRM.
3108+	They remain as they were to stay consistent with the 3D driver interface.
3109+*/
3110+struct drm_via_chrome9_memory_alloc {
3111+ enum {
3112+ memory_heap_video = 0,
3113+ memory_heap_agp,
3114+ memory_heap_pcie_video,
3115+ memory_heap_pcie,
3116+ max_memory_heaps
3117+ } heap_type;
3118+ struct {
3119+ void *lpL1Node;
3120+ unsigned int alcL1Tag;
3121+ unsigned int usageCount;
3122+ unsigned int dwVersion;
3123+ unsigned int dwResHandle;
3124+ unsigned int dwProcessID;
3125+ } heap_info;
3126+ unsigned int flags;
3127+ unsigned int size;
3128+ unsigned int physaddress;
3129+ unsigned int offset;
3130+ unsigned int align;
3131+ void *linearaddress;
3132+};
3133+
3134+struct drm_via_chrome9_dma_init {
3135+ enum {
3136+ VIA_CHROME9_INIT_DMA = 0x01,
3137+ VIA_CHROME9_CLEANUP_DMA = 0x02,
3138+ VIA_CHROME9_DMA_INITIALIZED = 0x03
3139+ } func;
3140+
3141+ unsigned long offset;
3142+ unsigned long size;
3143+ unsigned long reg_pause_addr;
3144+};
3145+
3146+struct drm_via_chrome9_cmdbuffer {
3147+ char __user *buf;
3148+ unsigned long size;
3149+};
3150+
3151+/* Warning: If you change the SAREA structure you must change the Xserver
3152+ * structure as well */
3153+
3154+struct drm_via_chrome9_tex_region {
3155+ unsigned char next, prev; /* indices to form a circular LRU */
3156+ unsigned char inUse; /* owned by a client, or free? */
3157+ int age; /* tracked by clients to update local LRU's */
3158+};
3159+
3160+struct drm_via_chrome9_sarea {
3161+ int page_flip;
3162+ int current_page;
3163+ unsigned int req_drawable;/* the X drawable id */
3164+ unsigned int req_draw_buffer;/* VIA_CHROME9_FRONT or VIA_CHROME9_BACK */
3165+ /* Last context that uploaded state */
3166+ int ctx_owner;
3167+};
3168+
3169+struct drm_via_chrome9_cmdbuf_size {
3170+ enum {
3171+ VIA_CHROME9_CMDBUF_SPACE = 0x01,
3172+ VIA_CHROME9_CMDBUF_LAG = 0x02
3173+ } func;
3174+ int wait;
3175+ uint32_t size;
3176+};
3177+
3178+struct drm_via_chrome9_DMA_manager {
3179+ unsigned int *addr_linear;
3180+ unsigned int DMASize;
3181+ unsigned int bDMAAgp;
3182+ unsigned int LastIssuedEventTag;
3183+ unsigned int *pBeg;
3184+ unsigned int *pInUseByHW;
3185+ unsigned int **ppInUseByHW;
3186+ unsigned int *pInUseBySW;
3187+ unsigned int *pFree;
3188+ unsigned int *pEnd;
3189+
3190+ unsigned long pPhysical;
3191+ unsigned int MaxKickoffSize;
3192+};
3193+
3194+struct get_pci_id_struct {
3195+ unsigned int x;
3196+ unsigned int y;
3197+ unsigned int z;
3198+ unsigned int f;
3199+};
3200+
3201+
3202+extern void *via_chrome9_dev_v4l;
3203+extern void *via_chrome9_filepriv_v4l;
3204+extern int via_chrome9_ioctl_wait_chip_idle(struct drm_device *dev,
3205+ void *data, struct drm_file *file_priv);
3206+extern int via_chrome9_ioctl_init(struct drm_device *dev,
3207+ void *data, struct drm_file *file_priv);
3208+extern int via_chrome9_ioctl_allocate_event_tag(struct drm_device
3209+ *dev, void *data, struct drm_file *file_priv);
3210+extern int via_chrome9_ioctl_free_event_tag(struct drm_device *dev,
3211+ void *data, struct drm_file *file_priv);
3212+extern int via_chrome9_driver_load(struct drm_device *dev,
3213+ unsigned long chipset);
3214+extern int via_chrome9_driver_unload(struct drm_device *dev);
3215+extern int via_chrome9_ioctl_process_exit(struct drm_device *dev,
3216+ void *data, struct drm_file *file_priv);
3217+extern int via_chrome9_ioctl_restore_primary(struct drm_device *dev,
3218+ void *data, struct drm_file *file_priv);
3219+extern int via_chrome9_drm_resume(struct pci_dev *dev);
3220+extern int via_chrome9_drm_suspend(struct pci_dev *dev,
3221+ pm_message_t state);
3222+extern void __via_chrome9ke_udelay(unsigned long usecs);
3223+extern void via_chrome9_lastclose(struct drm_device *dev);
3224+extern void via_chrome9_preclose(struct drm_device *dev,
3225+ struct drm_file *file_priv);
3226+extern int via_chrome9_is_agp(struct drm_device *dev);
3227+
3228+
3229+#endif /* _VIA_CHROME9_DRM_H_ */
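
The header above exposes the driver's whole ioctl surface to userspace. As a hedged example of its use, the sketch below asks the driver whether it has been initialized via DRM_IOCTL_VIA_CHROME9_INIT_JUDGE, which via_chrome9_drm_judge() in via_chrome9_drv.c answers with 1 or -1; the device node path and the assumption that this ioctl is wired into the driver's ioctl table are editorial, not stated in this hunk.

/* Hypothetical userspace probe (not part of the patch). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "via_chrome9_drm.h"

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);   /* assumed node path */
	int initialized = 0;

	if (fd < 0)
		return 1;
	/* The driver writes 1 if initialized, -1 otherwise. */
	if (ioctl(fd, DRM_IOCTL_VIA_CHROME9_INIT_JUDGE, &initialized) == 0)
		printf("via_chrome9 initialized: %d\n", initialized);
	return 0;
}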
3230--- /dev/null
3231+++ b/drivers/gpu/drm/via_chrome9/via_chrome9_drv.c
3232@@ -0,0 +1,224 @@
3233+/*
3234+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3235+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
3236+ *
3237+ * Permission is hereby granted, free of charge, to any person
3238+ * obtaining a copy of this software and associated documentation
3239+ * files (the "Software"), to deal in the Software without
3240+ * restriction, including without limitation the rights to use,
3241+ * copy, modify, merge, publish, distribute, sub license,
3242+ * and/or sell copies of the Software, and to permit persons to
3243+ * whom the Software is furnished to do so, subject to the
3244+ * following conditions:
3245+ *
3246+ * The above copyright notice and this permission notice
3247+ * (including the next paragraph) shall be included in all
3248+ * copies or substantial portions of the Software.
3249+ *
3250+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
3251+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
3252+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
3253+ * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
3254+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3255+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
3256+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
3257+ * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
3258+ */
3259+
3260+#include "drmP.h"
3261+#include "via_chrome9_drm.h"
3262+#include "via_chrome9_drv.h"
3263+#include "via_chrome9_dma.h"
3264+#include "via_chrome9_mm.h"
3265+#include "via_chrome9_3d_reg.h"
3266+
3267+#define RING_BUFFER_INIT_FLAG 1
3268+#define RING_BUFFER_CLEANUP_FLAG 2
3269+
3270+static int dri_library_name(struct drm_device *dev, char *buf)
3271+{
3272+ return snprintf(buf, PAGE_SIZE, "via_chrome9");
3273+}
3274+
3275+int via_chrome9_drm_authmagic(struct drm_device *dev, void *data,
3276+ struct drm_file *file_priv)
3277+{
3278+ return 0;
3279+}
3280+
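+/*
+ * Reads the PCI configuration dword at offset 0x2C (normally the
+ * subsystem vendor/device IDs) of bus 0, device 0, functions 0, 1, 2
+ * and 5 through the legacy 0xCF8/0xCFC configuration mechanism, and
+ * returns the four values to user space in that order.
+ */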
3281+int via_chrome9_drm_get_pci_id(struct drm_device *dev,
3282+ void *data, struct drm_file *file_priv)
3283+{
3284+ unsigned int *reg_val = data;
3285+ outl(0x8000002C, 0xCF8);
3286+ *reg_val = inl(0xCFC);
3287+ outl(0x8000012C, 0xCF8);
3288+ *(reg_val+1) = inl(0xCFC);
3289+ outl(0x8000022C, 0xCF8);
3290+ *(reg_val+2) = inl(0xCFC);
3291+ outl(0x8000052C, 0xCF8);
3292+ *(reg_val+3) = inl(0xCFC);
3293+
3294+ return 0;
3295+}
3296+int via_chrome9_drm_judge(struct drm_device *dev, void *data,
3297+ struct drm_file *file_priv)
3298+{
3299+ struct drm_via_chrome9_private *dev_priv =
3300+ (struct drm_via_chrome9_private *) dev->dev_private;
3301+
3302+ if (dev_priv->initialized)
3303+ *(int *)data = 1;
3304+ else
3305+ *(int *)data = -1;
3306+ return 0;
3307+}
3308+
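+/*
+ * The DMA ioctl carries a single flag: RING_BUFFER_INIT_FLAG
+ * (re)initializes the ring buffer via via_chrome9_dma_init_inv(), while
+ * RING_BUFFER_CLEANUP_FLAG, on H6S2 parts, clears bit 7 of sequencer
+ * register SR6C through the MMIO-mapped VGA index/data pair at
+ * 0x83C4/0x83C5.
+ */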
3309+int via_chrome9_dma_init(struct drm_device *dev, void *data,
3310+ struct drm_file *file_priv)
3311+{
3312+ int tmp;
3313+ unsigned char sr6c;
3314+ struct drm_via_chrome9_private *dev_priv =
3315+ (struct drm_via_chrome9_private *)dev->dev_private;
3316+ tmp = *((int *)data);
3317+
3318+ switch (tmp) {
3319+ case RING_BUFFER_INIT_FLAG:
3320+ via_chrome9_dma_init_inv(dev);
3321+ break;
3322+ case RING_BUFFER_CLEANUP_FLAG:
3323+ if (dev_priv->chip_sub_index == CHIP_H6S2) {
3324+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c4, 0x6c);
3325+ sr6c = GetMMIORegisterU8(dev_priv->mmio->handle,
3326+ 0x83c5);
3327+ sr6c &= 0x7F;
3328+ SetMMIORegisterU8(dev_priv->mmio->handle, 0x83c5, sr6c);
3329+ }
3330+ break;
3331+ }
3332+ return 0;
3333+}
3334+
3335+
3336+
3337+struct drm_ioctl_desc via_chrome9_ioctls[] = {
3338+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_INIT, via_chrome9_ioctl_init,
3339+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),/* via_chrome9_map.c*/
3340+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_FLUSH, via_chrome9_ioctl_flush, DRM_AUTH),
3341+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_FREE, via_chrome9_ioctl_free, DRM_AUTH),
3342+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_ALLOCATE_EVENT_TAG,
3343+ via_chrome9_ioctl_allocate_event_tag, DRM_AUTH),
3344+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_FREE_EVENT_TAG,
3345+ via_chrome9_ioctl_free_event_tag, DRM_AUTH),
3346+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_ALLOCATE_APERTURE,
3347+ via_chrome9_ioctl_allocate_aperture, DRM_AUTH),
3348+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_FREE_APERTURE,
3349+ via_chrome9_ioctl_free_aperture, DRM_AUTH),
3350+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_ALLOCATE_VIDEO_MEM,
3351+ via_chrome9_ioctl_allocate_mem_wrapper, DRM_AUTH),
3352+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_FREE_VIDEO_MEM,
3353+ via_chrome9_ioctl_free_mem_wrapper, DRM_AUTH),
3354+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_WAIT_CHIP_IDLE,
3355+ via_chrome9_ioctl_wait_chip_idle, DRM_AUTH),
3356+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_PROCESS_EXIT,
3357+ via_chrome9_ioctl_process_exit, DRM_AUTH),
3358+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_RESTORE_PRIMARY,
3359+ via_chrome9_ioctl_restore_primary, DRM_AUTH),
3360+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_FLUSH_CACHE,
3361+ via_chrome9_ioctl_flush_cache, DRM_AUTH),
3362+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_ALLOCMEM,
3363+ via_chrome9_ioctl_allocate_mem_base, DRM_AUTH),
3364+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_FREEMEM,
3365+ via_chrome9_ioctl_freemem_base, DRM_AUTH),
3366+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_CHECKVIDMEMSIZE,
3367+ via_chrome9_ioctl_check_vidmem_size, DRM_AUTH),
3368+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_PCIEMEMCTRL,
3369+ via_chrome9_ioctl_pciemem_ctrl, DRM_AUTH),
3370+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_AUTH_MAGIC, via_chrome9_drm_authmagic, 0),
3371+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_GET_PCI_ID,
3372+ via_chrome9_drm_get_pci_id, 0),
3373+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_INIT_JUDGE, via_chrome9_drm_judge, 0),
3374+ DRM_IOCTL_DEF(DRM_VIA_CHROME9_DMA, via_chrome9_dma_init, 0)
3375+};
3376+
3377+int via_chrome9_max_ioctl = DRM_ARRAY_SIZE(via_chrome9_ioctls);
3378+
3379+static struct pci_device_id pciidlist[] = {
3380+ {0x1106, 0x3225, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
3381+ {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_CHROME9_DX9_0},
3382+ {0x1106, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
3383+ {0x1106, 0x1122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_CHROME9_PCIE_GROUP},
3384+ {0x1106, 0x5122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_CHROME9_PCIE_GROUP},
3385+ {0, 0, 0}
3386+};
3387+
3388+int via_chrome9_driver_open(struct drm_device *dev,
3389+ struct drm_file *priv)
3390+{
3391+ priv->authenticated = 1;
3392+ return 0;
3393+}
3394+
3395+static struct drm_driver driver = {
3396+ .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
3397+ DRIVER_HAVE_DMA | DRIVER_FB_DMA | DRIVER_USE_MTRR,
3398+ .open = via_chrome9_driver_open,
3399+ .load = via_chrome9_driver_load,
3400+ .unload = via_chrome9_driver_unload,
3401+ .device_is_agp = via_chrome9_is_agp,
3402+ .dri_library_name = dri_library_name,
3403+ .reclaim_buffers = drm_core_reclaim_buffers,
3404+ .reclaim_buffers_locked = NULL,
3405+ .reclaim_buffers_idlelocked = via_chrome9_reclaim_buffers_locked,
3406+ .lastclose = via_chrome9_lastclose,
3407+ .preclose = via_chrome9_preclose,
3408+ .get_map_ofs = drm_core_get_map_ofs,
3409+ .get_reg_ofs = drm_core_get_reg_ofs,
3410+ .ioctls = via_chrome9_ioctls,
3411+ .fops = {
3412+ .owner = THIS_MODULE,
3413+ .open = drm_open,
3414+ .release = drm_release,
3415+ .ioctl = drm_ioctl,
3416+ .mmap = drm_mmap,
3417+ .poll = drm_poll,
3418+ .fasync = drm_fasync,
3419+ },
3420+ .pci_driver = {
3421+ .name = DRIVER_NAME,
3422+ .id_table = pciidlist,
3423+ .resume = via_chrome9_drm_resume,
3424+ .suspend = via_chrome9_drm_suspend,
3425+ },
3426+
3427+ .name = DRIVER_NAME,
3428+ .desc = DRIVER_DESC,
3429+ .date = DRIVER_DATE,
3430+ .major = DRIVER_MAJOR,
3431+ .minor = DRIVER_MINOR,
3432+ .patchlevel = DRIVER_PATCHLEVEL,
3433+};
3434+
3435+static int __init via_chrome9_init(void)
3436+{
3437+ driver.num_ioctls = via_chrome9_max_ioctl;
3438+#if VIA_CHROME9_VERIFY_ENABLE
3439+ via_chrome9_init_command_verifier();
3440+ DRM_INFO("via_chrome9 verify function enabled. \n");
3441+#endif
3442+ driver.dev_priv_size = sizeof(struct drm_via_chrome9_private);
3443+ return drm_init(&driver);
3444+}
3445+
3446+static void __exit via_chrome9_exit(void)
3447+{
3448+ drm_exit(&driver);
3449+}
3450+
3451+module_init(via_chrome9_init);
3452+module_exit(via_chrome9_exit);
3453+
3454+MODULE_AUTHOR(DRIVER_AUTHOR);
3455+MODULE_DESCRIPTION(DRIVER_DESC);
3456+MODULE_LICENSE("GPL and additional rights");
3457--- /dev/null
3458+++ b/drivers/gpu/drm/via_chrome9/via_chrome9_drv.h
3459@@ -0,0 +1,150 @@
3460+/*
3461+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3462+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
3463+ *
3464+ * Permission is hereby granted, free of charge, to any person
3465+ * obtaining a copy of this software and associated documentation
3466+ * files (the "Software"), to deal in the Software without
3467+ * restriction, including without limitation the rights to use,
3468+ * copy, modify, merge, publish, distribute, sub license,
3469+ * and/or sell copies of the Software, and to permit persons to
3470+ * whom the Software is furnished to do so, subject to the
3471+ * following conditions:
3472+ *
3473+ * The above copyright notice and this permission notice
3474+ * (including the next paragraph) shall be included in all
3475+ * copies or substantial portions of the Software.
3476+ *
3477+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
3478+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
3479+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
3480+ * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
3481+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3482+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
3483+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
3484+ * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
3485+ */
3486+#ifndef _VIA_CHROME9_DRV_H_
3487+#define _VIA_CHROME9_DRV_H_
3488+
3489+#include "drm_sman.h"
3490+#include "via_chrome9_verifier.h"
3491+#define DRIVER_AUTHOR "Various"
3492+
3493+#define DRIVER_NAME "via_chrome9"
3494+#define DRIVER_DESC "VIA_CHROME9 Unichrome / Pro"
3495+#define DRIVER_DATE "20080415"
3496+
3497+#define DRIVER_MAJOR 2
3498+#define DRIVER_MINOR 11
3499+#define DRIVER_PATCHLEVEL 1
3500+
3501+#define via_chrome9_FIRE_BUF_SIZE 1024
3502+#define via_chrome9_NUM_IRQS 4
3503+
3504+#define MAX_MEMORY_HEAPS 4
3505+#define NUMBER_OF_APERTURES 32
3506+
3507+/*typedef struct drm_via_chrome9_shadow_map drm_via_chrome9_shadow_map_t;*/
3508+struct drm_via_chrome9_shadow_map {
3509+ struct drm_map *shadow;
3510+ unsigned int shadow_size;
3511+ unsigned int *shadow_handle;
3512+};
3513+
3514+/*typedef struct drm_via_chrome9_pagetable_map
3515+ *drm_via_chrome9_pagetable_map_t;
3516+ */
3517+struct drm_via_chrome9_pagetable_map {
3518+ unsigned int pagetable_offset;
3519+ unsigned int pagetable_size;
3520+ unsigned int *pagetable_handle;
3521+ unsigned int mmt_register;
3522+};
3523+
3524+/*typedef struct drm_via_chrome9_private drm_via_chrome9_private_t;*/
3525+struct drm_via_chrome9_private {
3526+ int chip_agp;
3527+ int chip_index;
3528+ int chip_sub_index;
3529+
3530+ unsigned long front_offset;
3531+ unsigned long back_offset;
3532+ unsigned long depth_offset;
3533+ unsigned long fb_base_address;
3534+ unsigned long available_fb_size;
3535+ int usec_timeout;
3536+ int max_apertures;
3537+ struct drm_sman sman;
3538+ unsigned int alignment;
3539+ /* bit[31]: 0 means no alignment is needed; 1 means alignment is
3540+ needed and the alignment value is stored in bits[0:30] */
3541+
3542+ struct drm_map *sarea;
3543+ struct drm_via_chrome9_sarea *sarea_priv;
3544+
3545+ struct drm_map *mmio;
3546+ struct drm_map *hostBlt;
3547+ struct drm_map *fb;
3548+ struct drm_map *front;
3549+ struct drm_map *back;
3550+ struct drm_map *depth;
3551+ struct drm_map *agp_tex;
3552+ unsigned int agp_size;
3553+ unsigned int agp_offset;
3554+
3555+ struct semaphore *drm_s3g_sem;
3556+
3557+ struct drm_via_chrome9_shadow_map shadow_map;
3558+ struct drm_via_chrome9_pagetable_map pagetable_map;
3559+
3560+ char *bci;
3561+
3562+ int aperture_usage[NUMBER_OF_APERTURES];
3563+ void *event_tag_info;
3564+
3565+ /* DMA buffer manager */
3566+ void *dma_manager;
3567+ /* Indicate agp/pcie heap initialization flag */
3568+ int agp_initialized;
3569+ /* Indicate video heap initialization flag */
3570+ int vram_initialized;
3571+
3572+ unsigned long pcie_vmalloc_addr;
3573+
3574+ /* pointer to device information */
3575+ void *dev;
3576+ /* if AGP init fails, fall back and force DRI to use PCI */
3577+ enum {
3578+ DRM_AGP_RING_BUFFER,
3579+ DRM_AGP_DOUBLE_BUFFER,
3580+ DRM_AGP_DISABLED
3581+ } drm_agp_type;
3582+ /*end*/
3583+#if VIA_CHROME9_VERIFY_ENABLE
3584+ struct drm_via_chrome9_state hc_state;
3585+#endif
3586+ unsigned long *bci_buffer;
3587+ unsigned long pcie_vmalloc_nocache;
3588+ unsigned char gti_backup[13];
3589+ int initialized;
3590+
3591+};
3592+
3593+
3594+enum via_chrome9_family {
3595+ VIA_CHROME9_OTHER = 0, /* Baseline */
3596+ VIA_CHROME9_PRO_GROUP_A,/* Another video engine and DMA commands */
3597+ VIA_CHROME9_DX9_0,
3598+ VIA_CHROME9_PCIE_GROUP
3599+};
3600+
3601+/* VIA_CHROME9 MMIO register access */
3602+#define VIA_CHROME9_BASE ((dev_priv->mmio))
3603+
3604+#define VIA_CHROME9_READ(reg) DRM_READ32(VIA_CHROME9_BASE, reg)
3605+#define VIA_CHROME9_WRITE(reg, val) DRM_WRITE32(VIA_CHROME9_BASE, reg, val)
3606+#define VIA_CHROME9_READ8(reg) DRM_READ8(VIA_CHROME9_BASE, reg)
3607+#define VIA_CHROME9_WRITE8(reg, val) DRM_WRITE8(VIA_CHROME9_BASE, reg, val)
3608+
3609+#endif
3610--- /dev/null
3611+++ b/drivers/gpu/drm/via_chrome9/via_chrome9_mm.c
3612@@ -0,0 +1,435 @@
3613+/*
3614+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3615+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
3616+ *
3617+ * Permission is hereby granted, free of charge, to any person
3618+ * obtaining a copy of this software and associated documentation
3619+ * files (the "Software"), to deal in the Software without
3620+ * restriction, including without limitation the rights to use,
3621+ * copy, modify, merge, publish, distribute, sub license,
3622+ * and/or sell copies of the Software, and to permit persons to
3623+ * whom the Software is furnished to do so, subject to the
3624+ * following conditions:
3625+ *
3626+ * The above copyright notice and this permission notice
3627+ * (including the next paragraph) shall be included in all
3628+ * copies or substantial portions of the Software.
3629+ *
3630+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
3631+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
3632+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
3633+ * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
3634+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3635+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
3636+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
3637+ * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
3638+ */
3639+
3640+#include "drmP.h"
3641+#include "via_chrome9_drm.h"
3642+#include "via_chrome9_drv.h"
3643+#include "drm_sman.h"
3644+#include "via_chrome9_mm.h"
3645+
3646+#define VIA_CHROME9_MM_GRANULARITY 4
3647+#define VIA_CHROME9_MM_GRANULARITY_MASK ((1 << VIA_CHROME9_MM_GRANULARITY) - 1)
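+/*
+ * Allocations are managed in granules of 1 << VIA_CHROME9_MM_GRANULARITY
+ * (16) bytes. For example, a request of 100 bytes becomes
+ * (100 + 15) >> 4 = 7 granules and is therefore rounded up to 112 bytes.
+ */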
3648+
3649+
3650+int via_chrome9_map_init(struct drm_device *dev,
3651+ struct drm_via_chrome9_init *init)
3652+{
3653+ struct drm_via_chrome9_private *dev_priv =
3654+ (struct drm_via_chrome9_private *)dev->dev_private;
3655+
3656+ dev_priv->sarea = drm_getsarea(dev);
3657+ if (!dev_priv->sarea) {
3658+ DRM_ERROR("could not find sarea!\n");
3659+ goto error;
3660+ }
3661+ dev_priv->sarea_priv =
3662+ (struct drm_via_chrome9_sarea *)((unsigned char *)dev_priv->
3663+ sarea->handle + init->sarea_priv_offset);
3664+
3665+ dev_priv->fb = drm_core_findmap(dev, init->fb_handle);
3666+ if (!dev_priv->fb) {
3667+ DRM_ERROR("could not find framebuffer!\n");
3668+ goto error;
3669+ }
3670+ /* Frame buffer physical base address */
3671+ dev_priv->fb_base_address = init->fb_base_address;
3672+
3673+ if (init->shadow_size) {
3674+ /* find agp shadow region mappings */
3675+ dev_priv->shadow_map.shadow = drm_core_findmap(dev, init->
3676+ shadow_handle);
3677+ if (!dev_priv->shadow_map.shadow) {
3678+ DRM_ERROR("could not shadow map!\n");
3679+ goto error;
3680+ }
3681+ dev_priv->shadow_map.shadow_size = init->shadow_size;
3682+ dev_priv->shadow_map.shadow_handle = (unsigned int *)dev_priv->
3683+ shadow_map.shadow->handle;
3684+ init->shadow_handle = dev_priv->shadow_map.shadow->offset;
3685+ }
3686+ if (init->agp_tex_size && init->chip_agp != CHIP_PCIE) {
3687+ /* find agp texture buffer mappings */
3688+ dev_priv->agp_tex = drm_core_findmap(dev, init->agp_tex_handle);
3689+ dev_priv->agp_size = init->agp_tex_size;
3690+ dev_priv->agp_offset = init->agp_tex_handle;
3691+ if (!dev_priv->agp_tex) {
3692+ DRM_ERROR("could not find agp texture map !\n");
3693+ goto error;
3694+ }
3695+ }
3696+ /* find mmio/dma mappings */
3697+ dev_priv->mmio = drm_core_findmap(dev, init->mmio_handle);
3698+ if (!dev_priv->mmio) {
3699+ DRM_ERROR("failed to find mmio region!\n");
3700+ goto error;
3701+ }
3702+
3703+ dev_priv->hostBlt = drm_core_findmap(dev, init->hostBlt_handle);
3704+ if (!dev_priv->hostBlt) {
3705+ DRM_ERROR("failed to find host bitblt region!\n");
3706+ goto error;
3707+ }
3708+
3709+ dev_priv->drm_agp_type = init->agp_type;
3710+ if (init->agp_type != AGP_DISABLED && init->chip_agp != CHIP_PCIE) {
3711+ dev->agp_buffer_map = drm_core_findmap(dev, init->dma_handle);
3712+ if (!dev->agp_buffer_map) {
3713+ DRM_ERROR("failed to find dma buffer region!\n");
3714+ goto error;
3715+ }
3716+ }
3717+
3718+ dev_priv->bci = (char *)dev_priv->mmio->handle + 0x10000;
3719+
3720+ return 0;
3721+
3722+error:
3723+ /* do cleanup here, refine_later */
3724+ return -EINVAL;
3725+}
3726+
3727+int via_chrome9_heap_management_init(struct drm_device *dev,
3728+ struct drm_via_chrome9_init *init)
3729+{
3730+ struct drm_via_chrome9_private *dev_priv =
3731+ (struct drm_via_chrome9_private *) dev->dev_private;
3732+ int ret = 0;
3733+
3734+ /* video memory management. range: 0 ---- video_whole_size */
3735+ mutex_lock(&dev->struct_mutex);
3736+ ret = drm_sman_set_range(&dev_priv->sman, VIA_CHROME9_MEM_VIDEO,
3737+ 0, dev_priv->available_fb_size >> VIA_CHROME9_MM_GRANULARITY);
3738+ if (ret) {
3739+ DRM_ERROR("VRAM memory manager initialization ******ERROR\
3740+ !******\n");
3741+ mutex_unlock(&dev->struct_mutex);
3742+ goto error;
3743+ }
3744+ dev_priv->vram_initialized = 1;
3745+ /* agp/pcie heap management.
3746+ note: AGP and PCIE are mutually exclusive, so a single heap
3747+ is enough to manage both of them. */
3748+ init->agp_type = dev_priv->drm_agp_type;
3749+ if (init->agp_type != AGP_DISABLED && dev_priv->agp_size) {
3750+ ret = drm_sman_set_range(&dev_priv->sman, VIA_CHROME9_MEM_AGP,
3751+ 0, dev_priv->agp_size >> VIA_CHROME9_MM_GRANULARITY);
3752+ if (ret) {
3753+ DRM_ERROR("AGP/PCIE memory manager initialization ******ERROR\
3754+ !******\n");
3755+ mutex_unlock(&dev->struct_mutex);
3756+ goto error;
3757+ }
3758+ dev_priv->agp_initialized = 1;
3759+ }
3760+ mutex_unlock(&dev->struct_mutex);
3761+ return 0;
3762+
3763+error:
3764+ /* Do error recover here, refine_later */
3765+ return -EINVAL;
3766+}
3767+
3768+
3769+void via_chrome9_memory_destroy_heap(struct drm_device *dev,
3770+ struct drm_via_chrome9_private *dev_priv)
3771+{
3772+ mutex_lock(&dev->struct_mutex);
3773+ drm_sman_cleanup(&dev_priv->sman);
3774+ dev_priv->vram_initialized = 0;
3775+ dev_priv->agp_initialized = 0;
3776+ mutex_unlock(&dev->struct_mutex);
3777+}
3778+
3779+void via_chrome9_reclaim_buffers_locked(struct drm_device *dev,
3780+ struct drm_file *file_priv)
3781+{
3782+ return;
3783+}
3784+
3785+int via_chrome9_ioctl_allocate_aperture(struct drm_device *dev,
3786+ void *data, struct drm_file *file_priv)
3787+{
3788+ return 0;
3789+}
3790+
3791+int via_chrome9_ioctl_free_aperture(struct drm_device *dev,
3792+ void *data, struct drm_file *file_priv)
3793+{
3794+ return 0;
3795+}
3796+
3797+
3798+/* Allocate memory from DRM module for video playing */
3799+int via_chrome9_ioctl_allocate_mem_base(struct drm_device *dev,
3800+void *data, struct drm_file *file_priv)
3801+{
3802+ struct drm_via_chrome9_mem *mem = data;
3803+ struct drm_memblock_item *item;
3804+ struct drm_via_chrome9_private *dev_priv =
3805+ (struct drm_via_chrome9_private *) dev->dev_private;
3806+ unsigned long tmpSize = 0, offset = 0, alignment = 0;
3807+ /* map the pcie heap type onto agp, since pcie and agp heaps are
3808+ treated identically by the heap manager */
3809+ if (mem->type == memory_heap_pcie) {
3810+ if (dev_priv->chip_agp != CHIP_PCIE) {
3811+ DRM_ERROR("User want to alloc memory from pcie heap \
3812+ but via_chrome9.ko has no this heap exist.\n");
3813+ return -EINVAL;
3814+ }
3815+ mem->type = memory_heap_agp;
3816+ }
3817+
3818+ if (mem->type > VIA_CHROME9_MEM_AGP) {
3819+ DRM_ERROR("Unknown memory type allocation\n");
3820+ return -EINVAL;
3821+ }
3822+ mutex_lock(&dev->struct_mutex);
3823+ if (0 == ((mem->type == VIA_CHROME9_MEM_VIDEO) ?
3824+ dev_priv->vram_initialized : dev_priv->agp_initialized)) {
3825+ DRM_ERROR("Attempt to allocate from uninitialized\
3826+ memory manager.\n");
3827+ mutex_unlock(&dev->struct_mutex);
3828+ return -EINVAL;
3829+ }
3830+ tmpSize = (mem->size + VIA_CHROME9_MM_GRANULARITY_MASK) >>
3831+ VIA_CHROME9_MM_GRANULARITY;
3832+ mem->size = tmpSize << VIA_CHROME9_MM_GRANULARITY;
3833+ alignment = (dev_priv->alignment & 0x80000000) ? dev_priv->
3834+ alignment & 0x7FFFFFFF : 0;
3835+ alignment /= (1 << VIA_CHROME9_MM_GRANULARITY);
3836+ item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, alignment,
3837+ (unsigned long)file_priv);
3838+ mutex_unlock(&dev->struct_mutex);
3839+ /* alloc failed */
3840+ if (!item) {
3841+ DRM_ERROR("Allocate memory failed ******ERROR******.\n");
3842+ return -ENOMEM;
3843+ }
3844+ /* Everything is fine up to here. Check the memory type allocated
3845+ and return the appropriate value to user mode; the value handed
3846+ back to user space is tricky to handle, so be careful. */
3847+ /* offset is used by the user mode app to calculate the virtual
3848+ address used to access the allocated memory */
3849+ mem->index = item->user_hash.key;
3850+ offset = item->mm->offset(item->mm, item->mm_info) <<
3851+ VIA_CHROME9_MM_GRANULARITY;
3852+ switch (mem->type) {
3853+ case VIA_CHROME9_MEM_VIDEO:
3854+ mem->offset = offset + dev_priv->back_offset;
3855+ break;
3856+ case VIA_CHROME9_MEM_AGP:
3857+ /* return different value to user according to the chip type */
3858+ if (dev_priv->chip_agp == CHIP_PCIE) {
3859+ mem->offset = offset +
3860+ ((struct drm_via_chrome9_DMA_manager *)dev_priv->
3861+ dma_manager)->DMASize * sizeof(unsigned long);
3862+ } else {
3863+ mem->offset = offset;
3864+ }
3865+ break;
3866+ default:
3867+ /* Unknown memory type at this point indicates a code bug */
3868+ DRM_ERROR("Unexpected memory type; this should never happen.\n");
3870+ return -EINVAL;
3871+ }
3872+ /* Done. There is no need to call copy_to_user() here; we cannot
3873+ touch user space directly anyway, and drm_ioctl() copies the
3874+ data back to user space for us. */
3875+ return 0;
3876+}
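+/*
+ * Summary of the offset returned above: for VIA_CHROME9_MEM_VIDEO it is
+ * the heap offset plus back_offset (relative to the frame buffer
+ * aperture); for VIA_CHROME9_MEM_AGP on PCIE chips the offset is biased
+ * by DMASize * sizeof(unsigned long) bytes, i.e. past the DMA manager's
+ * buffer, while on true AGP chips the raw heap offset is returned.
+ */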
3877+
3878+/* Allocate video/AGP/PCIE memory from heap management */
3879+int via_chrome9_ioctl_allocate_mem_wrapper(struct drm_device
3880+ *dev, void *data, struct drm_file *file_priv)
3881+{
3882+ struct drm_via_chrome9_memory_alloc *memory_alloc =
3883+ (struct drm_via_chrome9_memory_alloc *)data;
3884+ struct drm_via_chrome9_private *dev_priv =
3885+ (struct drm_via_chrome9_private *) dev->dev_private;
3886+ struct drm_via_chrome9_mem mem;
3887+
3888+ mem.size = memory_alloc->size;
3889+ mem.type = memory_alloc->heap_type;
3890+ dev_priv->alignment = memory_alloc->align | 0x80000000;
3891+ if (via_chrome9_ioctl_allocate_mem_base(dev, &mem, file_priv)) {
3892+ DRM_ERROR("Allocate memory error!.\n");
3893+ return -ENOMEM;
3894+ }
3895+ dev_priv->alignment = 0;
3896+ /* Everything is fine up to here. Check the memory type allocated and
3897+ return the appropriate value to user mode; the value handed back to
3898+ user space is tricky to handle, so be careful. */
3899+ /* offset is used by the user mode app to calculate the virtual
3900+ address used to access the allocated memory */
3901+ memory_alloc->offset = mem.offset;
3902+ memory_alloc->heap_info.lpL1Node = (void *)mem.index;
3903+ memory_alloc->size = mem.size;
3904+ switch (memory_alloc->heap_type) {
3905+ case VIA_CHROME9_MEM_VIDEO:
3906+ memory_alloc->physaddress = memory_alloc->offset +
3907+ dev_priv->fb_base_address;
3908+ memory_alloc->linearaddress = (void *)memory_alloc->physaddress;
3909+ break;
3910+ case VIA_CHROME9_MEM_AGP:
3911+ /* return different value to user according to the chip type */
3912+ if (dev_priv->chip_agp == CHIP_PCIE) {
3913+ memory_alloc->physaddress = memory_alloc->offset;
3914+ memory_alloc->linearaddress = (void *)memory_alloc->
3915+ physaddress;
3916+ } else {
3917+ memory_alloc->physaddress = dev->agp->base +
3918+ memory_alloc->offset +
3919+ ((struct drm_via_chrome9_DMA_manager *)
3920+ dev_priv->dma_manager)->DMASize * sizeof(unsigned long);
3921+ memory_alloc->linearaddress =
3922+ (void *)memory_alloc->physaddress;
3923+ }
3924+ break;
3925+ default:
3926+ /* Unknown heap type at this point indicates a code bug */
3927+ DRM_ERROR("Unexpected heap type; this should never happen.\n");
3928+ return -EINVAL;
3929+ }
3930+ return 0;
3931+}
3932+
3933+int via_chrome9_ioctl_free_mem_wrapper(struct drm_device *dev,
3934+ void *data, struct drm_file *file_priv)
3935+{
3936+ struct drm_via_chrome9_memory_alloc *memory_alloc = data;
3937+ struct drm_via_chrome9_mem mem;
3938+
3939+ mem.index = (unsigned long)memory_alloc->heap_info.lpL1Node;
3940+ if (via_chrome9_ioctl_freemem_base(dev, &mem, file_priv)) {
3941+ DRM_ERROR("function free_mem_wrapper error.\n");
3942+ return -EINVAL;
3943+ }
3944+
3945+ return 0;
3946+}
3947+
3948+int via_chrome9_ioctl_freemem_base(struct drm_device *dev,
3949+ void *data, struct drm_file *file_priv)
3950+{
3951+ struct drm_via_chrome9_private *dev_priv = dev->dev_private;
3952+ struct drm_via_chrome9_mem *mem = data;
3953+ int ret;
3954+
3955+ mutex_lock(&dev->struct_mutex);
3956+ ret = drm_sman_free_key(&dev_priv->sman, mem->index);
3957+ mutex_unlock(&dev->struct_mutex);
3958+ DRM_DEBUG("free = 0x%lx\n", mem->index);
3959+
3960+ return ret;
3961+}
3962+
3963+int via_chrome9_ioctl_check_vidmem_size(struct drm_device *dev,
3964+ void *data, struct drm_file *file_priv)
3965+{
3966+ return 0;
3967+}
3968+
3969+int via_chrome9_ioctl_pciemem_ctrl(struct drm_device *dev,
3970+ void *data, struct drm_file *file_priv)
3971+{
3972+ int result = 0;
3973+ struct drm_via_chrome9_private *dev_priv = dev->dev_private;
3974+ struct drm_via_chrome9_pciemem_ctrl *pcie_memory_ctrl = data;
3975+ switch (pcie_memory_ctrl->ctrl_type) {
3976+ case pciemem_copy_from_user:
3977+ result = copy_from_user((void *)(
3978+ dev_priv->pcie_vmalloc_nocache+
3979+ pcie_memory_ctrl->pcieoffset),
3980+ pcie_memory_ctrl->usermode_data,
3981+ pcie_memory_ctrl->size);
3982+ break;
3983+ case pciemem_copy_to_user:
3984+ result = copy_to_user(pcie_memory_ctrl->usermode_data,
3985+ (void *)(dev_priv->pcie_vmalloc_nocache+
3986+ pcie_memory_ctrl->pcieoffset),
3987+ pcie_memory_ctrl->size);
3988+ break;
3989+ case pciemem_memset:
3990+ memset((void *)(dev_priv->pcie_vmalloc_nocache +
3991+ pcie_memory_ctrl->pcieoffset),
3992+ pcie_memory_ctrl->memsetdata,
3993+ pcie_memory_ctrl->size);
3994+ break;
3995+ default:
3996+ break;
3997+ }
3998+ return 0;
3999+}
4000+
4001+
4002+int via_fb_alloc(struct drm_via_chrome9_mem *mem)
4003+{
4004+ struct drm_device *dev = (struct drm_device *)via_chrome9_dev_v4l;
4005+ struct drm_via_chrome9_private *dev_priv;
4006+
4007+ if (!dev || !dev->dev_private || !via_chrome9_filepriv_v4l) {
4008+ DRM_ERROR("V4L work before X initialize DRM module !!!\n");
4009+ return -EINVAL;
4010+ }
4011+
4012+ dev_priv = (struct drm_via_chrome9_private *)dev->dev_private;
4013+ if (!dev_priv->vram_initialized ||
4014+ mem->type != VIA_CHROME9_MEM_VIDEO) {
4015+ DRM_ERROR("the memory type from V4L is error !!!\n");
4016+ return -EINVAL;
4017+ }
4018+
4019+ if (via_chrome9_ioctl_allocate_mem_base(dev,
4020+ mem, via_chrome9_filepriv_v4l)) {
4021+ DRM_ERROR("DRM module allocate memory error for V4L!!!\n");
4022+ return -EINVAL;
4023+ }
4024+
4025+ return 0;
4026+}
4027+EXPORT_SYMBOL(via_fb_alloc);
4028+
4029+int via_fb_free(struct drm_via_chrome9_mem *mem)
4030+{
4031+ struct drm_device *dev = (struct drm_device *)via_chrome9_dev_v4l;
4032+ struct drm_via_chrome9_private *dev_priv;
4033+
4034+ if (!dev || !dev->dev_private || !via_chrome9_filepriv_v4l)
4035+ return -EINVAL;
4036+
4037+ dev_priv = (struct drm_via_chrome9_private *)dev->dev_private;
4038+ if (!dev_priv->vram_initialized ||
4039+ mem->type != VIA_CHROME9_MEM_VIDEO)
4040+ return -EINVAL;
4041+
4042+ if (via_chrome9_ioctl_freemem_base(dev, mem, via_chrome9_filepriv_v4l))
4043+ return -EINVAL;
4044+
4045+ return 0;
4046+}
4047+EXPORT_SYMBOL(via_fb_free);
4048--- /dev/null
4049+++ b/drivers/gpu/drm/via_chrome9/via_chrome9_mm.h
4050@@ -0,0 +1,67 @@
4051+/*
4052+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
4053+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
4054+ *
4055+ * Permission is hereby granted, free of charge, to any person
4056+ * obtaining a copy of this software and associated documentation
4057+ * files (the "Software"), to deal in the Software without
4058+ * restriction, including without limitation the rights to use,
4059+ * copy, modify, merge, publish, distribute, sub license,
4060+ * and/or sell copies of the Software, and to permit persons to
4061+ * whom the Software is furnished to do so, subject to the
4062+ * following conditions:
4063+ *
4064+ * The above copyright notice and this permission notice
4065+ * (including the next paragraph) shall be included in all
4066+ * copies or substantial portions of the Software.
4067+ *
4068+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
4069+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
4070+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
4071+ * NON-INFRINGEMENT. IN NO EVENT SHALL VIA, S3 GRAPHICS, AND/OR
4072+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4073+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
4074+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
4075+ * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
4076+ */
4077+#ifndef _VIA_CHROME9_MM_H_
4078+#define _VIA_CHROME9_MM_H_
4079+struct drm_via_chrome9_pciemem_ctrl {
4080+ enum {
4081+ pciemem_copy_from_user = 0,
4082+ pciemem_copy_to_user,
4083+ pciemem_memset,
4084+ } ctrl_type;
4085+ unsigned int pcieoffset;
4086+ unsigned int size;/*in Byte*/
4087+ unsigned char memsetdata;/*for memset*/
4088+ void *usermode_data;/*user mode data pointer*/
4089+};
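+/*
+ * A user-space caller would fill this structure and issue the
+ * DRM_VIA_CHROME9_PCIEMEMCTRL ioctl; for instance (values purely
+ * illustrative):
+ *
+ *	struct drm_via_chrome9_pciemem_ctrl ctrl = {
+ *		.ctrl_type  = pciemem_memset,
+ *		.pcieoffset = 0,
+ *		.size       = 4096,
+ *		.memsetdata = 0,
+ *	};
+ */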
4090+
4091+extern int via_chrome9_map_init(struct drm_device *dev,
4092+ struct drm_via_chrome9_init *init);
4093+extern int via_chrome9_heap_management_init(struct drm_device
4094+ *dev, struct drm_via_chrome9_init *init);
4095+extern void via_chrome9_memory_destroy_heap(struct drm_device
4096+ *dev, struct drm_via_chrome9_private *dev_priv);
4097+extern int via_chrome9_ioctl_check_vidmem_size(struct drm_device
4098+ *dev, void *data, struct drm_file *file_priv);
4099+extern int via_chrome9_ioctl_pciemem_ctrl(struct drm_device *dev,
4100+ void *data, struct drm_file *file_priv);
4101+extern int via_chrome9_ioctl_allocate_aperture(struct drm_device
4102+ *dev, void *data, struct drm_file *file_priv);
4103+extern int via_chrome9_ioctl_free_aperture(struct drm_device *dev,
4104+ void *data, struct drm_file *file_priv);
4105+extern int via_chrome9_ioctl_allocate_mem_base(struct drm_device
4106+ *dev, void *data, struct drm_file *file_priv);
4107+extern int via_chrome9_ioctl_allocate_mem_wrapper(
4108+ struct drm_device *dev, void *data, struct drm_file *file_priv);
4109+extern int via_chrome9_ioctl_freemem_base(struct drm_device
4110+ *dev, void *data, struct drm_file *file_priv);
4111+extern int via_chrome9_ioctl_free_mem_wrapper(struct drm_device
4112+ *dev, void *data, struct drm_file *file_priv);
4113+extern void via_chrome9_reclaim_buffers_locked(struct drm_device
4114+ *dev, struct drm_file *file_priv);
4115+
4116+#endif
4117+
4118--- /dev/null
4119+++ b/drivers/gpu/drm/via_chrome9/via_chrome9_verifier.c
4120@@ -0,0 +1,982 @@
4121+/*
4122+* Copyright 2004 The Unichrome Project. All Rights Reserved.
4123+* Copyright 2005 Thomas Hellstrom. All Rights Reserved.
4124+*
4125+* Permission is hereby granted, free of charge, to any person obtaining a
4126+* copy of this software and associated documentation files (the "Software"),
4127+* to deal in the Software without restriction, including without limitation
4128+* the rights to use, copy, modify, merge, publish, distribute, sub license,
4129+* and/or sell copies of the Software, and to permit persons to whom the
4130+* Software is furnished to do so, subject to the following conditions:
4131+*
4132+* The above copyright notice and this permission notice (including the
4133+* next paragraph) shall be included in all copies or substantial portions
4134+* of the Software.
4135+*
4136+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4137+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4138+* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
4139+* THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES
4140+* OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
4141+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
4142+* DEALINGS IN THE SOFTWARE.
4143+*
4144+* This code was written using docs obtained under NDA from VIA Inc.
4145+*
4146+* Don't run this code directly on an AGP buffer. Due to cache problems it will
4147+* be very slow.
4148+*/
4149+
4150+#include "via_chrome9_3d_reg.h"
4151+#include "drmP.h"
4152+#include "drm.h"
4153+#include "via_chrome9_drm.h"
4154+#include "via_chrome9_verifier.h"
4155+#include "via_chrome9_drv.h"
4156+
4157+#if VIA_CHROME9_VERIFY_ENABLE
4158+
4159+enum verifier_state {
4160+ state_command,
4161+ state_header0,
4162+ state_header1,
4163+ state_header2,
4164+ state_header3,
4165+ state_header4,
4166+ state_header5,
4167+ state_header6,
4168+ state_header7,
4169+ state_error
4170+};
4171+
4172+enum hazard {
4173+ no_check = 0,
4174+ check_render_target_addr0,
4175+ check_render_target_addr1,
4176+ check_render_target_addr_mode,
4177+ check_z_buffer_addr0,
4178+ check_z_buffer_addr1,
4179+ check_z_buffer_addr_mode,
4180+ check_zocclusion_addr0,
4181+ check_zocclusion_addr1,
4182+ check_coarse_z_addr0,
4183+ check_coarse_z_addr1,
4184+ check_fvf_addr_mode,
4185+ check_t_level0_facen_addr0,
4186+ check_fence_cmd_addr0,
4187+ check_fence_cmd_addr1,
4188+ check_fence_cmd_addr2,
4189+ forbidden_command
4190+};
4191+
4192+/*
4193+ * Associates each hazard above with a possible multi-command
4194+ * sequence. For example an address that is split over multiple
4195+ * commands and that needs to be checked at the first command
4196+ * that does not include any part of the address.
4197+ */
4198+
4199+static enum drm_via_chrome9_sequence seqs[] = {
4200+ no_sequence,
4201+ dest_address,
4202+ dest_address,
4203+ dest_address,
4204+ z_address,
4205+ z_address,
4206+ z_address,
4207+ zocclusion_address,
4208+ zocclusion_address,
4209+ coarse_z_address,
4210+ coarse_z_address,
4211+ fvf_address,
4212+ tex_address,
4213+ fence_cmd_address,
4214+ fence_cmd_address,
4215+ fence_cmd_address,
4216+ no_sequence
4217+};
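+/*
+ * For instance, check_fence_cmd_addr0/1/2 above all map to
+ * fence_cmd_address: investigate_hazard() accumulates the split address
+ * over several commands, and finish_current_sequence() performs the
+ * actual range check only once a command belonging to a different
+ * sequence (or the end of the stream) is reached.
+ */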
4218+
4219+struct hz_init {
4220+ unsigned int code;
4221+ enum hazard hz;
4222+};
4223+/* hazard detection for attributes other than context */
4224+static struct hz_init init_table1[] = {
4225+ {0xcc, no_check},
4226+ {0xcd, no_check},
4227+ {0xce, no_check},
4228+ {0xcf, no_check},
4229+ {0xdd, no_check},
4230+ {0xee, no_check},
4231+ {0x00, no_check},
4232+ {0x01, no_check},
4233+ {0x10, check_z_buffer_addr0},
4234+ {0x11, check_z_buffer_addr1},
4235+ {0x12, check_z_buffer_addr_mode},
4236+ {0x13, no_check},
4237+ {0x14, no_check},
4238+ {0x15, no_check},
4239+ {0x16, no_check},
4240+ {0x17, no_check},
4241+ {0x18, no_check},
4242+ {0x19, no_check},
4243+ {0x1a, no_check},
4244+ {0x1b, no_check},
4245+ {0x1c, no_check},
4246+ {0x1d, no_check},
4247+ {0x1e, no_check},
4248+ {0x1f, no_check},
4249+ {0x20, no_check},
4250+ {0x21, check_zocclusion_addr0},
4251+ {0x22, check_zocclusion_addr1},
4252+ {0x23, no_check},
4253+ {0x24, no_check},
4254+ {0x25, no_check},
4255+ {0x26, no_check},
4256+ {0x27, no_check},
4257+ /* H5 only*/
4258+ {0x28, no_check},
4259+ {0x29, check_coarse_z_addr0},
4260+ {0x2a, check_coarse_z_addr1},
4261+ {0x33, no_check},
4262+ {0x34, no_check},
4263+ {0x35, no_check},
4264+ {0x36, no_check},
4265+ {0x37, no_check},
4266+ {0x38, no_check},
4267+ {0x39, no_check},
4268+ {0x3A, no_check},
4269+ {0x3B, no_check},
4270+ {0x3C, no_check},
4271+ {0x3D, no_check},
4272+ {0x3E, no_check},
4273+ {0x3F, no_check},
4274+ /*render target check */
4275+ {0x50, check_render_target_addr0},
4276+ /* H5/H6 different */
4277+ {0x51, check_render_target_addr_mode},
4278+ {0x52, check_render_target_addr1},
4279+ {0x53, no_check},
4280+ {0x58, check_render_target_addr0},
4281+ {0x59, check_render_target_addr_mode},
4282+ {0x5a, check_render_target_addr1},
4283+ {0x5b, no_check},
4284+ {0x60, check_render_target_addr0},
4285+ {0x61, check_render_target_addr_mode},
4286+ {0x62, check_render_target_addr1},
4287+ {0x63, no_check},
4288+ {0x68, check_render_target_addr0},
4289+ {0x69, check_render_target_addr_mode},
4290+ {0x6a, check_render_target_addr1},
4291+ {0x6b, no_check},
4292+ {0x70, no_check},
4293+ {0x71, no_check},
4294+ {0x72, no_check},
4295+ {0x73, no_check},
4296+ {0x74, no_check},
4297+ {0x75, no_check},
4298+ {0x76, no_check},
4299+ {0x77, no_check},
4300+ {0x78, no_check},
4301+ {0x80, no_check},
4302+ {0x81, no_check},
4303+ {0x82, no_check},
4304+ {0x83, no_check},
4305+ {0x84, no_check},
4306+ {0x85, no_check},
4307+ {0x86, no_check},
4308+ {0x87, no_check},
4309+ {0x88, no_check},
4310+ {0x89, no_check},
4311+ {0x8a, no_check},
4312+ {0x90, no_check},
4313+ {0x91, no_check},
4314+ {0x92, no_check},
4315+ {0x93, no_check},
4316+ {0x94, no_check},
4317+ {0x95, no_check},
4318+ {0x96, no_check},
4319+ {0x97, no_check},
4320+ {0x98, no_check},
4321+ {0x99, no_check},
4322+ {0x9a, no_check},
4323+ {0x9b, no_check},
4324+ {0xaa, no_check}
4325+};
4326+
4327+/* hazard detection for the texture stages */
4328+static struct hz_init init_table2[] = {
4329+ {0xcc, no_check},
4330+ {0xcd, no_check},
4331+ {0xce, no_check},
4332+ {0xcf, no_check},
4333+ {0xdd, no_check},
4334+ {0xee, no_check},
4335+ {0x00, no_check},
4336+ {0x01, no_check},
4337+ {0x02, no_check},
4338+ {0x03, no_check},
4339+ {0x04, no_check},
4340+ {0x05, no_check},
4341+ /* H5/H6 different */
4342+ {0x18, check_t_level0_facen_addr0},
4343+ {0x20, no_check},
4344+ {0x21, no_check},
4345+ {0x22, no_check},
4346+ {0x30, no_check},
4347+ {0x50, no_check},
4348+ {0x51, no_check},
4349+ {0x9b, no_check},
4350+};
4351+
4352+/*Check for flexible vertex format */
4353+static struct hz_init init_table3[] = {
4354+ {0xcc, no_check},
4355+ {0xcd, no_check},
4356+ {0xce, no_check},
4357+ {0xcf, no_check},
4358+ {0xdd, no_check},
4359+ {0xee, no_check},
4360+ /* H5/H6 different */
4361+ {0x00, check_fvf_addr_mode},
4362+ {0x01, no_check},
4363+ {0x02, no_check},
4364+ {0x03, no_check},
4365+ {0x04, no_check},
4366+ {0x05, no_check},
4367+ {0x08, no_check},
4368+ {0x09, no_check},
4369+ {0x0a, no_check},
4370+ {0x0b, no_check},
4371+ {0x0c, no_check},
4372+ {0x0d, no_check},
4373+ {0x0e, no_check},
4374+ {0x0f, no_check},
4375+ {0x10, no_check},
4376+ {0x11, no_check},
4377+ {0x12, no_check},
4378+ {0x13, no_check},
4379+ {0x14, no_check},
4380+ {0x15, no_check},
4381+ {0x16, no_check},
4382+ {0x17, no_check},
4383+ {0x18, no_check},
4384+ {0x19, no_check},
4385+ {0x1a, no_check},
4386+ {0x1b, no_check},
4387+ {0x1c, no_check},
4388+ {0x1d, no_check},
4389+ {0x1e, no_check},
4390+ {0x1f, no_check},
4391+ {0x20, no_check},
4392+ {0x21, no_check},
4393+ {0x22, no_check},
4394+ {0x23, no_check},
4395+ {0x24, no_check},
4396+ {0x25, no_check},
4397+ {0x26, no_check},
4398+ {0x27, no_check},
4399+ {0x28, no_check},
4400+ {0x29, no_check},
4401+ {0x2a, no_check},
4402+ {0x2b, no_check},
4403+ {0x2c, no_check},
4404+ {0x2d, no_check},
4405+ {0x2e, no_check},
4406+ {0x2f, no_check},
4407+ {0x40, no_check},
4408+ {0x41, no_check},
4409+ {0x42, no_check},
4410+ {0x43, no_check},
4411+ {0x44, no_check},
4412+ {0x45, no_check},
4413+ {0x46, no_check},
4414+ {0x47, no_check},
4415+ {0x48, no_check},
4416+ {0x50, no_check},
4417+ {0x51, no_check},
4418+ {0x52, no_check},
4419+ {0x60, no_check},
4420+ {0x61, no_check},
4421+ {0x62, no_check},
4422+ {0x9b, no_check},
4423+ {0xaa, no_check}
4424+};
4425+/*Check for 364 fence command id*/
4426+static struct hz_init init_table4[] = {
4427+ {0xcc, no_check},
4428+ {0xcd, no_check},
4429+ {0xce, no_check},
4430+ {0xcf, no_check},
4431+ {0xdd, no_check},
4432+ {0xee, no_check},
4433+ {0x00, no_check},
4434+ {0x01, check_fence_cmd_addr0},
4435+ {0x02, check_fence_cmd_addr1},
4436+ {0x03, check_fence_cmd_addr2},
4437+ {0x10, no_check},
4438+ {0x11, no_check},
4439+ {0x12, no_check},
4440+ {0x13, no_check},
4441+ {0x14, no_check},
4442+ {0x18, no_check},
4443+ {0x19, no_check},
4444+ {0x1a, no_check},
4445+ {0x1b, no_check},
4446+ {0x1c, no_check},
4447+ {0x20, no_check},
4448+ {0xab, no_check},
4449+ {0xaa, no_check}
4450+};
4451+
4452+/*Check for 353 fence command id*/
4453+static struct hz_init init_table5[] = {
4454+ {0xcc, no_check},
4455+ {0xcd, no_check},
4456+ {0xce, no_check},
4457+ {0xcf, no_check},
4458+ {0xdd, no_check},
4459+ {0xee, no_check},
4460+ {0x00, no_check},
4461+ {0x01, no_check},
4462+ {0x02, no_check},
4463+ {0x03, no_check},
4464+ {0x04, check_fence_cmd_addr0},
4465+ {0x05, check_fence_cmd_addr1},
4466+ {0x06, no_check},
4467+ {0x07, check_fence_cmd_addr2},
4468+ {0x08, no_check},
4469+ {0x09, no_check},
4470+ {0x0a, no_check},
4471+ {0x0b, no_check},
4472+ {0x0c, no_check},
4473+ {0x0d, no_check},
4474+ {0x0e, no_check},
4475+ {0x0f, no_check},
4476+ {0x10, no_check},
4477+ {0x11, no_check},
4478+ {0x12, no_check},
4479+ {0x18, no_check},
4480+ {0x19, no_check},
4481+ {0x1a, no_check},
4482+ {0x30, no_check},
4483+ {0x31, no_check},
4484+ {0x32, no_check},
4485+ {0x68, no_check},
4486+ {0x69, no_check},
4487+ {0x6a, no_check},
4488+ {0x6b, no_check},
4489+ {0xab, no_check},
4490+ {0xaa, no_check}
4491+};
4492+
4493+static enum hazard init_table_01_00[256];
4494+static enum hazard init_table_02_0n[256];
4495+static enum hazard init_table_04_00[256];
4496+static enum hazard init_table_11_364[256];
4497+static enum hazard init_table_11_353[256];
4498+
4499+/* Require the fence command id location to reside in shadow system memory */
4500+static inline int
4501+check_fence_cmd_addr_range(struct drm_via_chrome9_state *seq,
4502+ unsigned long fence_cmd_add, unsigned long size, struct drm_device *dev)
4503+{
4504+ struct drm_via_chrome9_private *dev_priv =
4505+ (struct drm_via_chrome9_private *)dev->dev_private;
4506+ if (!dev_priv->shadow_map.shadow)
4507+ return -1;
4508+ if ((fence_cmd_add < dev_priv->shadow_map.shadow->offset) ||
4509+ (fence_cmd_add + size >
4510+ dev_priv->shadow_map.shadow->offset +
4511+ dev_priv->shadow_map.shadow->size))
4512+ return -1;
4513+ return 0;
4514+}
4515+
4516+/*
4517+ * Currently we only catch the fence cmd's address, which will
4518+ * inevitably access system memory.
4519+ * NOTE: AGP addresses are not checked (all AGP accesses are assumed safe for now).
4520+ */
4521+
4522+static inline int finish_current_sequence(struct drm_via_chrome9_state *cur_seq)
4523+{
4524+ switch (cur_seq->unfinished) {
4525+ case fence_cmd_address:
4526+ if (cur_seq->fence_need_check)
4527+ if (check_fence_cmd_addr_range(cur_seq,
4528+ cur_seq->fence_cmd_addr, 4, cur_seq->dev))
4529+ return -EINVAL;
4530+ break;
4531+ default:
4532+ break;
4533+ }
4534+ cur_seq->unfinished = no_sequence;
4535+ return 0;
4536+}
4537+/* Only catch the commands that potentially access system memory, and treat
4538+ * all the other commands as safe.
4539+ */
4540+static inline int
4541+investigate_hazard(uint32_t cmd, enum hazard hz,
4542+ struct drm_via_chrome9_state *cur_seq)
4543+{
4544+ register uint32_t tmp;
4545+
4546+ if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
4547+ int ret = finish_current_sequence(cur_seq);
4548+ if (ret)
4549+ return ret;
4550+ }
4551+
4552+ switch (hz) {
4553+ case check_render_target_addr0:
4554+ tmp = ((cmd >> 24) - 0x50) >> 3;
4555+ cur_seq->unfinished = dest_address;
4556+ cur_seq->render_target_addr[tmp] = cmd << 8;
4557+ break;
4558+ case check_render_target_addr1:
4559+ cur_seq->unfinished = dest_address;
4560+ tmp = ((cmd >> 24) - 0x50) >> 3;
4561+ cur_seq->render_target_pitch[tmp] = (cmd & 0x000001FF) >> 5;
4562+ break;
4563+ case check_render_target_addr_mode:
4564+ cur_seq->unfinished = dest_address;
4565+ if (!cur_seq->agp)
4566+ if (((cmd & 0x00300000) >> 20) == 2) {
4567+ DRM_ERROR("Attempt to place \
4568+ render target in system memory\n");
4569+ return -EINVAL;
4570+ }
4571+ break;
4572+ case check_z_buffer_addr0:
4573+ cur_seq->unfinished = z_address;
4574+ break;
4575+ case check_z_buffer_addr1:
4576+ cur_seq->unfinished = z_address;
4577+ if ((cmd & 0x00000003) == 2) {
4578+ DRM_ERROR("Attempt to place \
4579+ Z buffer in system memory\n");
4580+ return -EINVAL;
4581+ }
4582+ break;
4583+ case check_z_buffer_addr_mode:
4584+ cur_seq->unfinished = z_address;
4585+ if (((cmd & 0x00000060) >> 5) == 2) {
4586+ DRM_ERROR("Attempt to place \
4587+ stencil buffer in system memory\n");
4588+ return -EINVAL;
4589+ }
4590+ break;
4591+ case check_zocclusion_addr0:
4592+ cur_seq->unfinished = zocclusion_address;
4593+ break;
4594+ case check_zocclusion_addr1:
4595+ cur_seq->unfinished = zocclusion_address;
4596+ if (((cmd & 0x00c00000) >> 22) == 2) {
4597+ DRM_ERROR("Attempt to access system memory\n");
4598+ return -EINVAL;
4599+ }
4600+ break;
4601+ case check_coarse_z_addr0:
4602+ cur_seq->unfinished = coarse_z_address;
4603+ if (((cmd & 0x00300000) >> 20) == 2)
4604+ return -EINVAL;
4605+ break;
4606+ case check_coarse_z_addr1:
4607+ cur_seq->unfinished = coarse_z_address;
4608+ break;
4609+ case check_fvf_addr_mode:
4610+ cur_seq->unfinished = fvf_address;
4611+ if (!cur_seq->agp)
4612+ if (((cmd & 0x0000c000) >> 14) == 2) {
4613+ DRM_ERROR("Attempt to place \
4614+ fvf buffer in system memory\n");
4615+ return -EINVAL;
4616+ }
4617+ break;
4618+ case check_t_level0_facen_addr0:
4619+ cur_seq->unfinished = tex_address;
4620+ if (!cur_seq->agp)
4621+ if ((cmd & 0x00000003) == 2 ||
4622+ ((cmd & 0x0000000c) >> 2) == 2 ||
4623+ ((cmd & 0x00000030) >> 4) == 2 ||
4624+ ((cmd & 0x000000c0) >> 6) == 2 ||
4625+ ((cmd & 0x0000c000) >> 14) == 2 ||
4626+ ((cmd & 0x00030000) >> 16) == 2) {
4627+ DRM_ERROR("Attempt to place \
4628+ texture buffer in system memory\n");
4629+ return -EINVAL;
4630+ }
4631+ break;
4632+ case check_fence_cmd_addr0:
4633+ cur_seq->unfinished = fence_cmd_address;
4634+ if (cur_seq->agp)
4635+ cur_seq->fence_cmd_addr =
4636+ (cur_seq->fence_cmd_addr & 0xFF000000) |
4637+ (cmd & 0x00FFFFFF);
4638+ else
4639+ cur_seq->fence_cmd_addr =
4640+ (cur_seq->fence_cmd_addr & 0x00FFFFFF) |
4641+ ((cmd & 0x000000FF) << 24);
4642+ break;
4643+ case check_fence_cmd_addr1:
4644+ cur_seq->unfinished = fence_cmd_address;
4645+ if (!cur_seq->agp)
4646+ cur_seq->fence_cmd_addr =
4647+ (cur_seq->fence_cmd_addr & 0xFF000000) |
4648+ (cmd & 0x00FFFFFF);
4649+ break;
4650+ case check_fence_cmd_addr2:
4651+ cur_seq->unfinished = fence_cmd_address;
4652+ if (cmd & 0x00040000)
4653+ cur_seq->fence_need_check = 1;
4654+ else
4655+ cur_seq->fence_need_check = 0;
4656+ break;
4657+ default:
4658+ /* All other commands are considered safe. */
4659+ return 0;
4660+ }
4661+ return 0;
4662+}
4663+
4664+static inline int verify_mmio_address(uint32_t address)
4665+{
4666+ if ((address > 0x3FF) && (address < 0xC00)) {
4667+ DRM_ERROR("Invalid VIDEO DMA command. "
4668+ "Attempt to access 3D- or command burst area.\n");
4669+ return 1;
4670+ } else if ((address > 0xDFF) && (address < 0x1200)) {
4671+ DRM_ERROR("Invalid VIDEO DMA command. "
4672+ "Attempt to access PCI DMA area.\n");
4673+ return 1;
4674+ } else if ((address > 0x1DFF) && (address < 0x2200)) {
4675+ DRM_ERROR("Invalid VIDEO DMA command. "
4676+ "Attempt to access CBU ROTATE SPACE registers.\n");
4677+ return 1;
4678+ } else if ((address > 0x23FF) && (address < 0x3200)) {
4679+ DRM_ERROR("Invalid VIDEO DMA command. "
4680+ "Attempt to access PCI DMA2 area..\n");
4681+ return 1;
4682+ } else if (address > 0x33FF) {
4683+ DRM_ERROR("Invalid VIDEO DMA command. "
4684+ "Attempt to access VGA registers.\n");
4685+ return 1;
4686+ }
4687+ return 0;
4688+}
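+/*
+ * Equivalently, only register offsets in the ranges 0x000-0x3FF,
+ * 0xC00-0xDFF, 0x1200-0x1DFF, 0x2200-0x23FF and 0x3200-0x33FF are
+ * accepted for video DMA commands.
+ */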
4689+
4690+static inline int is_dummy_cmd(uint32_t cmd)
4691+{
4692+ if ((cmd & INV_DUMMY_MASK) == 0xCC000000 ||
4693+ (cmd & INV_DUMMY_MASK) == 0xCD000000 ||
4694+ (cmd & INV_DUMMY_MASK) == 0xCE000000 ||
4695+ (cmd & INV_DUMMY_MASK) == 0xCF000000 ||
4696+ (cmd & INV_DUMMY_MASK) == 0xDD000000)
4697+ return 1;
4698+ return 0;
4699+}
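+/*
+ * Dummy/padding commands are recognized by their high byte (0xCC, 0xCD,
+ * 0xCE, 0xCF or 0xDD), which is what INV_DUMMY_MASK is assumed to
+ * select here.
+ */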
4700+
4701+static inline int
4702+verify_2d_tail(uint32_t const **buffer, const uint32_t *buf_end,
4703+ uint32_t dwords)
4704+{
4705+ const uint32_t *buf = *buffer;
4706+
4707+ if (buf_end - buf < dwords) {
4708+ DRM_ERROR("Illegal termination of 2d command.\n");
4709+ return 1;
4710+ }
4711+
4712+ while (dwords--) {
4713+ if (!is_dummy_cmd(*buf++)) {
4714+ DRM_ERROR("Illegal 2d command tail.\n");
4715+ return 1;
4716+ }
4717+ }
4718+
4719+ *buffer = buf;
4720+ return 0;
4721+}
4722+
4723+static inline int
4724+verify_video_tail(uint32_t const **buffer, const uint32_t *buf_end,
4725+ uint32_t dwords)
4726+{
4727+ const uint32_t *buf = *buffer;
4728+
4729+ if (buf_end - buf < dwords) {
4730+ DRM_ERROR("Illegal termination of video command.\n");
4731+ return 1;
4732+ }
4733+ while (dwords--) {
4734+ if (*buf && !is_dummy_cmd(*buf)) {
4735+ DRM_ERROR("Illegal video command tail.\n");
4736+ return 1;
4737+ }
4738+ buf++;
4739+ }
4740+ *buffer = buf;
4741+ return 0;
4742+}
4743+
4744+static inline enum verifier_state
4745+via_chrome9_check_header0(uint32_t const **buffer, const uint32_t *buf_end)
4746+{
4747+ const uint32_t *buf = *buffer;
4748+ uint32_t cmd, qword, dword;
4749+
4750+ qword = *(buf+1);
4751+ buf += 4;
4752+ dword = qword << 1;
4753+
4754+ if (buf_end - buf < dword)
4755+ return state_error;
4756+
4757+ while (qword-- > 0) {
4758+ cmd = *buf;
4759+ /* Is this condition too restrictive? */
4760+ if ((cmd & 0xFFFF) > 0x1FF) {
4761+ DRM_ERROR("Invalid header0 command io address 0x%x \
4762+ Attempt to access non-2D mmio area.\n", cmd);
4763+ return state_error;
4764+ }
4765+ buf += 2;
4766+ }
4767+
4768+ if ((dword & 3) && verify_2d_tail(&buf, buf_end, 4 - (dword & 0x3)))
4769+ return state_error;
4770+
4771+ *buffer = buf;
4772+ return state_command;
4773+}
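+/*
+ * Header0 layout as parsed above: buf[1] holds the number of qwords that
+ * follow the 4-dword header, each qword being a (2D register offset,
+ * data) pair. Offsets above 0x1FF are rejected, and any remainder up to
+ * the next 4-dword boundary must consist of dummy commands.
+ */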
4774+
4775+static inline enum verifier_state
4776+via_chrome9_check_header1(uint32_t const **buffer, const uint32_t *buf_end)
4777+{
4778+ uint32_t dword;
4779+ const uint32_t *buf = *buffer;
4780+
4781+ dword = *(buf + 1);
4782+ buf += 4;
4783+
4784+ if (buf + dword > buf_end)
4785+ return state_error;
4786+
4787+ buf += dword;
4788+
4789+ if ((dword & 0x3) && verify_2d_tail(&buf, buf_end, 4 - (dword & 0x3)))
4790+ return state_error;
4791+
4792+ *buffer = buf;
4793+ return state_command;
4794+}
4795+
4796+static inline enum verifier_state
4797+via_chrome9_check_header2(uint32_t const **buffer,
4798+ const uint32_t *buf_end, struct drm_via_chrome9_state *hc_state)
4799+{
4800+ uint32_t cmd1, cmd2;
4801+ enum hazard hz;
4802+ const uint32_t *buf = *buffer;
4803+ const enum hazard *hz_table;
4804+
4805+ if ((buf_end - buf) < 4) {
4806+ DRM_ERROR
4807+ ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
4808+ return state_error;
4809+ }
4810+ cmd1 = *buf & 0x0000FFFF;
4811+ cmd2 = *++buf & 0x0000FFFF;
4812+ if (((cmd1 != INV_REG_CR_BEGIN) && (cmd1 != INV_REG_3D_BEGIN)) ||
4813+ ((cmd2 != INV_REG_CR_TRANS) && (cmd2 != INV_REG_3D_TRANS))) {
4814+ DRM_ERROR
4815+ ("Illegal IO address of DMA HALCYON_HEADER2 sequence.\n");
4816+ return state_error;
4817+ }
4818+ /* Advance to get paratype and subparatype */
4819+ cmd1 = *++buf & 0xFFFF0000;
4820+
4821+ switch (cmd1) {
4822+ case INV_ParaType_Attr:
4823+ buf += 2;
4824+ hz_table = init_table_01_00;
4825+ break;
4826+ case (INV_ParaType_Tex | (INV_SubType_Tex0 << 24)):
4827+ case (INV_ParaType_Tex | (INV_SubType_Tex1 << 24)):
4828+ case (INV_ParaType_Tex | (INV_SubType_Tex2 << 24)):
4829+ case (INV_ParaType_Tex | (INV_SubType_Tex3 << 24)):
4830+ case (INV_ParaType_Tex | (INV_SubType_Tex4 << 24)):
4831+ case (INV_ParaType_Tex | (INV_SubType_Tex5 << 24)):
4832+ case (INV_ParaType_Tex | (INV_SubType_Tex6 << 24)):
4833+ case (INV_ParaType_Tex | (INV_SubType_Tex7 << 24)):
4834+ buf += 2;
4835+ hc_state->texture_index = (cmd1 & INV_ParaSubType_MASK) >> 24;
4836+ hz_table = init_table_02_0n;
4837+ break;
4838+ case INV_ParaType_FVF:
4839+ buf += 2;
4840+ hz_table = init_table_04_00;
4841+ break;
4842+ case INV_ParaType_CR:
4843+ buf += 2;
4844+ if (hc_state->agp)
4845+ hz_table = init_table_11_364;
4846+ else
4847+ hz_table = init_table_11_353;
4848+ break;
4849+ case INV_ParaType_Dummy:
4850+ buf += 2;
4851+ while ((buf < buf_end) && !is_agp_header(*buf))
4852+ if (!is_dummy_cmd(*buf))
4853+ return state_error;
4854+ else
4855+ buf++;
4856+
4857+ if ((buf_end > buf) && ((buf_end - buf) & 0x3))
4858+ return state_error;
4859+ return state_command;
4860+ /* The cases below are all considered safe, so we simply skip
4861+ ahead until the next AGP header is found.
4862+ */
4863+ case INV_ParaType_Vdata:
4864+ case (INV_ParaType_Tex |
4865+ ((INV_SubType_Tex0 | INV_SubType_TexSample) << 24)):
4866+ case (INV_ParaType_Tex |
4867+ ((INV_SubType_Tex1 | INV_SubType_TexSample) << 24)):
4868+ case (INV_ParaType_Tex |
4869+ ((INV_SubType_Tex2 | INV_SubType_TexSample) << 24)):
4870+ case (INV_ParaType_Tex |
4871+ ((INV_SubType_Tex3 | INV_SubType_TexSample) << 24)):
4872+ case (INV_ParaType_Tex |
4873+ ((INV_SubType_Tex4 | INV_SubType_TexSample) << 24)):
4874+ case (INV_ParaType_Tex |
4875+ ((INV_SubType_Tex5 | INV_SubType_TexSample) << 24)):
4876+ case (INV_ParaType_Tex |
4877+ ((INV_SubType_Tex6 | INV_SubType_TexSample) << 24)):
4878+ case (INV_ParaType_Tex |
4879+ ((INV_SubType_Tex7 | INV_SubType_TexSample) << 24)):
4880+ case (INV_ParaType_Tex | (INV_SubType_General << 24)):
4881+ case INV_ParaType_Pal:
4882+ case INV_ParaType_PreCR:
4883+ case INV_ParaType_Cfg:
4884+ default:
4885+ buf += 2;
4886+ while ((buf < buf_end) && !is_agp_header(*buf))
4887+ buf++;
4888+ *buffer = buf;
4889+ return state_command;
4890+ }
4891+
4892+ while (buf < buf_end && !is_agp_header(*buf)) {
4893+ cmd1 = *buf++;
4894+ hz = hz_table[cmd1 >> 24];
4895+ if (hz) {
4896+ if (investigate_hazard(cmd1, hz, hc_state))
4897+ return state_error;
4898+ } else if (hc_state->unfinished &&
4899+ finish_current_sequence(hc_state))
4900+ return state_error;
4901+
4902+ }
4903+ if (hc_state->unfinished && finish_current_sequence(hc_state))
4904+ return state_error;
4905+ *buffer = buf;
4906+ return state_command;
4907+}
4908+
4909+static inline enum verifier_state
4910+via_chrome9_check_header3(uint32_t const **buffer,
4911+ const uint32_t *buf_end)
4912+{
4913+ const uint32_t *buf = *buffer;
4914+
4915+ buf += 4;
4916+ while (buf < buf_end && !is_agp_header(*buf))
4917+ buf += 4;
4918+
4919+ *buffer = buf;
4920+ return state_command;
4921+}
4922+
4923+
4924+static inline enum verifier_state
4925+via_chrome9_check_vheader4(uint32_t const **buffer,
4926+ const uint32_t *buf_end)
4927+{
4928+ uint32_t data;
4929+ const uint32_t *buf = *buffer;
4930+
4931+ if (buf_end - buf < 4) {
4932+ DRM_ERROR("Illegal termination of video header4 command\n");
4933+ return state_error;
4934+ }
4935+
4936+ data = *buf++ & ~INV_AGPHeader_MASK;
4937+ if (verify_mmio_address(data))
4938+ return state_error;
4939+
4940+ data = *buf;
4941+ buf += 2;
4942+
4943+ if (*buf++ != 0x00000000) {
4944+ DRM_ERROR("Illegal header4 header data\n");
4945+ return state_error;
4946+ }
4947+
4948+ if (buf_end - buf < data)
4949+ return state_error;
4950+ buf += data;
4951+
4952+ if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
4953+ return state_error;
4954+ *buffer = buf;
4955+ return state_command;
4956+
4957+}
4958+
4959+static inline enum verifier_state
4960+via_chrome9_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end)
4961+{
4962+ uint32_t data;
4963+ const uint32_t *buf = *buffer;
4964+ uint32_t i;
4965+
4966+ if (buf_end - buf < 4) {
4967+ DRM_ERROR("Illegal termination of video header5 command\n");
4968+ return state_error;
4969+ }
4970+
4971+ data = *++buf;
4972+ buf += 2;
4973+
4974+ if (*buf++ != 0x00000000) {
4975+ DRM_ERROR("Illegal header5 header data\n");
4976+ return state_error;
4977+ }
4978+ if ((buf_end - buf) < (data << 1)) {
4979+ DRM_ERROR("Illegal termination of video header5 command\n");
4980+ return state_error;
4981+ }
4982+ for (i = 0; i < data; ++i) {
4983+ if (verify_mmio_address(*buf++))
4984+ return state_error;
4985+ buf++;
4986+ }
4987+ data <<= 1;
4988+ if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
4989+ return state_error;
4990+ *buffer = buf;
4991+ return state_command;
4992+}
4993+
4994+int
4995+via_chrome9_verify_command_stream(const uint32_t *buf,
4996+ unsigned int size, struct drm_device *dev, int agp)
4997+{
4998+
4999+ struct drm_via_chrome9_private *dev_priv =
5000+ (struct drm_via_chrome9_private *) dev->dev_private;
5001+ struct drm_via_chrome9_state *hc_state = &dev_priv->hc_state;
5002+ struct drm_via_chrome9_state saved_state = *hc_state;
5003+ uint32_t cmd;
5004+ const uint32_t *buf_end = buf + (size >> 2);
5005+ enum verifier_state state = state_command;
5006+
5007+ hc_state->dev = dev;
5008+ hc_state->unfinished = no_sequence;
5009+ hc_state->agp = agp;
5010+
5011+ while (buf < buf_end) {
5012+
5013+ switch (state) {
5014+ case state_header0:
5015+ state = via_chrome9_check_header0(&buf, buf_end);
5016+ break;
5017+ case state_header1:
5018+ state = via_chrome9_check_header1(&buf, buf_end);
5019+ break;
5020+ case state_header2:
5021+ state = via_chrome9_check_header2(&buf,
5022+ buf_end, hc_state);
5023+ break;
5024+ case state_header3:
5025+ state = via_chrome9_check_header3(&buf, buf_end);
5026+ break;
5027+ case state_header4:
5028+ state = via_chrome9_check_vheader4(&buf, buf_end);
5029+ break;
5030+ case state_header5:
5031+ state = via_chrome9_check_vheader5(&buf, buf_end);
5032+ break;
5033+ case state_header6:
5034+ case state_header7:
5035+ DRM_ERROR("Unimplemented Header 6/7 command.\n");
5036+ state = state_error;
5037+ break;
5038+ case state_command:
5039+ cmd = *buf;
5040+ if (INV_AGPHeader2 == (cmd & INV_AGPHeader_MASK))
5041+ state = state_header2;
5042+ else if (INV_AGPHeader1 == (cmd & INV_AGPHeader_MASK))
5043+ state = state_header1;
5044+ else if (INV_AGPHeader5 == (cmd & INV_AGPHeader_MASK))
5045+ state = state_header5;
5046+ else if (INV_AGPHeader6 == (cmd & INV_AGPHeader_MASK))
5047+ state = state_header6;
5048+ else if (INV_AGPHeader3 == (cmd & INV_AGPHeader_MASK))
5049+ state = state_header3;
5050+ else if (INV_AGPHeader4 == (cmd & INV_AGPHeader_MASK))
5051+ state = state_header4;
5052+ else if (INV_AGPHeader7 == (cmd & INV_AGPHeader_MASK))
5053+ state = state_header7;
5054+ else if (INV_AGPHeader0 == (cmd & INV_AGPHeader_MASK))
5055+ state = state_header0;
5056+ else {
5057+ DRM_ERROR("Invalid command sequence\n");
5058+ state = state_error;
5059+ }
5060+ break;
5061+ case state_error:
5062+ default:
5063+ *hc_state = saved_state;
5064+ return -EINVAL;
5065+ }
5066+ }
5067+ if (state == state_error) {
5068+ *hc_state = saved_state;
5069+ return -EINVAL;
5070+ }
5071+ return 0;
5072+}
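+/*
+ * Expected call pattern (illustrative; the verifier itself does not
+ * dispatch anything):
+ *
+ *	if (via_chrome9_verify_command_stream(cmd_buf, cmd_size, dev, agp))
+ *		return -EINVAL;
+ *
+ * On failure the previously saved hazard-check state is restored, so a
+ * rejected buffer does not leave stale state behind for the next one.
+ */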
5073+
5074+
5075+static void
5076+setup_hazard_table(struct hz_init init_table[],
5077+	enum hazard table[], int size)
5078+{
5079+ int i;
5080+
5081+ for (i = 0; i < 256; ++i)
5082+ table[i] = forbidden_command;
5083+
5084+ for (i = 0; i < size; ++i)
5085+ table[init_table[i].code] = init_table[i].hz;
5086+}
5087+
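+/*
+ * Every slot of a hazard table starts out as forbidden_command; only the
+ * opcodes listed in the matching init_table are re-classified.  This is
+ * expected to run once, before the first verification pass (presumably at
+ * driver load time).
+ */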
5088+void via_chrome9_init_command_verifier(void)
5089+{
5090+ setup_hazard_table(init_table1, init_table_01_00,
5091+ sizeof(init_table1) / sizeof(struct hz_init));
5092+ setup_hazard_table(init_table2, init_table_02_0n,
5093+ sizeof(init_table2) / sizeof(struct hz_init));
5094+ setup_hazard_table(init_table3, init_table_04_00,
5095+ sizeof(init_table3) / sizeof(struct hz_init));
5096+ setup_hazard_table(init_table4, init_table_11_364,
5097+ sizeof(init_table4) / sizeof(struct hz_init));
5098+ setup_hazard_table(init_table5, init_table_11_353,
5099+ sizeof(init_table5) / sizeof(struct hz_init));
5100+}
5101+
5102+#endif
5103--- /dev/null
5104+++ b/drivers/gpu/drm/via_chrome9/via_chrome9_verifier.h
5105@@ -0,0 +1,61 @@
5106+/*
5107+* Copyright 2004 The Unichrome Project. All Rights Reserved.
5108+*
5109+* Permission is hereby granted, free of charge, to any person obtaining a
5110+* copy of this software and associated documentation files (the "Software"),
5111+* to deal in the Software without restriction, including without limitation
5112+* the rights to use, copy, modify, merge, publish, distribute, sub license,
5113+* and/or sell copies of the Software, and to permit persons to whom the
5114+* Software is furnished to do so, subject to the following conditions:
5115+*
5116+* The above copyright notice and this permission notice (including the
5117+* next paragraph) shall be included in all copies or substantial portions
5118+* of the Software.
5119+*
5120+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
5121+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
5122+* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
5123+* THE UNICHROME PROJECT, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
5124+* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
5125+* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
5126+* THE USE OR OTHER DEALINGS IN THE SOFTWARE.
5127+*
5128+* Author: Scott Fang 2008.
5129+*/
5130+
5131+#ifndef _via_chrome9_VERIFIER_H_
5132+#define _via_chrome9_VERIFIER_H_
5133+
5134+#define VIA_CHROME9_VERIFY_ENABLE 1
5135+
5136+enum drm_via_chrome9_sequence {
5137+ no_sequence = 0,
5138+ z_address,
5139+ dest_address,
5140+ tex_address,
5141+ zocclusion_address,
5142+ coarse_z_address,
5143+ fvf_address,
5144+ fence_cmd_address
5145+};
5146+
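+/*
+ * Per-device hazard-check state.  via_chrome9_verify_command_stream()
+ * snapshots dev_priv->hc_state on entry and restores the snapshot if
+ * verification fails.
+ */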
5147+struct drm_via_chrome9_state {
5148+ uint32_t texture_index;
5149+ uint32_t render_target_addr[4];
5150+ uint32_t render_target_pitch[4];
5151+ uint32_t vb_addr;
5152+ uint32_t fence_cmd_addr;
5153+ uint32_t fence_need_check;
5154+ enum drm_via_chrome9_sequence unfinished;
5155+ int agp_texture;
5156+ int multitex;
5157+ struct drm_device *dev;
5158+ int agp;
5159+ const uint32_t *buf_start;
5160+};
5161+
5162+extern int via_chrome9_verify_command_stream(const uint32_t *buf,
5163+ unsigned int size, struct drm_device *dev, int agp);
5164+void via_chrome9_init_command_verifier(void);
5165+
5166+#endif
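
For orientation, the sketch below shows how the two entry points declared in via_chrome9_verifier.h are meant to fit into the driver. chrome9_driver_load() and chrome9_submit_cmdbuf() are hypothetical wrappers invented for illustration; only the via_chrome9_* symbols come from this patch, and the real call sites live elsewhere in the driver sources.

/*
 * Minimal usage sketch (assumes the usual DRM headers are available for
 * struct drm_device).  chrome9_driver_load() and chrome9_submit_cmdbuf()
 * are placeholders, not symbols added by this patch.
 */
#include "via_chrome9_verifier.h"

static int chrome9_driver_load(struct drm_device *dev)
{
	/* Build the hazard tables once, before any command stream can
	 * reach the verifier. */
	via_chrome9_init_command_verifier();
	return 0;
}

static int chrome9_submit_cmdbuf(struct drm_device *dev,
				 const uint32_t *buf,
				 unsigned int size, int agp)
{
	int ret;

	/* Reject a user buffer before it is handed to the hardware;
	 * the verifier returns 0 on success and -EINVAL otherwise. */
	ret = via_chrome9_verify_command_stream(buf, size, dev, agp);
	if (ret)
		return ret;

	/* ... hand the buffer to the DMA/AGP engine here ... */
	return 0;
}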