/****************************************************************************
*
*                   SciTech OS Portability Manager Library
*
*  ========================================================================
*
*    The contents of this file are subject to the SciTech MGL Public
*    License Version 1.0 (the "License"); you may not use this file
*    except in compliance with the License. You may obtain a copy of
*    the License at http://www.scitechsoft.com/mgl-license.txt
*
*    Software distributed under the License is distributed on an
*    "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
*    implied. See the License for the specific language governing
*    rights and limitations under the License.
*
*    The Original Code is Copyright (C) 1991-1998 SciTech Software, Inc.
*
*    The Initial Developer of the Original Code is SciTech Software, Inc.
*    All Rights Reserved.
*
*  ========================================================================
*
*  Heavily based on code copyright (C) Richard Gooch
*
* Language:     ANSI C
* Environment:  32-bit Ring 0 device driver
*
* Description:  Generic Memory Type Range Register (MTRR) functions to
*               manipulate the MTRR registers on supported CPUs. This code
*               *must* run at ring 0, so you can't normally include this
*               code directly in normal applications (the exception is
*               DOS4GW apps, which run at ring 0 under real DOS). Thus this
*               code will normally be compiled into a ring 0 device driver
*               for the target operating system.
*
****************************************************************************/

#include "pmapi.h"
#include "ztimerc.h"
#include "mtrr.h"

#ifndef REALMODE

/*--------------------------- Global variables ----------------------------*/

/* Intel pre-defined MTRR registers */

#define NUM_FIXED_RANGES        88
#define INTEL_cap_MSR           0x0FE
#define INTEL_defType_MSR       0x2FF
#define INTEL_fix64K_00000_MSR  0x250
#define INTEL_fix16K_80000_MSR  0x258
#define INTEL_fix16K_A0000_MSR  0x259
#define INTEL_fix4K_C0000_MSR   0x268
#define INTEL_fix4K_C8000_MSR   0x269
#define INTEL_fix4K_D0000_MSR   0x26A
#define INTEL_fix4K_D8000_MSR   0x26B
#define INTEL_fix4K_E0000_MSR   0x26C
#define INTEL_fix4K_E8000_MSR   0x26D
#define INTEL_fix4K_F0000_MSR   0x26E
#define INTEL_fix4K_F8000_MSR   0x26F

/* Macros to find the address of a particular MSR register */

#define INTEL_physBase_MSR(reg) (0x200 + 2 * (reg))
#define INTEL_physMask_MSR(reg) (0x200 + 2 * (reg) + 1)
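
/* For illustration: the variable range MTRRs live in interleaved base/mask
 * MSR pairs, so these macros map a register index to its pair, e.g.
 *
 *      INTEL_physBase_MSR(0) == 0x200, INTEL_physMask_MSR(0) == 0x201
 *      INTEL_physBase_MSR(7) == 0x20E, INTEL_physMask_MSR(7) == 0x20F
 */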

/* Cyrix CPU configuration register indexes */
#define CX86_CCR0       0xC0
#define CX86_CCR1       0xC1
#define CX86_CCR2       0xC2
#define CX86_CCR3       0xC3
#define CX86_CCR4       0xE8
#define CX86_CCR5       0xE9
#define CX86_CCR6       0xEA
#define CX86_DIR0       0xFE
#define CX86_DIR1       0xFF
#define CX86_ARR_BASE   0xC4
#define CX86_RCR_BASE   0xDC

/* Structure to maintain machine state while updating MTRR registers */

typedef struct {
    ulong   flags;
    ulong   defTypeLo;
    ulong   defTypeHi;
    ulong   cr4Val;
    ulong   ccr3;
} MTRRContext;

static int numMTRR = -1;
static int cpuFamily,cpuType,cpuStepping;
static void (*getMTRR)(uint reg,ulong *base,ulong *size,int *type) = NULL;
static void (*setMTRR)(uint reg,ulong base,ulong size,int type) = NULL;
static int (*getFreeRegion)(ulong base,ulong size) = NULL;

/*----------------------------- Implementation ----------------------------*/

/****************************************************************************
RETURNS:
Returns non-zero if we have the write-combining memory type
****************************************************************************/
static int MTRR_haveWriteCombine(void)
{
    ulong   config,dummy;

    switch (cpuFamily) {
        case CPU_AMD:
            if (cpuType < CPU_AMDAthlon) {
                /* AMD K6-2 stepping 8 and later support the MTRR registers.
                 * The earlier K6-2 steppings (300MHz models) do not
                 * support MTRRs.
                 */
                if ((cpuType < CPU_AMDK6_2) || (cpuType == CPU_AMDK6_2 && cpuStepping < 8))
                    return 0;
                return 1;
            }
            /* Fall through for AMD Athlon which uses P6 style MTRRs */
        case CPU_Intel:
            _MTRR_readMSR(INTEL_cap_MSR,&config,&dummy);
            return (config & (1 << 10));
        case CPU_Cyrix:
            /* Cyrix 6x86 and later support the MTRR registers */
            if (cpuType < CPU_Cyrix6x86)
                return 0;
            return 1;
    }
    return 0;
}
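
/* For reference, the INTEL_cap_MSR (IA32_MTRRCAP) layout relied on above and
 * in MTRR_init() is:
 *
 *      bits 7:0 - VCNT, the number of variable range MTRRs (read as eax & 0xFF)
 *      bit  8   - fixed range MTRRs supported
 *      bit  10  - write combining memory type supported (tested above)
 */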

/****************************************************************************
PARAMETERS:
base    - The starting physical base address of the region
size    - The size in bytes of the region

RETURNS:
The index of the region on success, else -1 on error.

REMARKS:
Generic function to find the location of a free MTRR register to be used
for creating a new mapping.
****************************************************************************/
static int GENERIC_getFreeRegion(
    ulong base,
    ulong size)
{
    int     i,ltype;
    ulong   lbase,lsize;

    for (i = 0; i < numMTRR; i++) {
        getMTRR(i,&lbase,&lsize,&ltype);
        if (lsize < 1)
            return i;
    }
    (void)base;
    (void)size;
    return -1;
}

/****************************************************************************
PARAMETERS:
base    - The starting physical base address of the region
size    - The size in bytes of the region

RETURNS:
The index of the region on success, else -1 on error.

REMARKS:
AMD K6 specific function to find the location of a free MTRR register to be
used for creating a new mapping.
****************************************************************************/
static int AMDK6_getFreeRegion(
    ulong base,
    ulong size)
{
    int     i,ltype;
    ulong   lbase,lsize;

    for (i = 0; i < numMTRR; i++) {
        getMTRR(i,&lbase,&lsize,&ltype);
        if (lsize < 1)
            return i;
    }
    (void)base;
    (void)size;
    return -1;
}

/****************************************************************************
PARAMETERS:
base    - The starting physical base address of the region
size    - The size in bytes of the region

RETURNS:
The index of the region on success, else -1 on error.

REMARKS:
Cyrix specific function to find the location of a free MTRR register to be
used for creating a new mapping.
****************************************************************************/
static int CYRIX_getFreeRegion(
    ulong base,
    ulong size)
{
    int     i,ltype;
    ulong   lbase,lsize;

    if (size > 0x2000000UL) {
        /* If we are to set up a region >32M then look at ARR7 immediately */
        getMTRR(7,&lbase,&lsize,&ltype);
        if (lsize < 1)
            return 7;
    }
    else {
        /* Check the ARR0-ARR6 registers */
        for (i = 0; i < 7; i++) {
            getMTRR(i,&lbase,&lsize,&ltype);
            if (lsize < 1)
                return i;
        }
        /* Otherwise try ARR7, but only for regions of at least 256K */
        getMTRR(7,&lbase,&lsize,&ltype);
        if ((lsize < 1) && (size >= 0x40000))
            return 7;
    }
    (void)base;
    return -1;
}

/****************************************************************************
PARAMETERS:
c   - Place to store the machine context across the call

REMARKS:
Puts the processor into a state where the MTRRs can be safely updated.
****************************************************************************/
static void MTRR_beginUpdate(
    MTRRContext *c)
{
    c->flags = _MTRR_disableInt();
    if (cpuFamily != CPU_AMD || (cpuFamily == CPU_AMD && cpuType >= CPU_AMDAthlon)) {
        switch (cpuFamily) {
            case CPU_Intel:
            case CPU_AMD:
                /* Disable MTRRs, and set the default type to uncached */
                c->cr4Val = _MTRR_saveCR4();
                _MTRR_readMSR(INTEL_defType_MSR,&c->defTypeLo,&c->defTypeHi);
                _MTRR_writeMSR(INTEL_defType_MSR,c->defTypeLo & 0xF300UL,c->defTypeHi);
                break;
            case CPU_Cyrix:
                c->ccr3 = _MTRR_getCx86(CX86_CCR3);
                _MTRR_setCx86(CX86_CCR3, (uchar)((c->ccr3 & 0x0F) | 0x10));
                break;
        }
    }
}

/****************************************************************************
PARAMETERS:
c   - Place to restore the machine context from

REMARKS:
Restores the processor state after updating any of the registers.
****************************************************************************/
static void MTRR_endUpdate(
    MTRRContext *c)
{
    if (cpuFamily != CPU_AMD || (cpuFamily == CPU_AMD && cpuType >= CPU_AMDAthlon)) {
        PM_flushTLB();
        switch (cpuFamily) {
            case CPU_Intel:
            case CPU_AMD:
                _MTRR_writeMSR(INTEL_defType_MSR,c->defTypeLo,c->defTypeHi);
                _MTRR_restoreCR4(c->cr4Val);
                break;
            case CPU_Cyrix:
                _MTRR_setCx86(CX86_CCR3,(uchar)c->ccr3);
                break;
        }
    }

    /* Re-enable interrupts (if they were enabled previously) */
    _MTRR_restoreInt(c->flags);
}

/****************************************************************************
PARAMETERS:
reg     - MTRR register to read
base    - Place to store the starting physical base address of the region
size    - Place to store the size in bytes of the region
type    - Place to store the type of the MTRR register

REMARKS:
Intel specific function to read the value of a specific MTRR register.
****************************************************************************/
static void INTEL_getMTRR(
    uint reg,
    ulong *base,
    ulong *size,
    int *type)
{
    ulong   hi,maskLo,baseLo;

    _MTRR_readMSR(INTEL_physMask_MSR(reg),&maskLo,&hi);
    if ((maskLo & 0x800) == 0) {
        /* MTRR is disabled, so it is free */
        *base = 0;
        *size = 0;
        *type = 0;
        return;
    }
    _MTRR_readMSR(INTEL_physBase_MSR(reg),&baseLo,&hi);
    maskLo = (maskLo & 0xFFFFF000UL);
    *size = ~(maskLo - 1);
    *base = (baseLo & 0xFFFFF000UL);
    *type = (baseLo & 0xFF);
}
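
/* Worked example for the size computation above (assuming a 32-bit ulong):
 * for a 4MB region the mask MSR holds maskLo = 0xFFC00800 (valid bit set),
 * so maskLo & 0xFFFFF000 = 0xFFC00000 and ~(maskLo - 1) = 0x00400000 = 4MB.
 */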

/****************************************************************************
PARAMETERS:
reg     - MTRR register to set
base    - The starting physical base address of the region
size    - The size in bytes of the region
type    - Type to place into the MTRR register

REMARKS:
Intel specific function to set the value of a specific MTRR register to
the passed in base, size and type.
****************************************************************************/
static void INTEL_setMTRR(
    uint reg,
    ulong base,
    ulong size,
    int type)
{
    MTRRContext c;

    MTRR_beginUpdate(&c);
    if (size == 0) {
        /* The invalid bit is kept in the mask, so we simply clear the
         * relevant mask register to disable a range.
         */
        _MTRR_writeMSR(INTEL_physMask_MSR(reg),0,0);
    }
    else {
        _MTRR_writeMSR(INTEL_physBase_MSR(reg),base | type,0);
        _MTRR_writeMSR(INTEL_physMask_MSR(reg),~(size - 1) | 0x800,0);
    }
    MTRR_endUpdate(&c);
}
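
/* Worked example for the encode above: enabling a 4MB region at 0xE0000000
 * writes physBase = 0xE0000000 | type and physMask = ~(0x400000 - 1) | 0x800,
 * i.e. 0xFFC00000 | 0x800 = 0xFFC00800, which the read path above decodes
 * back to base 0xE0000000 and size 4MB.
 */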

/****************************************************************************
REMARKS:
Disables banked write combining for Intel processors. We always disable this
because it invariably causes problems with older hardware.
****************************************************************************/
static void INTEL_disableBankedWriteCombine(void)
{
    MTRRContext c;

    MTRR_beginUpdate(&c);
    _MTRR_writeMSR(INTEL_fix16K_A0000_MSR,0,0);
    MTRR_endUpdate(&c);
}

/****************************************************************************
PARAMETERS:
reg     - MTRR register to read
base    - Place to store the starting physical base address of the region
size    - Place to store the size in bytes of the region
type    - Place to store the type of the MTRR register

REMARKS:
AMD K6 specific function to read the value of a specific MTRR register.
****************************************************************************/
static void AMD_getMTRR(
    uint reg,
    ulong *base,
    ulong *size,
    int *type)
{
    ulong   low,high;

    /* Upper dword is region 1, lower is region 0 */
    _MTRR_readMSR(0xC0000085, &low, &high);
    if (reg == 1)
        low = high;

    /* Find the base and type for the region */
    *base = low & 0xFFFE0000;
    *type = 0;
    if (low & 1)
        *type = PM_MTRR_UNCACHABLE;
    if (low & 2)
        *type = PM_MTRR_WRCOMB;
    if ((low & 3) == 0) {
        *size = 0;
        return;
    }

    /* This needs a little explaining. The size is stored as an
     * inverted mask of bits of 128K granularity, 15 bits long, offset
     * by 2 bits.
     *
     * So to get the size we invert the mask and add 1 to the lowest
     * mask bit (4, as it is 2 bits in). This gives us a size we then
     * shift to turn into 128K blocks.
     *
     * eg              111 1111 1111 1100 is 512K
     *
     * invert          000 0000 0000 0011
     * +1              000 0000 0000 0100
     * *128K           ...
     */
    low = (~low) & 0x0FFFC;
    *size = (low + 4) << 15;
}

/****************************************************************************
PARAMETERS:
reg     - MTRR register to set
base    - The starting physical base address of the region
size    - The size in bytes of the region
type    - Type to place into the MTRR register

REMARKS:
AMD K6 specific function to set the value of a specific MTRR register to
the passed in base, size and type.
****************************************************************************/
static void AMD_setMTRR(
    uint reg,
    ulong base,
    ulong size,
    int type)
{
    ulong       low,high,newVal;
    MTRRContext c;

    MTRR_beginUpdate(&c);
    _MTRR_readMSR(0xC0000085, &low, &high);
    if (size == 0) {
        /* Clear register to disable */
        if (reg)
            high = 0;
        else
            low = 0;
    }
    else {
        /* Set the register to the base (already shifted for us), the
         * type (off by one) and an inverted bitmask of the size.
         * The size is the only odd bit. We are fed, say, 512K.
         * We invert this and we get 111 1111 1111 1011, but
         * if you subtract one and invert you get the desired
         * 111 1111 1111 1100 mask.
         */
        newVal = (((~(size-1)) >> 15) & 0x0001FFFC) | base | (type+1);
        if (reg)
            high = newVal;
        else
            low = newVal;
    }

    /* The writeback rule is quite specific. See the manual. It is:
     * disable local interrupts, write back the cache, then set the MTRR.
     */
    PM_flushTLB();
    _MTRR_writeMSR(0xC0000085, low, high);
    MTRR_endUpdate(&c);
}

/****************************************************************************
PARAMETERS:
reg     - ARR register to read
base    - Place to store the starting physical base address of the region
size    - Place to store the size in bytes of the region
type    - Place to store the type of the region

REMARKS:
Cyrix specific function to read the value of a specific ARR register.
****************************************************************************/
static void CYRIX_getMTRR(
    uint reg,
    ulong *base,
    ulong *size,
    int *type)
{
    MTRRContext c;
    uchar       arr = CX86_ARR_BASE + reg*3;
    uchar       rcr,shift;

    /* Save flags and disable interrupts while reading the registers */
    MTRR_beginUpdate(&c);
    ((uchar*)base)[3] = _MTRR_getCx86(arr);
    ((uchar*)base)[2] = _MTRR_getCx86((uchar)(arr+1));
    ((uchar*)base)[1] = _MTRR_getCx86((uchar)(arr+2));
    rcr = _MTRR_getCx86((uchar)(CX86_RCR_BASE + reg));
    MTRR_endUpdate(&c);

    /* The low nibble of the last ARR byte is the block size field */
    shift = ((uchar*)base)[1] & 0x0f;
    *base &= 0xFFFFF000UL;

    /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
     * Note: shift==0xF means 4G, this is unsupported.
     */
    if (shift)
        *size = (reg < 7 ? 0x800UL : 0x20000UL) << shift;
    else
        *size = 0;

    /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
    if (reg < 7) {
        switch (rcr) {
            case 1:  *type = PM_MTRR_UNCACHABLE;    break;
            case 8:  *type = PM_MTRR_WRBACK;        break;
            case 9:  *type = PM_MTRR_WRCOMB;        break;
            case 24:
            default: *type = PM_MTRR_WRTHROUGH;     break;
        }
    }
    else {
        switch (rcr) {
            case 0:  *type = PM_MTRR_UNCACHABLE;    break;
            case 8:  *type = PM_MTRR_WRCOMB;        break;
            case 9:  *type = PM_MTRR_WRBACK;        break;
            case 25:
            default: *type = PM_MTRR_WRTHROUGH;     break;
        }
    }
}

/****************************************************************************
PARAMETERS:
reg     - ARR register to set
base    - The starting physical base address of the region
size    - The size in bytes of the region
type    - Type to place into the ARR register

REMARKS:
Cyrix specific function to set the value of a specific ARR register to
the passed in base, size and type.
****************************************************************************/
static void CYRIX_setMTRR(
    uint reg,
    ulong base,
    ulong size,
    int type)
{
    MTRRContext c;
    uchar       arr = CX86_ARR_BASE + reg*3;
    uchar       arr_type,arr_size;

    /* Convert the size to 4K units (ARR0-ARR6) or 256K units (ARR7) */
    size >>= (reg < 7 ? 12 : 18);
    size &= 0x7FFF;             /* Make sure arr_size <= 14 */
    for (arr_size = 0; size; arr_size++, size >>= 1)
        ;
    if (reg < 7) {
        switch (type) {
            case PM_MTRR_UNCACHABLE:    arr_type = 1;   break;
            case PM_MTRR_WRCOMB:        arr_type = 9;   break;
            case PM_MTRR_WRTHROUGH:     arr_type = 24;  break;
            default:                    arr_type = 8;   break;
        }
    }
    else {
        switch (type) {
            case PM_MTRR_UNCACHABLE:    arr_type = 0;   break;
            case PM_MTRR_WRCOMB:        arr_type = 8;   break;
            case PM_MTRR_WRTHROUGH:     arr_type = 25;  break;
            default:                    arr_type = 9;   break;
        }
    }
    MTRR_beginUpdate(&c);
    _MTRR_setCx86((uchar)arr, ((uchar*)&base)[3]);
    _MTRR_setCx86((uchar)(arr+1), ((uchar*)&base)[2]);
    _MTRR_setCx86((uchar)(arr+2), (uchar)((((uchar*)&base)[1]) | arr_size));
    _MTRR_setCx86((uchar)(CX86_RCR_BASE + reg), (uchar)arr_type);
    MTRR_endUpdate(&c);
}
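
/* Worked example of the block size encoding used by the two routines above:
 * for a 1MB region on ARR0-ARR6, CYRIX_setMTRR computes 0x100000 >> 12 = 256,
 * which the bit-counting loop turns into a size field of 9; CYRIX_getMTRR
 * decodes field 9 back to 0x800UL << 9 = 0x100000 (1MB).
 */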

/****************************************************************************
REMARKS:
On Cyrix 6x86(MX) and MII processors ARR3 is special: it is tied to SMM
(System Management Mode). So we need the following:
Check whether SMI_LOCK (CCR3 bit 0) is set
    if it is set, ARR3 cannot be changed (it cannot be changed until the
    next processor reset)
    if it is clear, then we can change it; set all the needed bits:
    - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
    - disable access to SMM memory (CCR1 bit 2 reset)
    - disable SMM mode (CCR1 bit 1 reset)
    - disable write protection of ARR3 (CCR6 bit 1 reset)
    - (maybe) disable ARR3
Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
****************************************************************************/
static void CYRIX_initARR(void)
{
    MTRRContext c;
    uchar       ccr[7];
    int         ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };

    /* Begin updating */
    MTRR_beginUpdate(&c);

    /* Save all CCRs locally */
    ccr[0] = _MTRR_getCx86(CX86_CCR0);
    ccr[1] = _MTRR_getCx86(CX86_CCR1);
    ccr[2] = _MTRR_getCx86(CX86_CCR2);
    ccr[3] = (uchar)c.ccr3;
    ccr[4] = _MTRR_getCx86(CX86_CCR4);
    ccr[5] = _MTRR_getCx86(CX86_CCR5);
    ccr[6] = _MTRR_getCx86(CX86_CCR6);
    if (ccr[3] & 1)
        ccrc[3] = 1;
    else {
        /* SMI is not locked, so ARR3 can be reclaimed. Clear the ARR3
         * write protection bit (CCR6 bit 1) so that we can use ARR3
         * for our own mappings.
         */
        if (ccr[6] & 0x02) {
            ccr[6] &= 0xFD;
            ccrc[6] = 1;        /* Disable write protection of ARR3 */
            _MTRR_setCx86(CX86_CCR6,ccr[6]);
        }
    }

    /* If we changed CCR1 in memory, change it in the processor, too. */
    if (ccrc[1])
        _MTRR_setCx86(CX86_CCR1,ccr[1]);

    /* Enable ARR usage by the processor */
    if (!(ccr[5] & 0x20)) {
        ccr[5] |= 0x20;
        ccrc[5] = 1;
        _MTRR_setCx86(CX86_CCR5,ccr[5]);
    }

    /* We are finished updating */
    MTRR_endUpdate(&c);
}

/****************************************************************************
REMARKS:
Initialise the MTRR module by detecting the processor type and determining
if the processor supports the MTRR functionality.
****************************************************************************/
void MTRR_init(void)
{
    int     i,cpu,ltype;
    ulong   eax,edx,lbase,lsize;

    /* Check that we have a compatible CPU */
    if (numMTRR == -1) {
        numMTRR = 0;
        if (!_MTRR_isRing0())
            return;
        cpu = CPU_getProcessorType();
        cpuFamily = cpu & CPU_familyMask;
        cpuType = cpu & CPU_mask;
        cpuStepping = (cpu & CPU_steppingMask) >> CPU_steppingShift;
        switch (cpuFamily) {
            case CPU_Intel:
                /* Intel Pentium Pro and later support the MTRR registers */
                if (cpuType < CPU_PentiumPro)
                    return;
                _MTRR_readMSR(INTEL_cap_MSR,&eax,&edx);
                numMTRR = eax & 0xFF;
                getMTRR = INTEL_getMTRR;
                setMTRR = INTEL_setMTRR;
                getFreeRegion = GENERIC_getFreeRegion;
                INTEL_disableBankedWriteCombine();
                break;
            case CPU_AMD:
                /* AMD K6-2 stepping 8 and later support the MTRR registers */
                if ((cpuType < CPU_AMDK6_2) || (cpuType == CPU_AMDK6_2 && cpuStepping < 8))
                    return;
                if (cpuType < CPU_AMDAthlon) {
                    numMTRR = 2;        /* AMD K6 family CPUs have 2 MTRRs */
                    getMTRR = AMD_getMTRR;
                    setMTRR = AMD_setMTRR;
                    getFreeRegion = AMDK6_getFreeRegion;

                    /* For some reason some IBM systems with K6-2 processors
                     * have write combining enabled for the system BIOS
                     * region from 0xE0000 to 0xFFFFFF. We need *both* MTRRs
                     * for our own graphics drivers, so if we detect any
                     * regions below the 1Meg boundary, we remove them
                     * so we can use this MTRR register ourselves.
                     */
                    for (i = 0; i < numMTRR; i++) {
                        getMTRR(i,&lbase,&lsize,&ltype);
                        if (lbase < 0x100000)
                            setMTRR(i,0,0,0);
                    }
                }
                else {
                    /* AMD Athlon uses P6 style MTRRs */
                    _MTRR_readMSR(INTEL_cap_MSR,&eax,&edx);
                    numMTRR = eax & 0xFF;
                    getMTRR = INTEL_getMTRR;
                    setMTRR = INTEL_setMTRR;
                    getFreeRegion = GENERIC_getFreeRegion;
                    INTEL_disableBankedWriteCombine();
                }
                break;
            case CPU_Cyrix:
                /* Cyrix 6x86 and later support the MTRR registers */
                if (cpuType < CPU_Cyrix6x86 || cpuType >= CPU_CyrixMediaGX)
                    return;
                numMTRR = 8;            /* Cyrix CPUs have 8 ARRs */
                getMTRR = CYRIX_getMTRR;
                setMTRR = CYRIX_setMTRR;
                getFreeRegion = CYRIX_getFreeRegion;
                CYRIX_initARR();
                break;
            default:
                return;
        }
    }
}

/****************************************************************************
PARAMETERS:
base    - The starting physical base address of the region
size    - The size in bytes of the region
type    - Type to place into the MTRR register

RETURNS:
Error code describing the result.

REMARKS:
Function to enable write combining for the specified region of memory.
****************************************************************************/
int MTRR_enableWriteCombine(
    ulong base,
    ulong size,
    uint type)
{
    int     i;
    int     ltype;
    ulong   lbase,lsize,last;

    /* Check that we have a CPU that supports MTRRs and that the type is valid */
    if (numMTRR <= 0) {
        if (!_MTRR_isRing0())
            return PM_MTRR_ERR_NO_OS_SUPPORT;
        return PM_MTRR_NOT_SUPPORTED;
    }
    if (type >= PM_MTRR_MAX)
        return PM_MTRR_ERR_PARAMS;

    /* Check that this processor supports write combining */
    if (!MTRR_haveWriteCombine())
        return PM_MTRR_ERR_NOWRCOMB;

    /* Adjust the boundaries depending on the CPU type */
    switch (cpuFamily) {
        case CPU_AMD:
            if (cpuType < CPU_AMDAthlon) {
                /* Apply the K6 block alignment and size rules. In order:
                 * o Uncached or write gathering only
                 * o 128K or bigger block
                 * o Power of 2 block
                 * o Base suitably aligned to the block size
                 */
                if (type > PM_MTRR_WRCOMB && (size < (1 << 17) || (size & ~(size-1))-size || (base & (size-1))))
                    return PM_MTRR_ERR_NOT_ALIGNED;
                break;
            }
            /* Fall through for AMD Athlon which uses P6 style MTRRs */
        case CPU_Intel:
        case CPU_Cyrix:
            if ((base & 0xFFF) || (size & 0xFFF)) {
                /* Base and size must be multiples of 4KB */
                return PM_MTRR_ERR_NOT_4KB_ALIGNED;
            }
            if (base < 0x100000) {
                /* Base must be >= 1MB */
                return PM_MTRR_ERR_BELOW_1MB;
            }

            /* Check that the upper bits of base and last are equal and that
             * the lower bits are 0 for base and 1 for last
             */
            last = base + size - 1;
            for (lbase = base; !(lbase & 1) && (last & 1); lbase = lbase >> 1, last = last >> 1)
                ;
            if (lbase != last) {
                /* Base is not aligned on the correct boundary */
                return PM_MTRR_ERR_NOT_ALIGNED;
            }
            break;
        default:
            return PM_MTRR_NOT_SUPPORTED;
    }

    /* Search for an existing MTRR that covers the region */
    for (i = 0; i < numMTRR; ++i) {
        getMTRR(i,&lbase,&lsize,&ltype);
        if (lbase == 0 && lsize == 0)
            continue;
        if (base > lbase + (lsize-1))
            continue;
        if ((base < lbase) && (base+size-1 < lbase))
            continue;

        /* Check that we don't overlap an existing region */
        if (type != PM_MTRR_UNCACHABLE) {
            if ((base < lbase) || (base+size-1 > lbase+lsize-1))
                return PM_MTRR_ERR_OVERLAP;
        }
        else if (base == lbase && size == lsize) {
            /* The region already exists so leave it alone */
            return PM_MTRR_ERR_OK;
        }

        /* New region is enclosed by an existing region, so only allow
         * a new type to be created if we are setting a region to be
         * uncacheable (such as MMIO registers within a framebuffer).
         */
        if (ltype != (int)type) {
            if (type == PM_MTRR_UNCACHABLE)
                continue;
            return PM_MTRR_ERR_TYPE_MISMATCH;
        }
        return PM_MTRR_ERR_OK;
    }

    /* Search for an empty MTRR to use for the new region */
    if ((i = getFreeRegion(base,size)) < 0)
        return PM_MTRR_ERR_NONE_FREE;
    setMTRR(i,base,size,type);
    return PM_MTRR_ERR_OK;
}
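
/* A minimal usage sketch (kept out of the build with #if 0): how a ring 0
 * caller might enable write combining for a linear framebuffer. The physical
 * address and size below are hypothetical placeholders, not values taken
 * from this driver.
 */
#if 0
static int enableFramebufferWriteCombine(void)
{
    /* Detect the CPU and set up the MTRR function pointers first */
    MTRR_init();

    /* Request a 4MB write combined region at a hypothetical framebuffer base */
    return MTRR_enableWriteCombine(0xE0000000UL, 0x400000UL, PM_MTRR_WRCOMB);
}
#endif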

/****************************************************************************
PARAMETERS:
callback    - Function to call back with write combine information

REMARKS:
Function to enumerate all write combine regions currently enabled for the
processor.
****************************************************************************/
int PMAPI PM_enumWriteCombine(
    PM_enumWriteCombine_t callback)
{
    int     i,ltype;
    ulong   lbase,lsize;

    /* Check that we have a CPU that supports MTRRs */
    if (numMTRR <= 0) {
        if (!_MTRR_isRing0())
            return PM_MTRR_ERR_NO_OS_SUPPORT;
        return PM_MTRR_NOT_SUPPORTED;
    }

    /* Enumerate all existing MTRRs */
    for (i = 0; i < numMTRR; ++i) {
        getMTRR(i,&lbase,&lsize,&ltype);
        callback(lbase,lsize,ltype);
    }
    return PM_MTRR_ERR_OK;
}
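
/* A minimal sketch of an enumeration callback (again kept out of the build
 * with #if 0), assuming PM_enumWriteCombine_t matches the
 * (ulong base, ulong size, int type) call made above; the real typedef lives
 * in the PM library headers.
 */
#if 0
static int regionCount = 0;

static void countRegion(ulong base,ulong size,int type)
{
    /* Count every range that is actually in use */
    if (size != 0)
        regionCount++;
    (void)base;
    (void)type;
}

/* ... later: PM_enumWriteCombine(countRegion); */
#endif
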
#endif  /* !REALMODE */