1 /* Blackfin Memory Management Unit (MMU) model.
3 Copyright (C) 2010-2022 Free Software Foundation, Inc.
4 Contributed by Analog Devices, Inc.
6 This file is part of simulators.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* This must come before any other includes.  */
#include "defs.h"

#include "sim-main.h"
#include "sim-options.h"
#include "devices.h"

#include "dv-bfin_mmu.h"
#include "dv-bfin_cec.h"
30 /* XXX: Should this really be two blocks of registers ? PRM describes
31 these as two Content Addressable Memory (CAM) blocks. */
37 /* Order after here is important -- matches hardware MMR layout. */
38 bu32 sram_base_address
;
40 bu32 dmem_control
, dcplb_fault_status
, dcplb_fault_addr
;
41 char _dpad0
[0x100 - 0x0 - (4 * 4)];
43 char _dpad1
[0x200 - 0x100 - (4 * 16)];
45 char _dpad2
[0x300 - 0x200 - (4 * 16)];
47 char _dpad3
[0x400 - 0x300 - (4 * 1)];
50 char _dpad4
[0x1000 - 0x400 - (4 * 2)];
52 bu32 idk
; /* Filler MMR; hardware simply ignores. */
53 bu32 imem_control
, icplb_fault_status
, icplb_fault_addr
;
54 char _ipad0
[0x100 - 0x0 - (4 * 4)];
56 char _ipad1
[0x200 - 0x100 - (4 * 16)];
58 char _ipad2
[0x300 - 0x200 - (4 * 16)];
60 char _ipad3
[0x400 - 0x300 - (4 * 1)];
/* MMR offsets are measured from the first hardware-visible register
   (SRAM_BASE_ADDRESS), so struct offsets map directly onto the MMR
   block; mmr_idx() converts a 4-byte register offset to a word index.  */
#define mmr_base()      offsetof(struct bfin_mmu, sram_base_address)
#define mmr_offset(mmr) (offsetof(struct bfin_mmu, mmr) - mmr_base())
#define mmr_idx(mmr)    (mmr_offset (mmr) / 4)
67 static const char * const mmr_names
[BFIN_COREMMR_MMU_SIZE
/ 4] =
69 "SRAM_BASE_ADDRESS", "DMEM_CONTROL", "DCPLB_FAULT_STATUS", "DCPLB_FAULT_ADDR",
70 [mmr_idx (dcplb_addr
[0])] = "DCPLB_ADDR0",
71 "DCPLB_ADDR1", "DCPLB_ADDR2", "DCPLB_ADDR3", "DCPLB_ADDR4", "DCPLB_ADDR5",
72 "DCPLB_ADDR6", "DCPLB_ADDR7", "DCPLB_ADDR8", "DCPLB_ADDR9", "DCPLB_ADDR10",
73 "DCPLB_ADDR11", "DCPLB_ADDR12", "DCPLB_ADDR13", "DCPLB_ADDR14", "DCPLB_ADDR15",
74 [mmr_idx (dcplb_data
[0])] = "DCPLB_DATA0",
75 "DCPLB_DATA1", "DCPLB_DATA2", "DCPLB_DATA3", "DCPLB_DATA4", "DCPLB_DATA5",
76 "DCPLB_DATA6", "DCPLB_DATA7", "DCPLB_DATA8", "DCPLB_DATA9", "DCPLB_DATA10",
77 "DCPLB_DATA11", "DCPLB_DATA12", "DCPLB_DATA13", "DCPLB_DATA14", "DCPLB_DATA15",
78 [mmr_idx (dtest_command
)] = "DTEST_COMMAND",
79 [mmr_idx (dtest_data
[0])] = "DTEST_DATA0", "DTEST_DATA1",
80 [mmr_idx (imem_control
)] = "IMEM_CONTROL", "ICPLB_FAULT_STATUS", "ICPLB_FAULT_ADDR",
81 [mmr_idx (icplb_addr
[0])] = "ICPLB_ADDR0",
82 "ICPLB_ADDR1", "ICPLB_ADDR2", "ICPLB_ADDR3", "ICPLB_ADDR4", "ICPLB_ADDR5",
83 "ICPLB_ADDR6", "ICPLB_ADDR7", "ICPLB_ADDR8", "ICPLB_ADDR9", "ICPLB_ADDR10",
84 "ICPLB_ADDR11", "ICPLB_ADDR12", "ICPLB_ADDR13", "ICPLB_ADDR14", "ICPLB_ADDR15",
85 [mmr_idx (icplb_data
[0])] = "ICPLB_DATA0",
86 "ICPLB_DATA1", "ICPLB_DATA2", "ICPLB_DATA3", "ICPLB_DATA4", "ICPLB_DATA5",
87 "ICPLB_DATA6", "ICPLB_DATA7", "ICPLB_DATA8", "ICPLB_DATA9", "ICPLB_DATA10",
88 "ICPLB_DATA11", "ICPLB_DATA12", "ICPLB_DATA13", "ICPLB_DATA14", "ICPLB_DATA15",
89 [mmr_idx (itest_command
)] = "ITEST_COMMAND",
90 [mmr_idx (itest_data
[0])] = "ITEST_DATA0", "ITEST_DATA1",
92 #define mmr_name(off) (mmr_names[(off) / 4] ? : "<INV>")
/* Set by the --mmu-skip-cplbs option: when true, CPLB tables are never
   walked and only alignment/implicit-region checks are performed.  */
static bool bfin_mmu_skip_cplbs = false;
97 bfin_mmu_io_write_buffer (struct hw
*me
, const void *source
,
98 int space
, address_word addr
, unsigned nr_bytes
)
100 struct bfin_mmu
*mmu
= hw_data (me
);
105 /* Invalid access mode is higher priority than missing register. */
106 if (!dv_bfin_mmr_require_32 (me
, addr
, nr_bytes
, true))
109 value
= dv_load_4 (source
);
111 mmr_off
= addr
- mmu
->base
;
112 valuep
= (void *)((uintptr_t)mmu
+ mmr_base() + mmr_off
);
118 case mmr_offset(dmem_control
):
119 case mmr_offset(imem_control
):
120 /* XXX: IMC/DMC bit should add/remove L1 cache regions ... */
121 case mmr_offset(dtest_data
[0]) ... mmr_offset(dtest_data
[1]):
122 case mmr_offset(itest_data
[0]) ... mmr_offset(itest_data
[1]):
123 case mmr_offset(dcplb_addr
[0]) ... mmr_offset(dcplb_addr
[15]):
124 case mmr_offset(dcplb_data
[0]) ... mmr_offset(dcplb_data
[15]):
125 case mmr_offset(icplb_addr
[0]) ... mmr_offset(icplb_addr
[15]):
126 case mmr_offset(icplb_data
[0]) ... mmr_offset(icplb_data
[15]):
129 case mmr_offset(sram_base_address
):
130 case mmr_offset(dcplb_fault_status
):
131 case mmr_offset(dcplb_fault_addr
):
132 case mmr_offset(idk
):
133 case mmr_offset(icplb_fault_status
):
134 case mmr_offset(icplb_fault_addr
):
135 /* Discard writes to these. */
137 case mmr_offset(itest_command
):
138 /* XXX: Not supported atm. */
140 hw_abort (me
, "ITEST_COMMAND unimplemented");
142 case mmr_offset(dtest_command
):
143 /* Access L1 memory indirectly. */
147 bu32 addr
= mmu
->sram_base_address
|
148 ((value
>> (26 - 11)) & (1 << 11)) | /* addr bit 11 (Way0/Way1) */
149 ((value
>> (24 - 21)) & (1 << 21)) | /* addr bit 21 (Data/Inst) */
150 ((value
>> (23 - 15)) & (1 << 15)) | /* addr bit 15 (Data Bank) */
151 ((value
>> (16 - 12)) & (3 << 12)) | /* addr bits 13:12 (Subbank) */
152 (value
& 0x47F8); /* addr bits 14 & 10:3 */
154 if (!(value
& TEST_DATA_ARRAY
))
155 hw_abort (me
, "DTEST_COMMAND tag array unimplemented");
156 if (value
& 0xfa7cb801)
157 hw_abort (me
, "DTEST_COMMAND bits undefined");
159 if (value
& TEST_WRITE
)
160 sim_write (hw_system (me
), addr
, mmu
->dtest_data
, 8);
162 sim_read (hw_system (me
), addr
, mmu
->dtest_data
, 8);
166 dv_bfin_mmr_invalid (me
, addr
, nr_bytes
, true);
174 bfin_mmu_io_read_buffer (struct hw
*me
, void *dest
,
175 int space
, address_word addr
, unsigned nr_bytes
)
177 struct bfin_mmu
*mmu
= hw_data (me
);
181 /* Invalid access mode is higher priority than missing register. */
182 if (!dv_bfin_mmr_require_32 (me
, addr
, nr_bytes
, false))
185 mmr_off
= addr
- mmu
->base
;
186 valuep
= (void *)((uintptr_t)mmu
+ mmr_base() + mmr_off
);
192 case mmr_offset(dmem_control
):
193 case mmr_offset(imem_control
):
194 case mmr_offset(dtest_command
):
195 case mmr_offset(dtest_data
[0]) ... mmr_offset(dtest_data
[2]):
196 case mmr_offset(itest_command
):
197 case mmr_offset(itest_data
[0]) ... mmr_offset(itest_data
[2]):
198 /* XXX: should do something here. */
199 case mmr_offset(dcplb_addr
[0]) ... mmr_offset(dcplb_addr
[15]):
200 case mmr_offset(dcplb_data
[0]) ... mmr_offset(dcplb_data
[15]):
201 case mmr_offset(icplb_addr
[0]) ... mmr_offset(icplb_addr
[15]):
202 case mmr_offset(icplb_data
[0]) ... mmr_offset(icplb_data
[15]):
203 case mmr_offset(sram_base_address
):
204 case mmr_offset(dcplb_fault_status
):
205 case mmr_offset(dcplb_fault_addr
):
206 case mmr_offset(idk
):
207 case mmr_offset(icplb_fault_status
):
208 case mmr_offset(icplb_fault_addr
):
209 dv_store_4 (dest
, *valuep
);
212 dv_bfin_mmr_invalid (me
, addr
, nr_bytes
, false);
220 attach_bfin_mmu_regs (struct hw
*me
, struct bfin_mmu
*mmu
)
222 address_word attach_address
;
224 unsigned attach_size
;
225 reg_property_spec reg
;
227 if (hw_find_property (me
, "reg") == NULL
)
228 hw_abort (me
, "Missing \"reg\" property");
230 if (!hw_find_reg_array_property (me
, "reg", 0, ®
))
231 hw_abort (me
, "\"reg\" property must contain three addr/size entries");
233 hw_unit_address_to_attach_address (hw_parent (me
),
235 &attach_space
, &attach_address
, me
);
236 hw_unit_size_to_attach_size (hw_parent (me
), ®
.size
, &attach_size
, me
);
238 if (attach_size
!= BFIN_COREMMR_MMU_SIZE
)
239 hw_abort (me
, "\"reg\" size must be %#x", BFIN_COREMMR_MMU_SIZE
);
241 hw_attach_address (hw_parent (me
),
242 0, attach_space
, attach_address
, attach_size
, me
);
244 mmu
->base
= attach_address
;
248 bfin_mmu_finish (struct hw
*me
)
250 struct bfin_mmu
*mmu
;
252 mmu
= HW_ZALLOC (me
, struct bfin_mmu
);
254 set_hw_data (me
, mmu
);
255 set_hw_io_read_buffer (me
, bfin_mmu_io_read_buffer
);
256 set_hw_io_write_buffer (me
, bfin_mmu_io_write_buffer
);
258 attach_bfin_mmu_regs (me
, mmu
);
260 /* Initialize the MMU. */
261 mmu
->sram_base_address
= 0xff800000 - 0;
262 /*(4 * 1024 * 1024 * CPU_INDEX (hw_system_cpu (me)));*/
263 mmu
->dmem_control
= 0x00000001;
264 mmu
->imem_control
= 0x00000001;
267 const struct hw_descriptor dv_bfin_mmu_descriptor
[] =
269 {"bfin_mmu", bfin_mmu_finish
,},
273 /* Device option parsing. */
275 static DECLARE_OPTION_HANDLER (bfin_mmu_option_handler
);
278 OPTION_MMU_SKIP_TABLES
= OPTION_START
,
281 static const OPTION bfin_mmu_options
[] =
283 { {"mmu-skip-cplbs", no_argument
, NULL
, OPTION_MMU_SKIP_TABLES
},
284 '\0', NULL
, "Skip parsing of CPLB tables (big speed increase)",
285 bfin_mmu_option_handler
, NULL
},
287 { {NULL
, no_argument
, NULL
, 0}, '\0', NULL
, NULL
, NULL
, NULL
}
291 bfin_mmu_option_handler (SIM_DESC sd
, sim_cpu
*current_cpu
, int opt
,
292 char *arg
, int is_command
)
296 case OPTION_MMU_SKIP_TABLES
:
297 bfin_mmu_skip_cplbs
= true;
301 sim_io_eprintf (sd
, "Unknown Blackfin MMU option %d\n", opt
);
306 /* Provide a prototype to silence -Wmissing-prototypes. */
307 extern MODULE_INIT_FN sim_install_bfin_mmu
;
310 sim_install_bfin_mmu (SIM_DESC sd
)
312 SIM_ASSERT (STATE_MAGIC (sd
) == SIM_MAGIC_NUMBER
);
313 return sim_add_option_table (sd
, NULL
, bfin_mmu_options
);
316 #define MMU_STATE(cpu) DV_STATE_CACHED (cpu, mmu)
319 _mmu_log_ifault (SIM_CPU
*cpu
, struct bfin_mmu
*mmu
, bu32 pc
, bool supv
)
321 mmu
->icplb_fault_addr
= pc
;
322 mmu
->icplb_fault_status
= supv
<< 17;
326 mmu_log_ifault (SIM_CPU
*cpu
)
328 _mmu_log_ifault (cpu
, MMU_STATE (cpu
), PCREG
, cec_get_ivg (cpu
) >= 0);
/* NOTE(review): this block is a garbled extraction fragment.  Each line
   carries a fused upstream line number, and several upstream lines are
   missing entirely: the "if (!mmu) return;" guard that the "No logging
   in non-OS mode" comment refers to, and the trailing stores that latch
   *fault_addr / *fault_status (the status bit layout cannot be
   reconstructed from what is visible).  Restore the full body from
   upstream sim/bfin/dv-bfin_mmu.c -- TODO confirm against upstream.  */
/* Latch a CPLB fault for ADDR into the instruction-side (inst) or
   data-side fault MMRs; FAULTS presumably carries the per-CPLB-entry
   hit mask from the table scan -- verify against callers.  */
332 _mmu_log_fault (SIM_CPU
*cpu
, struct bfin_mmu
*mmu
, bu32 addr
, bool write
,
333 bool inst
, bool miss
, bool supv
, bool dag1
, bu32 faults
)
335 bu32
*fault_status
, *fault_addr
;
337 /* No logging in non-OS mode. */
/* Select the instruction- or data-side fault register pair.  */
341 fault_status
= inst
? &mmu
->icplb_fault_status
: &mmu
->dcplb_fault_status
;
342 fault_addr
= inst
? &mmu
->icplb_fault_addr
: &mmu
->dcplb_fault_addr
;
343 /* ICPLB regs always get updated. */
345 _mmu_log_ifault (cpu
, mmu
, PCREG
, supv
);
357 _mmu_process_fault (SIM_CPU
*cpu
, struct bfin_mmu
*mmu
, bu32 addr
, bool write
,
358 bool inst
, bool unaligned
, bool miss
, bool supv
, bool dag1
)
362 /* See order in mmu_check_addr() */
364 excp
= inst
? VEC_MISALI_I
: VEC_MISALI_D
;
365 else if (addr
>= BFIN_SYSTEM_MMR_BASE
)
368 excp
= inst
? VEC_CPLB_I_M
: VEC_CPLB_M
;
371 /* Misses are hardware errors. */
372 cec_hwerr (cpu
, HWERR_EXTERN_ADDR
);
376 _mmu_log_fault (cpu
, mmu
, addr
, write
, inst
, miss
, supv
, dag1
, 0);
377 cec_exception (cpu
, excp
);
381 mmu_process_fault (SIM_CPU
*cpu
, bu32 addr
, bool write
, bool inst
,
382 bool unaligned
, bool miss
)
384 SIM_DESC sd
= CPU_STATE (cpu
);
385 struct bfin_mmu
*mmu
;
387 if (STATE_ENVIRONMENT (sd
) != OPERATING_ENVIRONMENT
)
390 mmu
= MMU_STATE (cpu
);
392 _mmu_process_fault (cpu
, mmu
, addr
, write
, inst
, unaligned
, miss
,
393 cec_is_supervisor_mode (cpu
),
394 BFIN_CPU_STATE
.multi_pc
== PCREG
+ 6);
398 -2: no known problems
401 1: protection violation
406 mmu_check_implicit_addr (SIM_CPU
*cpu
, bu32 addr
, bool inst
, int size
,
407 bool supv
, bool dag1
)
409 bool l1
= ((addr
& 0xFF000000) == 0xFF000000);
410 bu32 amask
= (addr
& 0xFFF00000);
412 if (addr
& (size
- 1))
415 /* MMRs may never be executable or accessed from usermode. */
416 if (addr
>= BFIN_SYSTEM_MMR_BASE
)
420 else if (!supv
|| dag1
)
427 /* Some regions are not executable. */
428 /* XXX: Should this be in the model data ? Core B 561 ? */
430 return (amask
== 0xFFA00000) ? -1 : 1;
434 /* Some regions are not readable. */
435 /* XXX: Should this be in the model data ? Core B 561 ? */
437 return (amask
!= 0xFFA00000) ? -1 : 4;
/* NOTE(review): this block is a garbled extraction fragment.  Each line
   carries a fused upstream line number, and substantial upstream lines
   are missing: the function's opening/closing braces, the hit/fault
   accumulation inside the CPLB scan loop, the implicit-check handling
   after "mmu_check_implicit_addr", and the final return that selects an
   entry from iexcps[]/dexcps[].  Restore the full body from upstream
   sim/bfin/dv-bfin_mmu.c rather than guessing -- TODO confirm.  */
443 /* Exception order per the PRM (first has highest):
444 Inst Multiple CPLB Hits
445 Inst Misaligned Access
446 Inst Protection Violation
448 Only the alignment matters in non-OS mode though. */
/* Core CPLB lookup for an explicit access to ADDR: returns 0 when the
   access is fine, a positive exception vector to raise, or (apparently)
   a negative value when the fault was already handled here.  */
450 _mmu_check_addr (SIM_CPU
*cpu
, bu32 addr
, bool write
, bool inst
, int size
)
452 SIM_DESC sd
= CPU_STATE (cpu
);
453 struct bfin_mmu
*mmu
;
454 bu32
*fault_status
, *fault_addr
, *mem_control
, *cplb_addr
, *cplb_data
;
456 bool supv
, do_excp
, dag1
;
459 supv
= cec_is_supervisor_mode (cpu
);
/* dag1: second DAG access of a parallel-issue instruction (PC + 6).  */
460 dag1
= (BFIN_CPU_STATE
.multi_pc
== PCREG
+ 6);
/* Non-OS environments (or --mmu-skip-cplbs) only get the implicit
   region and alignment checks.  */
462 if (STATE_ENVIRONMENT (sd
) != OPERATING_ENVIRONMENT
|| bfin_mmu_skip_cplbs
)
464 int ret
= mmu_check_implicit_addr (cpu
, addr
, inst
, size
, supv
, dag1
);
465 /* Valid hits and misses are OK in non-OS envs. */
468 _mmu_process_fault (cpu
, NULL
, addr
, write
, inst
, (ret
== 3), false, supv
, dag1
);
/* OS mode: select the instruction- or data-side register bank.  */
471 mmu
= MMU_STATE (cpu
);
472 fault_status
= inst
? &mmu
->icplb_fault_status
: &mmu
->dcplb_fault_status
;
473 fault_addr
= inst
? &mmu
->icplb_fault_addr
: &mmu
->dcplb_fault_addr
;
474 mem_control
= inst
? &mmu
->imem_control
: &mmu
->dmem_control
;
475 cplb_addr
= inst
? &mmu
->icplb_addr
[0] : &mmu
->dcplb_addr
[0];
476 cplb_data
= inst
? &mmu
->icplb_data
[0] : &mmu
->dcplb_data
[0];
482 /* CPLBs disabled -> little to do. */
483 if (!(*mem_control
& ENCPLB
))
489 /* Check all the CPLBs first. */
490 for (i
= 0; i
< 16; ++i
)
/* Page sizes indexed by the CPLB entry's PAGE_SIZE field: 1KB, 4KB,
   1MB, 4MB.  */
492 const bu32 pages
[4] = { 0x400, 0x1000, 0x100000, 0x400000 };
493 bu32 addr_lo
, addr_hi
;
495 /* Skip invalid entries. */
496 if (!(cplb_data
[i
] & CPLB_VALID
))
499 /* See if this entry covers this address. */
500 addr_lo
= cplb_addr
[i
];
501 addr_hi
= cplb_addr
[i
] + pages
[(cplb_data
[i
] & PAGE_SIZE
) >> 16];
502 if (addr
< addr_lo
|| addr
>= addr_hi
)
/* Permission checks: writes need the USER_WR/SUPV_WR rights (and a
   cacheable write-back page must be marked dirty); reads from user
   mode need USER_RD.  */
509 if (!supv
&& !(cplb_data
[i
] & CPLB_USER_WR
))
511 if (supv
&& !(cplb_data
[i
] & CPLB_SUPV_WR
))
513 if ((cplb_data
[i
] & (CPLB_WT
| CPLB_L1_CHBL
| CPLB_DIRTY
)) == CPLB_L1_CHBL
)
518 if (!supv
&& !(cplb_data
[i
] & CPLB_USER_RD
))
523 /* Handle default/implicit CPLBs. */
524 if (!do_excp
&& hits
< 2)
528 ihits
= mmu_check_implicit_addr (cpu
, addr
, inst
, size
, supv
, dag1
);
531 /* No faults and one match -> good to go. */
538 cec_hwerr (cpu
, HWERR_EXTERN_ADDR
);
545 /* Normalize hit count so hits==2 is always multiple hit exception. */
546 hits
= min (2, hits
);
548 _mmu_log_fault (cpu
, mmu
, addr
, write
, inst
, hits
== 0, supv
, dag1
, faults
);
/* Exception tables -- apparently indexed by the normalized hit count
   (0 = miss, 1 = protection violation, 2 = multiple hits, 3 =
   misaligned); the selecting return is missing here -- confirm against
   upstream.  */
552 int iexcps
[] = { VEC_CPLB_I_M
, VEC_CPLB_I_VL
, VEC_CPLB_I_MHIT
, VEC_MISALI_I
};
557 int dexcps
[] = { VEC_CPLB_M
, VEC_CPLB_VL
, VEC_CPLB_MHIT
, VEC_MISALI_D
};
563 mmu_check_addr (SIM_CPU
*cpu
, bu32 addr
, bool write
, bool inst
, int size
)
565 int excp
= _mmu_check_addr (cpu
, addr
, write
, inst
, size
);
567 cec_exception (cpu
, excp
);
571 mmu_check_cache_addr (SIM_CPU
*cpu
, bu32 addr
, bool write
, bool inst
)
576 cacheaddr
= addr
& ~(BFIN_L1_CACHE_BYTES
- 1);
577 excp
= _mmu_check_addr (cpu
, cacheaddr
, write
, inst
, BFIN_L1_CACHE_BYTES
);
581 /* Most exceptions are ignored with cache funcs. */
582 /* XXX: Not sure if we should be ignoring CPLB misses. */
585 if (excp
== VEC_CPLB_I_VL
)
590 if (excp
== VEC_CPLB_VL
)
593 cec_exception (cpu
, excp
);