/* Blackfin Memory Management Unit (MMU) model.

   Copyright (C) 2010-2021 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "config.h"

#include "sim-main.h"
#include "sim-options.h"
#include "devices.h"
#include "dv-bfin_mmu.h"
#include "dv-bfin_cec.h"
/* XXX: Should this really be two blocks of registers ?  PRM describes
   these as two Content Addressable Memory (CAM) blocks.  */

struct bfin_mmu
{
  bu32 base;

  /* Order after here is important -- matches hardware MMR layout.  */
  bu32 sram_base_address;

  bu32 dmem_control, dcplb_fault_status, dcplb_fault_addr;
  char _dpad0[0x100 - 0x0 - (4 * 4)];
  bu32 dcplb_addr[16];
  char _dpad1[0x200 - 0x100 - (4 * 16)];
  bu32 dcplb_data[16];
  char _dpad2[0x300 - 0x200 - (4 * 16)];
  bu32 dtest_command;
  char _dpad3[0x400 - 0x300 - (4 * 1)];
  bu32 dtest_data[2];

  char _dpad4[0x1000 - 0x400 - (4 * 2)];

  bu32 idk;	/* Filler MMR; hardware simply ignores.  */
  bu32 imem_control, icplb_fault_status, icplb_fault_addr;
  char _ipad0[0x100 - 0x0 - (4 * 4)];
  bu32 icplb_addr[16];
  char _ipad1[0x200 - 0x100 - (4 * 16)];
  bu32 icplb_data[16];
  char _ipad2[0x300 - 0x200 - (4 * 16)];
  bu32 itest_command;
  char _ipad3[0x400 - 0x300 - (4 * 1)];
  bu32 itest_data[2];
};
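/* Compute MMR addresses from the struct layout: mmr_offset() is a member's
   distance from SRAM_BASE_ADDRESS (the first hardware MMR above), and
   mmr_idx() turns that byte offset into a 32-bit register index.  */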
#define mmr_base()      offsetof(struct bfin_mmu, sram_base_address)
#define mmr_offset(mmr) (offsetof(struct bfin_mmu, mmr) - mmr_base())
#define mmr_idx(mmr)    (mmr_offset (mmr) / 4)

static const char * const mmr_names[BFIN_COREMMR_MMU_SIZE / 4] =
{
  "SRAM_BASE_ADDRESS", "DMEM_CONTROL", "DCPLB_FAULT_STATUS", "DCPLB_FAULT_ADDR",
  [mmr_idx (dcplb_addr[0])] = "DCPLB_ADDR0",
  "DCPLB_ADDR1", "DCPLB_ADDR2", "DCPLB_ADDR3", "DCPLB_ADDR4", "DCPLB_ADDR5",
  "DCPLB_ADDR6", "DCPLB_ADDR7", "DCPLB_ADDR8", "DCPLB_ADDR9", "DCPLB_ADDR10",
  "DCPLB_ADDR11", "DCPLB_ADDR12", "DCPLB_ADDR13", "DCPLB_ADDR14", "DCPLB_ADDR15",
  [mmr_idx (dcplb_data[0])] = "DCPLB_DATA0",
  "DCPLB_DATA1", "DCPLB_DATA2", "DCPLB_DATA3", "DCPLB_DATA4", "DCPLB_DATA5",
  "DCPLB_DATA6", "DCPLB_DATA7", "DCPLB_DATA8", "DCPLB_DATA9", "DCPLB_DATA10",
  "DCPLB_DATA11", "DCPLB_DATA12", "DCPLB_DATA13", "DCPLB_DATA14", "DCPLB_DATA15",
  [mmr_idx (dtest_command)] = "DTEST_COMMAND",
  [mmr_idx (dtest_data[0])] = "DTEST_DATA0", "DTEST_DATA1",
  [mmr_idx (imem_control)] = "IMEM_CONTROL", "ICPLB_FAULT_STATUS", "ICPLB_FAULT_ADDR",
  [mmr_idx (icplb_addr[0])] = "ICPLB_ADDR0",
  "ICPLB_ADDR1", "ICPLB_ADDR2", "ICPLB_ADDR3", "ICPLB_ADDR4", "ICPLB_ADDR5",
  "ICPLB_ADDR6", "ICPLB_ADDR7", "ICPLB_ADDR8", "ICPLB_ADDR9", "ICPLB_ADDR10",
  "ICPLB_ADDR11", "ICPLB_ADDR12", "ICPLB_ADDR13", "ICPLB_ADDR14", "ICPLB_ADDR15",
  [mmr_idx (icplb_data[0])] = "ICPLB_DATA0",
  "ICPLB_DATA1", "ICPLB_DATA2", "ICPLB_DATA3", "ICPLB_DATA4", "ICPLB_DATA5",
  "ICPLB_DATA6", "ICPLB_DATA7", "ICPLB_DATA8", "ICPLB_DATA9", "ICPLB_DATA10",
  "ICPLB_DATA11", "ICPLB_DATA12", "ICPLB_DATA13", "ICPLB_DATA14", "ICPLB_DATA15",
  [mmr_idx (itest_command)] = "ITEST_COMMAND",
  [mmr_idx (itest_data[0])] = "ITEST_DATA0", "ITEST_DATA1",
};
#define mmr_name(off) (mmr_names[(off) / 4] ? : "<INV>")

static bool bfin_mmu_skip_cplbs = false;
static unsigned
bfin_mmu_io_write_buffer (struct hw *me, const void *source,
			  int space, address_word addr, unsigned nr_bytes)
{
  struct bfin_mmu *mmu = hw_data (me);
  bu32 mmr_off;
  bu32 value;
  bu32 *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_32 (me, addr, nr_bytes, true))
    return 0;

  value = dv_load_4 (source);

  mmr_off = addr - mmu->base;
  valuep = (void *)((unsigned long)mmu + mmr_base() + mmr_off);

  HW_TRACE_WRITE ();

  switch (mmr_off)
    {
    case mmr_offset(dmem_control):
    case mmr_offset(imem_control):
      /* XXX: IMC/DMC bit should add/remove L1 cache regions ...  */
    case mmr_offset(dtest_data[0]) ... mmr_offset(dtest_data[1]):
    case mmr_offset(itest_data[0]) ... mmr_offset(itest_data[1]):
    case mmr_offset(dcplb_addr[0]) ... mmr_offset(dcplb_addr[15]):
    case mmr_offset(dcplb_data[0]) ... mmr_offset(dcplb_data[15]):
    case mmr_offset(icplb_addr[0]) ... mmr_offset(icplb_addr[15]):
    case mmr_offset(icplb_data[0]) ... mmr_offset(icplb_data[15]):
      *valuep = value;
      break;
    case mmr_offset(sram_base_address):
    case mmr_offset(dcplb_fault_status):
    case mmr_offset(dcplb_fault_addr):
    case mmr_offset(idk):
    case mmr_offset(icplb_fault_status):
    case mmr_offset(icplb_fault_addr):
      /* Discard writes to these.  */
      break;
    case mmr_offset(itest_command):
      /* XXX: Not supported atm.  */
      if (value)
	hw_abort (me, "ITEST_COMMAND unimplemented");
      break;
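    /* The DTEST_COMMAND value packs an L1 SRAM address (way, data/instruction
       select, bank, subbank, and index bits live in scattered fields); the
       decode below reassembles that address and then moves 8 bytes through
       DTEST_DATA[1:0] in the direction selected by TEST_WRITE.  */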
    case mmr_offset(dtest_command):
      /* Access L1 memory indirectly.  */
      *valuep = value;
      if (value)
	{
	  bu32 addr = mmu->sram_base_address |
	    ((value >> (26 - 11)) & (1 << 11)) | /* addr bit 11 (Way0/Way1)   */
	    ((value >> (24 - 21)) & (1 << 21)) | /* addr bit 21 (Data/Inst)   */
	    ((value >> (23 - 15)) & (1 << 15)) | /* addr bit 15 (Data Bank)   */
	    ((value >> (16 - 12)) & (3 << 12)) | /* addr bits 13:12 (Subbank) */
	    (value & 0x47F8);                    /* addr bits 14 & 10:3       */

	  if (!(value & TEST_DATA_ARRAY))
	    hw_abort (me, "DTEST_COMMAND tag array unimplemented");
	  if (value & 0xfa7cb801)
	    hw_abort (me, "DTEST_COMMAND bits undefined");

	  if (value & TEST_WRITE)
	    sim_write (hw_system (me), addr, (void *)mmu->dtest_data, 8);
	  else
	    sim_read (hw_system (me), addr, (void *)mmu->dtest_data, 8);
	}
      break;
    default:
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
      return 0;
    }

  return nr_bytes;
}

static unsigned
bfin_mmu_io_read_buffer (struct hw *me, void *dest,
			 int space, address_word addr, unsigned nr_bytes)
{
  struct bfin_mmu *mmu = hw_data (me);
  bu32 mmr_off;
  bu32 *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_32 (me, addr, nr_bytes, false))
    return 0;

  mmr_off = addr - mmu->base;
  valuep = (void *)((unsigned long)mmu + mmr_base() + mmr_off);

  HW_TRACE_READ ();

  switch (mmr_off)
    {
    case mmr_offset(dmem_control):
    case mmr_offset(imem_control):
    case mmr_offset(dtest_command):
    case mmr_offset(dtest_data[0]) ... mmr_offset(dtest_data[1]):
    case mmr_offset(itest_command):
    case mmr_offset(itest_data[0]) ... mmr_offset(itest_data[1]):
      /* XXX: should do something here.  */
    case mmr_offset(dcplb_addr[0]) ... mmr_offset(dcplb_addr[15]):
    case mmr_offset(dcplb_data[0]) ... mmr_offset(dcplb_data[15]):
    case mmr_offset(icplb_addr[0]) ... mmr_offset(icplb_addr[15]):
    case mmr_offset(icplb_data[0]) ... mmr_offset(icplb_data[15]):
    case mmr_offset(sram_base_address):
    case mmr_offset(dcplb_fault_status):
    case mmr_offset(dcplb_fault_addr):
    case mmr_offset(idk):
    case mmr_offset(icplb_fault_status):
    case mmr_offset(icplb_fault_addr):
      dv_store_4 (dest, *valuep);
      break;
    default:
      dv_bfin_mmr_invalid (me, addr, nr_bytes, false);
      return 0;
    }

  return nr_bytes;
}

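/* Attach the MMU's MMR block at the address given by the device's "reg"
   property, and remember that base for decoding later accesses.  */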
static void
attach_bfin_mmu_regs (struct hw *me, struct bfin_mmu *mmu)
{
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;

  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");

  hw_unit_address_to_attach_address (hw_parent (me),
				     &reg.address,
				     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_COREMMR_MMU_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_COREMMR_MMU_SIZE);

  hw_attach_address (hw_parent (me),
		     0, attach_space, attach_address, attach_size, me);

  mmu->base = attach_address;
}

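/* Device instance setup: allocate the register state, hook up the MMR
   read/write handlers, attach the address range, and give the few MMRs
   with non-zero hardware defaults their reset values.  */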
static void
bfin_mmu_finish (struct hw *me)
{
  struct bfin_mmu *mmu;

  mmu = HW_ZALLOC (me, struct bfin_mmu);

  set_hw_data (me, mmu);
  set_hw_io_read_buffer (me, bfin_mmu_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_mmu_io_write_buffer);

  attach_bfin_mmu_regs (me, mmu);

  /* Initialize the MMU.  */
  mmu->sram_base_address = 0xff800000 - 0;
			   /*(4 * 1024 * 1024 * CPU_INDEX (hw_system_cpu (me)));*/
  mmu->dmem_control = 0x00000001;
  mmu->imem_control = 0x00000001;
}

const struct hw_descriptor dv_bfin_mmu_descriptor[] =
{
  {"bfin_mmu", bfin_mmu_finish,},
  {NULL, NULL},
};

/* Device option parsing.  */

static DECLARE_OPTION_HANDLER (bfin_mmu_option_handler);

enum {
  OPTION_MMU_SKIP_TABLES = OPTION_START,
};

const OPTION bfin_mmu_options[] =
{
  { {"mmu-skip-cplbs", no_argument, NULL, OPTION_MMU_SKIP_TABLES },
    '\0', NULL, "Skip parsing of CPLB tables (big speed increase)",
    bfin_mmu_option_handler, NULL },

  { {NULL, no_argument, NULL, 0}, '\0', NULL, NULL, NULL, NULL }
};

static SIM_RC
bfin_mmu_option_handler (SIM_DESC sd, sim_cpu *current_cpu, int opt,
			 char *arg, int is_command)
{
  switch (opt)
    {
    case OPTION_MMU_SKIP_TABLES:
      bfin_mmu_skip_cplbs = true;
      return SIM_RC_OK;

    default:
      sim_io_eprintf (sd, "Unknown Blackfin MMU option %d\n", opt);
      return SIM_RC_FAIL;
    }
}

#define MMU_STATE(cpu) DV_STATE_CACHED (cpu, mmu)

static void
_mmu_log_ifault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 pc, bool supv)
{
  mmu->icplb_fault_addr = pc;
  mmu->icplb_fault_status = supv << 17;
}

void
mmu_log_ifault (SIM_CPU *cpu)
{
  _mmu_log_ifault (cpu, MMU_STATE (cpu), PCREG, cec_get_ivg (cpu) >= 0);
}

static void
_mmu_log_fault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 addr, bool write,
		bool inst, bool miss, bool supv, bool dag1, bu32 faults)
{
  bu32 *fault_status, *fault_addr;

  /* No logging in non-OS mode.  */
  if (!mmu)
    return;

  fault_status = inst ? &mmu->icplb_fault_status : &mmu->dcplb_fault_status;
  fault_addr = inst ? &mmu->icplb_fault_addr : &mmu->dcplb_fault_addr;
  /* ICPLB regs always get updated.  */
  if (!inst)
    _mmu_log_ifault (cpu, mmu, PCREG, supv);

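  /* FAULT_STATUS layout used below: bit 19 = CPLB miss, bit 18 = access via
     DAG1, bit 17 = supervisor mode, bit 16 = write access, and the low bits
     carry the bitmap of matching CPLB entries passed in via FAULTS.  */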
  *fault_addr = addr;
  *fault_status =
    (miss << 19) |
    (dag1 << 18) |
    (supv << 17) |
    (write << 16) |
    faults;
}

static void
_mmu_process_fault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 addr, bool write,
		    bool inst, bool unaligned, bool miss, bool supv, bool dag1)
{
  int excp;

  /* See order in mmu_check_addr().  */
  if (unaligned)
    excp = inst ? VEC_MISALI_I : VEC_MISALI_D;
  else if (addr >= BFIN_SYSTEM_MMR_BASE)
    excp = VEC_ILL_RES;
  else if (!mmu)
    excp = inst ? VEC_CPLB_I_M : VEC_CPLB_M;
  else
    {
      /* Misses are hardware errors.  */
      cec_hwerr (cpu, HWERR_EXTERN_ADDR);
      return;
    }

  _mmu_log_fault (cpu, mmu, addr, write, inst, miss, supv, dag1, 0);
  cec_exception (cpu, excp);
}

void
mmu_process_fault (SIM_CPU *cpu, bu32 addr, bool write, bool inst,
		   bool unaligned, bool miss)
{
  SIM_DESC sd = CPU_STATE (cpu);
  struct bfin_mmu *mmu;

  if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT)
    mmu = NULL;
  else
    mmu = MMU_STATE (cpu);

  _mmu_process_fault (cpu, mmu, addr, write, inst, unaligned, miss,
		      cec_is_supervisor_mode (cpu),
		      BFIN_CPU_STATE.multi_pc == PCREG + 6);
}

/* Return values:
    -2: no known problems
    -1: valid
     0: miss
     1: protection violation
     2: multiple hits
     3: unaligned
     4: miss; hwerr  */
static int
mmu_check_implicit_addr (SIM_CPU *cpu, bu32 addr, bool inst, int size,
			 bool supv, bool dag1)
{
  bool l1 = ((addr & 0xFF000000) == 0xFF000000);
  bu32 amask = (addr & 0xFFF00000);

  if (addr & (size - 1))
    return 3;

  /* MMRs may never be executable or accessed from usermode.  */
  if (addr >= BFIN_SYSTEM_MMR_BASE)
    {
      if (inst)
	return 0;
      else if (!supv || dag1)
	return 1;
      else
	return -1;
    }
  else if (inst)
    {
      /* Some regions are not executable.  */
      /* XXX: Should this be in the model data ?  Core B 561 ?  */
      if (l1)
	return (amask == 0xFFA00000) ? -1 : 1;
    }
  else
    {
      /* Some regions are not readable.  */
      /* XXX: Should this be in the model data ?  Core B 561 ?  */
      if (l1)
	return (amask != 0xFFA00000) ? -1 : 4;
    }

  return -2;
}

/* Exception order per the PRM (first has highest):
     Inst Multiple CPLB Hits
     Inst Misaligned Access
     Inst Protection Violation
     Inst CPLB Miss
   Only the alignment matters in non-OS mode though.  */
static int
_mmu_check_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst, int size)
{
  SIM_DESC sd = CPU_STATE (cpu);
  struct bfin_mmu *mmu;
  bu32 *fault_status, *fault_addr, *mem_control, *cplb_addr, *cplb_data;
  bu32 faults;
  bool supv, do_excp, dag1;
  int i, hits;

  supv = cec_is_supervisor_mode (cpu);
  dag1 = (BFIN_CPU_STATE.multi_pc == PCREG + 6);

  if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT || bfin_mmu_skip_cplbs)
    {
      int ret = mmu_check_implicit_addr (cpu, addr, inst, size, supv, dag1);
      /* Valid hits and misses are OK in non-OS envs.  */
      if (ret < 0)
	return 0;
      _mmu_process_fault (cpu, NULL, addr, write, inst, (ret == 3), false, supv, dag1);
    }

  mmu = MMU_STATE (cpu);
  fault_status = inst ? &mmu->icplb_fault_status : &mmu->dcplb_fault_status;
  fault_addr = inst ? &mmu->icplb_fault_addr : &mmu->dcplb_fault_addr;
  mem_control = inst ? &mmu->imem_control : &mmu->dmem_control;
  cplb_addr = inst ? &mmu->icplb_addr[0] : &mmu->dcplb_addr[0];
  cplb_data = inst ? &mmu->icplb_data[0] : &mmu->dcplb_data[0];

  faults = 0;
  hits = 0;
  do_excp = false;

  /* CPLBs disabled -> little to do.  */
  if (!(*mem_control & ENCPLB))
    {
      hits = 1;
      goto implicit_check;
    }

  /* Check all the CPLBs first.  */
  for (i = 0; i < 16; ++i)
    {
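      /* CPLB page sizes selected by the PAGE_SIZE field: 1KB, 4KB, 1MB, 4MB.  */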
      const bu32 pages[4] = { 0x400, 0x1000, 0x100000, 0x400000 };
      bu32 addr_lo, addr_hi;

      /* Skip invalid entries.  */
      if (!(cplb_data[i] & CPLB_VALID))
	continue;

      /* See if this entry covers this address.  */
      addr_lo = cplb_addr[i];
      addr_hi = cplb_addr[i] + pages[(cplb_data[i] & PAGE_SIZE) >> 16];
      if (addr < addr_lo || addr >= addr_hi)
	continue;

      ++hits;
      faults |= (1 << i);
      if (write)
	{
	  if (!supv && !(cplb_data[i] & CPLB_USER_WR))
	    do_excp = true;
	  if (supv && !(cplb_data[i] & CPLB_SUPV_WR))
	    do_excp = true;
	  if ((cplb_data[i] & (CPLB_WT | CPLB_L1_CHBL | CPLB_DIRTY)) == CPLB_L1_CHBL)
	    do_excp = true;
	}
      else
	{
	  if (!supv && !(cplb_data[i] & CPLB_USER_RD))
	    do_excp = true;
	}
    }

  /* Handle default/implicit CPLBs.  */
  if (!do_excp && hits < 2)
    {
      int ihits;
 implicit_check:
      ihits = mmu_check_implicit_addr (cpu, addr, inst, size, supv, dag1);
      switch (ihits)
	{
	/* No faults and one match -> good to go.  */
	case -1: return 0;
	case -2:
	  if (hits == 1)
	    return 0;
	  break;
	case 4:
	  cec_hwerr (cpu, HWERR_EXTERN_ADDR);
	  return 0;
	default:
	  hits = ihits;
	}
    }
  else
    /* Normalize hit count so hits==2 is always multiple hit exception.  */
    hits = min (2, hits);

  _mmu_log_fault (cpu, mmu, addr, write, inst, hits == 0, supv, dag1, faults);

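  /* The normalized HITS value doubles as an index into the exception tables:
     0 = CPLB miss, 1 = protection violation, 2 = multiple hits, 3 = misaligned.  */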
  if (inst)
    {
      int iexcps[] = { VEC_CPLB_I_M, VEC_CPLB_I_VL, VEC_CPLB_I_MHIT, VEC_MISALI_I };
      return iexcps[hits];
    }
  else
    {
      int dexcps[] = { VEC_CPLB_M, VEC_CPLB_VL, VEC_CPLB_MHIT, VEC_MISALI_D };
      return dexcps[hits];
    }
}

void
mmu_check_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst, int size)
{
  int excp = _mmu_check_addr (cpu, addr, write, inst, size);
  if (excp)
    cec_exception (cpu, excp);
}

void
mmu_check_cache_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst)
{
  bu32 cacheaddr;
  int excp;

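  /* Cache management instructions operate on whole lines, so align the
     address down to its cache line before checking it.  */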
  cacheaddr = addr & ~(BFIN_L1_CACHE_BYTES - 1);
  excp = _mmu_check_addr (cpu, cacheaddr, write, inst, BFIN_L1_CACHE_BYTES);
  if (excp == 0)
    return;

  /* Most exceptions are ignored with cache funcs.  */
  /* XXX: Not sure if we should be ignoring CPLB misses.  */
  if (inst)
    {
      if (excp == VEC_CPLB_I_VL)
	return;
    }
  else
    {
      if (excp == VEC_CPLB_VL)
	return;
    }
  cec_exception (cpu, excp);
}