]> git.ipfire.org Git - thirdparty/u-boot.git/blame - arch/arm/include/asm/armv7.h
SPDX: Convert all of our single license tags to Linux Kernel style
[thirdparty/u-boot.git] / arch / arm / include / asm / armv7.h
CommitLineData
83d290c5 1/* SPDX-License-Identifier: GPL-2.0+ */
2c451f78
A
2/*
3 * (C) Copyright 2010
4 * Texas Instruments, <www.ti.com>
5 * Aneesh V <aneesh@ti.com>
2c451f78
A
6 */
7#ifndef ARMV7_H
8#define ARMV7_H
2c451f78 9
/* Cortex-A9 revisions (MIDR values: implementer/variant/part/revision) */
#define MIDR_CORTEX_A9_R0P1 0x410FC091
#define MIDR_CORTEX_A9_R1P2 0x411FC092
#define MIDR_CORTEX_A9_R1P3 0x411FC093
#define MIDR_CORTEX_A9_R2P10 0x412FC09A

/* Cortex-A15 revisions */
#define MIDR_CORTEX_A15_R0P0 0x410FC0F0
#define MIDR_CORTEX_A15_R2P2 0x412FC0F2

/* Cortex-A7 revisions */
#define MIDR_CORTEX_A7_R0P0 0x410FC070

/* Masks out the variant/revision nibbles, keeping implementer + part number */
#define MIDR_PRIMARY_PART_MASK 0xFF0FFFF0

/* ID_PFR1 feature fields (each field is 4 bits wide) */
#define CPUID_ARM_SEC_SHIFT 4
#define CPUID_ARM_SEC_MASK (0xF << CPUID_ARM_SEC_SHIFT)
#define CPUID_ARM_VIRT_SHIFT 12
#define CPUID_ARM_VIRT_MASK (0xF << CPUID_ARM_VIRT_SHIFT)
#define CPUID_ARM_GENTIMER_SHIFT 16
#define CPUID_ARM_GENTIMER_MASK (0xF << CPUID_ARM_GENTIMER_SHIFT)

/* valid bits in CBAR register / PERIPHBASE value */
#define CBAR_MASK 0xFFFF8000
35
/* CCSIDR (Cache Size ID Register) field offsets and masks */
#define CCSIDR_LINE_SIZE_OFFSET 0
#define CCSIDR_LINE_SIZE_MASK 0x7
#define CCSIDR_ASSOCIATIVITY_OFFSET 3
#define CCSIDR_ASSOCIATIVITY_MASK (0x3FF << 3)
#define CCSIDR_NUM_SETS_OFFSET 13
#define CCSIDR_NUM_SETS_MASK (0x7FFF << 13)

/*
 * Values for InD field in CSSELR
 * Selects the type of cache
 */
#define ARMV7_CSSELR_IND_DATA_UNIFIED 0
#define ARMV7_CSSELR_IND_INSTRUCTION 1

/* Values for Ctype fields in CLIDR */
#define ARMV7_CLIDR_CTYPE_NO_CACHE 0
#define ARMV7_CLIDR_CTYPE_INSTRUCTION_ONLY 1
#define ARMV7_CLIDR_CTYPE_DATA_ONLY 2
#define ARMV7_CLIDR_CTYPE_INSTRUCTION_DATA 3
#define ARMV7_CLIDR_CTYPE_UNIFIED 4
57
d75ba503
AP
58#ifndef __ASSEMBLY__
59#include <linux/types.h>
301c1283 60#include <asm/io.h>
1ea4fac5 61#include <asm/barriers.h>
9ba379ad 62
d9a7dcf5
JT
63/* read L2 control register (L2CTLR) */
64static inline uint32_t read_l2ctlr(void)
65{
66 uint32_t val = 0;
67
68 asm volatile ("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
69
70 return val;
71}
72
/* Write the L2 control register (L2CTLR, CP15 c9, c0, 2). */
static inline void write_l2ctlr(uint32_t val)
{
	/*
	 * Note: L2CTLR can only be written when the L2 memory system
	 * is idle, ie before the MMU is enabled.
	 */
	asm volatile("mcr p15, 1, %0, c9, c0, 2" : : "r" (val) : "memory");
	/* Instruction barrier so the new setting is in effect for what follows */
	isb();
}
83
0c08baf0
AS
84/*
85 * Workaround for ARM errata # 798870
86 * Set L2ACTLR[7] to reissue any memory transaction in the L2 that has been
87 * stalled for 1024 cycles to verify that its hazard condition still exists.
88 */
89static inline void v7_enable_l2_hazard_detect(void)
90{
91 uint32_t val;
92
93 /* L2ACTLR[7]: Enable hazard detect timeout */
94 asm volatile ("mrc p15, 1, %0, c15, c0, 0\n\t" : "=r"(val));
95 val |= (1 << 7);
96 asm volatile ("mcr p15, 1, %0, c15, c0, 0\n\t" : : "r"(val));
97}
98
/*
 * Workaround for ARM errata # 799270
 * Ensure that the L2 logic has been used within the previous 256 cycles
 * before modifying the ACTLR.SMP bit. This is required during boot before
 * MMU has been enabled, or during a specified reset or power down sequence.
 *
 * @address: an address whose read goes through the L2 (presumably any
 *           L2-backed location — confirm against callers).
 */
static inline void v7_enable_smp(uint32_t address)
{
	uint32_t temp, val;

	/* Read auxiliary control register */
	asm volatile ("mrc p15, 0, %0, c1, c0, 1\n\t" : "=r"(val));

	/* Enable SMP */
	val |= (1 << 6);

	/* Dummy read to assure L2 access */
	temp = readl(address);
	/*
	 * temp is forced to zero and folded into val so the read above has a
	 * data dependency on the ACTLR write and cannot be dropped/reordered
	 * by the compiler, while contributing nothing to the written value.
	 */
	temp &= 0;
	val |= temp;

	/* Write auxiliary control register */
	asm volatile ("mcr p15, 0, %0, c1, c0, 1\n\t" : : "r"(val));

	CP15DSB;
	CP15ISB;
}
126
/* Non-inline entry point for the erratum 798870 workaround (defined elsewhere) */
void v7_en_l2_hazard_detect(void);

/*
 * Outer (L2/external) cache maintenance hooks; implementations are provided
 * elsewhere (platform/SoC code). Range operations take [start, end) physical
 * addresses — presumably; confirm against the implementations.
 */
void v7_outer_cache_enable(void);
void v7_outer_cache_disable(void);
void v7_outer_cache_flush_all(void);
void v7_outer_cache_inval_all(void);
void v7_outer_cache_flush_range(u32 start, u32 end);
void v7_outer_cache_inval_range(u32 start, u32 end);
134
#ifdef CONFIG_ARMV7_NONSEC

/* Non-secure world setup/entry API (implemented in arch code) */
int armv7_init_nonsec(void);
int armv7_apply_memory_carveout(u64 *start, u64 *size);
bool armv7_boot_nonsec(void);

/* defined in assembly file */
unsigned int _nonsec_init(void);
void _do_nonsec_entry(void *target_pc, unsigned long r0,
		      unsigned long r1, unsigned long r2);
void _smp_pen(void);

/*
 * Bounds of the secure section and its stack; presumably linker-script
 * symbols — confirm against the ARMv7 linker script.
 */
extern char __secure_start[];
extern char __secure_end[];
extern char __secure_stack_start[];
extern char __secure_stack_end[];

#endif /* CONFIG_ARMV7_NONSEC */
16212b59 153
/*
 * Arch hooks to program the L2 auxiliary control register and the auxiliary
 * control register; callers pass the running CPU's MIDR, combined revision,
 * variant and revision so implementations can apply CPU-specific handling
 * (semantics defined by the implementations — not visible here).
 */
void v7_arch_cp15_set_l2aux_ctrl(u32 l2auxctrl, u32 cpu_midr,
				 u32 cpu_rev_comb, u32 cpu_variant,
				 u32 cpu_rev);
void v7_arch_cp15_set_acr(u32 acr, u32 cpu_midr, u32 cpu_rev_comb,
			  u32 cpu_variant, u32 cpu_rev);
d75ba503
AP
159#endif /* ! __ASSEMBLY__ */
160
2c451f78 161#endif