/* virt/kvm/arm/vgic/vgic-mmio.h */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */
#ifndef __KVM_ARM_VGIC_MMIO_H__
#define __KVM_ARM_VGIC_MMIO_H__

struct vgic_register_region {
	unsigned int reg_offset;
	unsigned int len;
	unsigned int bits_per_irq;
	unsigned int access_flags;
	/* Guest MMIO read handler; the ITS variant is used for ITS register frames. */
	union {
		unsigned long (*read)(struct kvm_vcpu *vcpu, gpa_t addr,
				      unsigned int len);
		unsigned long (*its_read)(struct kvm *kvm, struct vgic_its *its,
					  gpa_t addr, unsigned int len);
	};
	/* Guest MMIO write handler; the ITS variant is used for ITS register frames. */
	union {
		void (*write)(struct kvm_vcpu *vcpu, gpa_t addr,
			      unsigned int len, unsigned long val);
		void (*its_write)(struct kvm *kvm, struct vgic_its *its,
				  gpa_t addr, unsigned int len,
				  unsigned long val);
	};
	/* Optional handlers for userspace accesses via the KVM device API. */
	unsigned long (*uaccess_read)(struct kvm_vcpu *vcpu, gpa_t addr,
				      unsigned int len);
	union {
		int (*uaccess_write)(struct kvm_vcpu *vcpu, gpa_t addr,
				     unsigned int len, unsigned long val);
		int (*uaccess_its_write)(struct kvm *kvm, struct vgic_its *its,
					 gpa_t addr, unsigned int len,
					 unsigned long val);
	};
};

extern struct kvm_io_device_ops kvm_io_gic_ops;

#define VGIC_ACCESS_8bit	1
#define VGIC_ACCESS_32bit	2
#define VGIC_ACCESS_64bit	4

/*
 * Generate a mask that covers the number of bytes required to address
 * up to 1024 interrupts, each represented by <bits> bits. This assumes
 * that <bits> is a power of two.
 */
#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)

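/*
 * For example, with <bits> = 1 (one bit per IRQ, as in the enable and
 * pending registers) the 1024 interrupts span 128 bytes and the mask is
 * 0x7f; with <bits> = 8 (the priority registers) they span 1024 bytes
 * and the mask is 0x3ff.
 */
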
/*
 * (addr & mask) gives us the _byte_ offset for the INT ID.
 * We multiply this by 8 to get the _bit_ offset, then divide it by
 * the number of bits to learn the actual INT ID.
 * But instead of a division (which requires a "long long div" implementation),
 * we shift by the binary logarithm of <bits>.
 * This assumes that <bits> is a power of two.
 */
#define VGIC_ADDR_TO_INTID(addr, bits)  (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
					8 >> ilog2(bits))

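/*
 * For example, a byte offset of 0x10 into a range with 8 bits per IRQ
 * (such as the priority registers) resolves to INTID 16:
 * (0x10 * 8) >> ilog2(8) = 128 >> 3 = 16.
 */
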
/*
 * Some VGIC registers store per-IRQ information, with a different number
 * of bits per IRQ. For those registers this macro is used.
 * The _WITH_LENGTH version instantiates registers with a fixed length
 * and is mutually exclusive with the _PER_IRQ version.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, ur, uw, bpi, acc)	\
	{								\
		.reg_offset = off,					\
		.bits_per_irq = bpi,					\
		.len = bpi * 1024 / 8,					\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = ur,					\
		.uaccess_write = uw,					\
	}

#define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc)		\
	{								\
		.reg_offset = off,					\
		.bits_per_irq = 0,					\
		.len = length,						\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
	}

#define REGISTER_DESC_WITH_LENGTH_UACCESS(off, rd, wr, urd, uwr, length, acc) \
	{								\
		.reg_offset = off,					\
		.bits_per_irq = 0,					\
		.len = length,						\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = urd,					\
		.uaccess_write = uwr,					\
	}

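/*
 * A register table entry is built from these macros roughly like this
 * (sketch only; the offset constant and handler pairing below are
 * illustrative, modelled on the GICv2 distributor table):
 *
 *	static const struct vgic_register_region vgic_v2_dist_registers[] = {
 *		REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
 *			vgic_mmio_read_enable, vgic_mmio_write_senable,
 *			NULL, vgic_uaccess_write_senable, 1,
 *			VGIC_ACCESS_32bit),
 *	};
 *
 * i.e. one bit per IRQ, 32-bit accesses only, with separate guest MMIO
 * and userspace write handlers.
 */
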
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len);

void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data);

unsigned long extract_bytes(u64 data, unsigned int offset,
			    unsigned int num);

u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
		     unsigned long val);

/* Read-as-zero (raz), read-as-ones (rao) and write-ignored (wi) handlers. */
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len);

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len);

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val);

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val);

unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu, gpa_t addr,
				   unsigned int len);

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val);

unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len);

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val);

int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val);

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len);

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val);

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val);

int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val);

int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val);

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len);

unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
				       gpa_t addr, unsigned int len);

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val);

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val);

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len);

void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val);

unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len);

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val);

int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val);

u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid);

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val);

unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);

unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev);

u64 vgic_sanitise_outer_cacheability(u64 reg);
u64 vgic_sanitise_inner_cacheability(u64 reg);
u64 vgic_sanitise_shareability(u64 reg);
u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
			u64 (*sanitise_fn)(u64));

/* Find the proper register handler entry given a certain address offset */
const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset);

#endif