/* SPDX note: include/linux/arm_ffa.h from the Linux kernel source tree */
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2021 ARM Ltd.
4 */
5
6 #ifndef _LINUX_ARM_FFA_H
7 #define _LINUX_ARM_FFA_H
8
9 #include <linux/bitfield.h>
10 #include <linux/device.h>
11 #include <linux/module.h>
12 #include <linux/types.h>
13 #include <linux/uuid.h>
14
/*
 * Build an SMCCC fast-call function ID in the Arm "standard service" owner
 * range for the requested calling convention (SMC32 or SMC64).
 */
#define FFA_SMC(calling_convention, func_num)				\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, (calling_convention),	\
			   ARM_SMCCC_OWNER_STANDARD, (func_num))

/* Convenience wrappers for the two calling conventions */
#define FFA_SMC_32(func_num)	FFA_SMC(ARM_SMCCC_SMC_32, (func_num))
#define FFA_SMC_64(func_num)	FFA_SMC(ARM_SMCCC_SMC_64, (func_num))
21
/*
 * FF-A ABI function IDs.  The FFA_FN64_* variants are the SMC64 forms of
 * the calls that pass or return 64-bit values; the rest use SMC32 only.
 */
#define FFA_ERROR			FFA_SMC_32(0x60)
#define FFA_SUCCESS			FFA_SMC_32(0x61)
#define FFA_FN64_SUCCESS		FFA_SMC_64(0x61)
#define FFA_INTERRUPT			FFA_SMC_32(0x62)
#define FFA_VERSION			FFA_SMC_32(0x63)
#define FFA_FEATURES			FFA_SMC_32(0x64)
#define FFA_RX_RELEASE			FFA_SMC_32(0x65)
#define FFA_RXTX_MAP			FFA_SMC_32(0x66)
#define FFA_FN64_RXTX_MAP		FFA_SMC_64(0x66)
#define FFA_RXTX_UNMAP			FFA_SMC_32(0x67)
#define FFA_PARTITION_INFO_GET		FFA_SMC_32(0x68)
#define FFA_ID_GET			FFA_SMC_32(0x69)
#define FFA_MSG_POLL			FFA_SMC_32(0x6A)
#define FFA_MSG_WAIT			FFA_SMC_32(0x6B)
#define FFA_YIELD			FFA_SMC_32(0x6C)
#define FFA_RUN				FFA_SMC_32(0x6D)
#define FFA_MSG_SEND			FFA_SMC_32(0x6E)
#define FFA_MSG_SEND_DIRECT_REQ		FFA_SMC_32(0x6F)
#define FFA_FN64_MSG_SEND_DIRECT_REQ	FFA_SMC_64(0x6F)
#define FFA_MSG_SEND_DIRECT_RESP	FFA_SMC_32(0x70)
#define FFA_FN64_MSG_SEND_DIRECT_RESP	FFA_SMC_64(0x70)
/* Memory management interfaces */
#define FFA_MEM_DONATE			FFA_SMC_32(0x71)
#define FFA_FN64_MEM_DONATE		FFA_SMC_64(0x71)
#define FFA_MEM_LEND			FFA_SMC_32(0x72)
#define FFA_FN64_MEM_LEND		FFA_SMC_64(0x72)
#define FFA_MEM_SHARE			FFA_SMC_32(0x73)
#define FFA_FN64_MEM_SHARE		FFA_SMC_64(0x73)
#define FFA_MEM_RETRIEVE_REQ		FFA_SMC_32(0x74)
#define FFA_FN64_MEM_RETRIEVE_REQ	FFA_SMC_64(0x74)
#define FFA_MEM_RETRIEVE_RESP		FFA_SMC_32(0x75)
#define FFA_MEM_RELINQUISH		FFA_SMC_32(0x76)
#define FFA_MEM_RECLAIM			FFA_SMC_32(0x77)
#define FFA_MEM_OP_PAUSE		FFA_SMC_32(0x78)
#define FFA_MEM_OP_RESUME		FFA_SMC_32(0x79)
#define FFA_MEM_FRAG_RX			FFA_SMC_32(0x7A)
#define FFA_MEM_FRAG_TX			FFA_SMC_32(0x7B)
#define FFA_NORMAL_WORLD_RESUME		FFA_SMC_32(0x7C)
/* Notification interfaces */
#define FFA_NOTIFICATION_BITMAP_CREATE	FFA_SMC_32(0x7D)
#define FFA_NOTIFICATION_BITMAP_DESTROY	FFA_SMC_32(0x7E)
#define FFA_NOTIFICATION_BIND		FFA_SMC_32(0x7F)
#define FFA_NOTIFICATION_UNBIND		FFA_SMC_32(0x80)
#define FFA_NOTIFICATION_SET		FFA_SMC_32(0x81)
#define FFA_NOTIFICATION_GET		FFA_SMC_32(0x82)
#define FFA_NOTIFICATION_INFO_GET	FFA_SMC_32(0x83)
#define FFA_FN64_NOTIFICATION_INFO_GET	FFA_SMC_64(0x83)
#define FFA_RX_ACQUIRE			FFA_SMC_32(0x84)
#define FFA_SPM_ID_GET			FFA_SMC_32(0x85)
#define FFA_MSG_SEND2			FFA_SMC_32(0x86)
#define FFA_SECONDARY_EP_REGISTER	FFA_SMC_32(0x87)
#define FFA_FN64_SECONDARY_EP_REGISTER	FFA_SMC_64(0x87)
#define FFA_MEM_PERM_GET		FFA_SMC_32(0x88)
#define FFA_FN64_MEM_PERM_GET		FFA_SMC_64(0x88)
#define FFA_MEM_PERM_SET		FFA_SMC_32(0x89)
#define FFA_FN64_MEM_PERM_SET		FFA_SMC_64(0x89)
76
/*
 * For some calls it is necessary to use SMC64 to pass or return 64-bit values.
 * For such calls FFA_FN_NATIVE(name) will choose the appropriate
 * (native-width) function ID: FFA_FN64_<name> on 64-bit kernels,
 * plain FFA_<name> otherwise.
 */
#ifdef CONFIG_64BIT
#define FFA_FN_NATIVE(name)	FFA_FN64_##name
#else
#define FFA_FN_NATIVE(name)	FFA_##name
#endif
87
/*
 * FFA error codes.  Negative status values defined by the FF-A
 * specification; 0 indicates success.  Note these are distinct from the
 * Linux errno space and must be translated by callers.
 */
#define FFA_RET_SUCCESS			(0)
#define FFA_RET_NOT_SUPPORTED		(-1)
#define FFA_RET_INVALID_PARAMETERS	(-2)
#define FFA_RET_NO_MEMORY		(-3)
#define FFA_RET_BUSY			(-4)
#define FFA_RET_INTERRUPTED		(-5)
#define FFA_RET_DENIED			(-6)
#define FFA_RET_RETRY			(-7)
#define FFA_RET_ABORTED			(-8)
#define FFA_RET_NO_DATA			(-9)
99
/*
 * FFA version encoding: major version in bits [30:16], minor version in
 * bits [15:0].
 */
#define FFA_MAJOR_VERSION_MASK	GENMASK(30, 16)
#define FFA_MINOR_VERSION_MASK	GENMASK(15, 0)
#define FFA_MAJOR_VERSION(x)	((u16)(FIELD_GET(FFA_MAJOR_VERSION_MASK, (x))))
#define FFA_MINOR_VERSION(x)	((u16)(FIELD_GET(FFA_MINOR_VERSION_MASK, (x))))
/* Pack a (major, minor) pair into the 32-bit wire encoding */
#define FFA_PACK_VERSION_INFO(major, minor)			\
	(FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) |		\
	 FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor)))
#define FFA_VERSION_1_0		FFA_PACK_VERSION_INFO(1, 0)
#define FFA_VERSION_1_1		FFA_PACK_VERSION_INFO(1, 1)
110
/*
 * FF-A specification mentions explicitly about '4K pages'. This should
 * not be confused with the kernel PAGE_SIZE, which is the translation
 * granule kernel is configured and may be one among 4K, 16K and 64K.
 */
#define FFA_PAGE_SIZE		SZ_4K

/*
 * Minimum buffer size/alignment encodings returned by an FFA_FEATURES
 * query for FFA_RXTX_MAP.  Note the encoding is not in size order:
 * 0 means 4K, 1 means 64K and 2 means 16K.
 */
#define FFA_FEAT_RXTX_MIN_SZ_4K		0
#define FFA_FEAT_RXTX_MIN_SZ_64K	1
#define FFA_FEAT_RXTX_MIN_SZ_16K	2
125
/* FFA Bus/Device/Driver related */

/*
 * An endpoint (partition) exposed as a device on the FFA bus.  Instances
 * are created via ffa_device_register() and matched to drivers by UUID
 * (see struct ffa_device_id).
 */
struct ffa_device {
	u32 id;				/* device identifier on the FFA bus */
	int vm_id;			/* partition (VM) ID of the endpoint */
	bool mode_32bit;		/* endpoint messaged via the 32-bit ABI
					 * (see ffa_msg_ops.mode_32bit_set) */
	uuid_t uuid;			/* service UUID used for driver matching */
	struct device dev;		/* embedded driver-model device */
	const struct ffa_ops *ops;	/* transport operations for this device */
};

/* Convert an embedded struct device back to its ffa_device container */
#define to_ffa_dev(d) container_of(d, struct ffa_device, dev)
137
/* Entry in a driver's id_table: devices are matched by UUID */
struct ffa_device_id {
	uuid_t uuid;
};

/*
 * A driver for devices on the FFA bus.
 * @name:     driver name
 * @probe:    called when a device matching @id_table is bound
 * @remove:   called when the device is unbound or goes away
 * @id_table: UUIDs of the devices this driver services
 * @driver:   embedded driver-model driver
 */
struct ffa_driver {
	const char *name;
	int (*probe)(struct ffa_device *sdev);
	void (*remove)(struct ffa_device *sdev);
	const struct ffa_device_id *id_table;

	struct device_driver driver;
};

/* Convert an embedded struct device_driver back to its ffa_driver */
#define to_ffa_driver(d) container_of(d, struct ffa_driver, driver)
152
153 static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data)
154 {
155 dev_set_drvdata(&fdev->dev, data);
156 }
157
158 static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev)
159 {
160 return dev_get_drvdata(&fdev->dev);
161 }
162
#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
/* Create an FFA device for the partition identified by @uuid/@vm_id */
struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
				       const struct ffa_ops *ops);
void ffa_device_unregister(struct ffa_device *ffa_dev);
/* Prefer the ffa_register()/module_ffa_driver() wrappers over calling these */
int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
			const char *mod_name);
void ffa_driver_unregister(struct ffa_driver *driver);
bool ffa_device_is_valid(struct ffa_device *ffa_dev);

#else
/*
 * Stubs used when the FF-A transport is not reachable: callers still
 * build, but device registration yields no device, driver registration
 * fails with -EINVAL, and no device is ever considered valid.
 */
static inline
struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
				       const struct ffa_ops *ops)
{
	return NULL;
}

static inline void ffa_device_unregister(struct ffa_device *dev) {}

static inline int
ffa_driver_register(struct ffa_driver *driver, struct module *owner,
		    const char *mod_name)
{
	return -EINVAL;
}

static inline void ffa_driver_unregister(struct ffa_driver *driver) {}

static inline
bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }

#endif /* CONFIG_ARM_FFA_TRANSPORT */
195
/* Register/unregister an FFA driver, filling in module ownership info */
#define ffa_register(driver) \
	ffa_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
#define ffa_unregister(driver) \
	ffa_driver_unregister(driver)

/**
 * module_ffa_driver() - Helper macro for registering an FFA driver
 * @__ffa_driver: ffa_driver structure
 *
 * Helper macro for FFA drivers to set up proper module init / exit
 * functions. Replaces module_init() and module_exit() and keeps people from
 * printing pointless things to the kernel log when their driver is loaded.
 */
#define module_ffa_driver(__ffa_driver)	\
	module_driver(__ffa_driver, ffa_register, ffa_unregister)

/* Bus on which FFA devices and drivers are matched; defined by the core */
extern struct bus_type ffa_bus_type;
213
/* FFA transport related */

/* Partition descriptor as reported by FFA_PARTITION_INFO_GET */
struct ffa_partition_info {
	u16 id;		/* partition ID */
	u16 exec_ctxt;	/* execution context count — per the FF-A partition
			 * info format; confirm against the spec revision */
/* partition supports receipt of direct requests */
#define FFA_PARTITION_DIRECT_RECV	BIT(0)
/* partition can send direct requests. */
#define FFA_PARTITION_DIRECT_SEND	BIT(1)
/* partition can send and receive indirect messages. */
#define FFA_PARTITION_INDIRECT_MSG	BIT(2)
/* partition runs in the AArch64 execution state. */
#define FFA_PARTITION_AARCH64_EXEC	BIT(8)
	u32 properties;	/* bitmap of the FFA_PARTITION_* flags above */
	u32 uuid[4];	/* partition service UUID, four 32-bit words */
};
229
/*
 * For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers.
 * Each member maps to one parameter register of the SMC call; the width
 * (w vs x) follows the calling convention in use.
 */
struct ffa_send_direct_data {
	unsigned long data0; /* w3/x3 */
	unsigned long data1; /* w4/x4 */
	unsigned long data2; /* w5/x5 */
	unsigned long data3; /* w6/x6 */
	unsigned long data4; /* w7/x7 */
};
238
/* One contiguous constituent range of a composite memory region */
struct ffa_mem_region_addr_range {
	/* The base IPA of the constituent memory region, aligned to 4 kiB */
	u64 address;
	/* The number of 4 kiB pages in the constituent memory region. */
	u32 pg_cnt;
	/* MBZ */
	u32 reserved;
};
246
/* Composite descriptor: a memory region made of one or more constituents */
struct ffa_composite_mem_region {
	/*
	 * The total number of 4 kiB pages included in this memory region. This
	 * must be equal to the sum of page counts specified in each
	 * `struct ffa_mem_region_addr_range`.
	 */
	u32 total_pg_cnt;
	/* The number of constituents included in this memory region range */
	u32 addr_range_cnt;
	/* MBZ */
	u64 reserved;
	/* An array of `addr_range_cnt` memory region constituents. */
	struct ffa_mem_region_addr_range constituents[];
};
260
/* Endpoint memory access descriptor: per-receiver permissions and flags */
struct ffa_mem_region_attributes {
	/* The ID of the VM to which the memory is being given or shared. */
	u16 receiver;
	/*
	 * The permissions with which the memory region should be mapped in the
	 * receiver's page table.
	 */
#define FFA_MEM_EXEC		BIT(3)
#define FFA_MEM_NO_EXEC		BIT(2)
#define FFA_MEM_RW		BIT(1)
#define FFA_MEM_RO		BIT(0)
	u8 attrs;
	/*
	 * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
	 * for memory regions with multiple borrowers.
	 */
#define FFA_MEM_RETRIEVE_SELF_BORROWER	BIT(0)
	u8 flag;
	/*
	 * Offset in bytes from the start of the outer `struct ffa_mem_region`
	 * to a `struct ffa_composite_mem_region` (the composite descriptor
	 * that holds the constituent address-range array).
	 */
	u32 composite_off;
	/* MBZ */
	u64 reserved;
};
286
/*
 * FF-A memory transaction descriptor.  This is the header written to the
 * TX buffer for FFA_MEM_{DONATE,LEND,SHARE,RETRIEVE_*} operations; the
 * endpoint memory access descriptors and composite region follow it.
 * The #defines interleaved below encode fields of `attributes` and `flags`.
 */
struct ffa_mem_region {
	/* The ID of the VM/owner which originally sent the memory region */
	u16 sender_id;
/* Memory type (normal vs device) for the `attributes` field */
#define FFA_MEM_NORMAL		BIT(5)
#define FFA_MEM_DEVICE		BIT(4)

/* Cacheability for normal memory */
#define FFA_MEM_WRITE_BACK	(3 << 2)
#define FFA_MEM_NON_CACHEABLE	(1 << 2)

/* Device memory attributes (Gathering/Reordering/Early-ack variants) */
#define FFA_DEV_nGnRnE		(0 << 2)
#define FFA_DEV_nGnRE		(1 << 2)
#define FFA_DEV_nGRE		(2 << 2)
#define FFA_DEV_GRE		(3 << 2)

/* Shareability for normal memory */
#define FFA_MEM_NON_SHAREABLE	(0)
#define FFA_MEM_OUTER_SHAREABLE	(2)
#define FFA_MEM_INNER_SHAREABLE	(3)
	/* Memory region attributes, upper byte MBZ pre v1.1 */
	u16 attributes;
/*
 * Clear memory region contents after unmapping it from the sender and
 * before mapping it for any receiver.
 */
#define FFA_MEM_CLEAR			BIT(0)
/*
 * Whether the hypervisor may time slice the memory sharing or retrieval
 * operation.
 */
#define FFA_TIME_SLICE_ENABLE		BIT(1)

/* Transaction type encoding for retrieve requests (bits [4:3] of flags) */
#define FFA_MEM_RETRIEVE_TYPE_IN_RESP	(0 << 3)
#define FFA_MEM_RETRIEVE_TYPE_SHARE	(1 << 3)
#define FFA_MEM_RETRIEVE_TYPE_LEND	(2 << 3)
#define FFA_MEM_RETRIEVE_TYPE_DONATE	(3 << 3)

/* Address alignment hint: valid bit plus hint value in bits [8:5] */
#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT	BIT(9)
#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x)		((x) << 5)
	/* Flags to control behaviour of the transaction. */
	u32 flags;
/* Helpers to split/join the 64-bit global handle into 32-bit halves */
#define HANDLE_LOW_MASK		GENMASK_ULL(31, 0)
#define HANDLE_HIGH_MASK	GENMASK_ULL(63, 32)
#define HANDLE_LOW(x)		((u32)(FIELD_GET(HANDLE_LOW_MASK, (x))))
#define	HANDLE_HIGH(x)		((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x))))

#define PACK_HANDLE(l, h)		\
	(FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h)))
	/*
	 * A globally-unique ID assigned by the hypervisor for a region
	 * of memory being sent between VMs.
	 */
	u64 handle;
	/*
	 * An implementation defined value associated with the receiver and the
	 * memory region.
	 */
	u64 tag;
	/* Size of each endpoint memory access descriptor, MBZ pre v1.1 */
	u32 ep_mem_size;
	/*
	 * The number of `ffa_mem_region_attributes` entries included in this
	 * transaction.
	 */
	u32 ep_count;
	/*
	 * 16-byte aligned offset from the base address of this descriptor
	 * to the first element of the endpoint memory access descriptor array
	 * Valid only from v1.1
	 */
	u32 ep_mem_offset;
	/* MBZ, valid only from v1.1 */
	u32 reserved[3];
};
359
/* Byte offset of constituent @x within a struct ffa_composite_mem_region */
#define CONSTITUENTS_OFFSET(x)	\
	(offsetof(struct ffa_composite_mem_region, constituents[x]))
362
363 static inline u32
364 ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version)
365 {
366 u32 offset = count * sizeof(struct ffa_mem_region_attributes);
367 /*
368 * Earlier to v1.1, the endpoint memory descriptor array started at
369 * offset 32(i.e. offset of ep_mem_offset in the current structure)
370 */
371 if (ffa_version <= FFA_VERSION_1_0)
372 offset += offsetof(struct ffa_mem_region, ep_mem_offset);
373 else
374 offset += sizeof(struct ffa_mem_region);
375
376 return offset;
377 }
378
/*
 * Arguments for the memory_share()/memory_lend() operations in
 * struct ffa_mem_ops.
 */
struct ffa_mem_ops_args {
	bool use_txbuf;		/* transmit descriptor via the mapped TX buffer
				 * rather than a dynamically supplied one —
				 * confirm exact semantics in the driver */
	u32 nattrs;		/* number of entries in @attrs */
	u32 flags;		/* transaction flags (FFA_MEM_CLEAR etc.) */
	u64 tag;		/* implementation-defined tag, see ffa_mem_region */
	u64 g_handle;		/* global memory handle (out for share/lend) */
	struct scatterlist *sg;	/* pages making up the memory region */
	struct ffa_mem_region_attributes *attrs; /* per-receiver access list */
};
388
/* Discovery/information operations exposed by the FFA transport */
struct ffa_info_ops {
	/* Negotiated FF-A ABI version (FFA_PACK_VERSION_INFO encoding) */
	u32 (*api_version_get)(void);
	/* Look up partition info for the partition identified by @uuid_str */
	int (*partition_info_get)(const char *uuid_str,
				  struct ffa_partition_info *buffer);
};
394
/* Direct messaging operations exposed by the FFA transport */
struct ffa_msg_ops {
	/* Switch @dev to the 32-bit (SMC32) messaging convention */
	void (*mode_32bit_set)(struct ffa_device *dev);
	/* Send a direct request and block for the direct response; @data
	 * carries the w3-w7/x3-x7 register payload in both directions */
	int (*sync_send_receive)(struct ffa_device *dev,
				 struct ffa_send_direct_data *data);
};
400
/* Memory management operations exposed by the FFA transport */
struct ffa_mem_ops {
	/* Reclaim a previously shared/lent region identified by @g_handle */
	int (*memory_reclaim)(u64 g_handle, u32 flags);
	/* Share memory with the receivers described in @args */
	int (*memory_share)(struct ffa_mem_ops_args *args);
	/* Lend memory to the receivers described in @args */
	int (*memory_lend)(struct ffa_mem_ops_args *args);
};
406
/* Scheduling operations exposed by the FFA transport */
struct ffa_cpu_ops {
	/* Give cycles to execution context @vcpu of partition @dev */
	int (*run)(struct ffa_device *dev, u16 vcpu);
};

/* Schedule-receiver callback: invoked with the target @vcpu; @is_per_vcpu
 * distinguishes per-vCPU from global notifications */
typedef void (*ffa_sched_recv_cb)(u16 vcpu, bool is_per_vcpu, void *cb_data);
/* Per-notification callback, invoked when @notify_id is signalled */
typedef void (*ffa_notifier_cb)(int notify_id, void *cb_data);
413
/* Notification operations exposed by the FFA transport */
struct ffa_notifier_ops {
	/* Register/unregister the schedule-receiver callback for @dev */
	int (*sched_recv_cb_register)(struct ffa_device *dev,
				      ffa_sched_recv_cb cb, void *cb_data);
	int (*sched_recv_cb_unregister)(struct ffa_device *dev);
	/* Bind @notify_id (optionally per-vCPU) and attach its callback */
	int (*notify_request)(struct ffa_device *dev, bool per_vcpu,
			      ffa_notifier_cb cb, void *cb_data, int notify_id);
	/* Unbind @notify_id and release its callback */
	int (*notify_relinquish)(struct ffa_device *dev, int notify_id);
	/* Signal @notify_id towards @dev; @vcpu is used when @per_vcpu */
	int (*notify_send)(struct ffa_device *dev, int notify_id, bool per_vcpu,
			   u16 vcpu);
};
424
/*
 * Aggregate operations table attached to every ffa_device; groups the
 * transport's info, messaging, memory, scheduling and notification ops.
 */
struct ffa_ops {
	const struct ffa_info_ops *info_ops;
	const struct ffa_msg_ops *msg_ops;
	const struct ffa_mem_ops *mem_ops;
	const struct ffa_cpu_ops *cpu_ops;
	const struct ffa_notifier_ops *notifier_ops;
};
432
433 #endif /* _LINUX_ARM_FFA_H */