firmware: arm_ffa: Add support for MEM_* interfaces
author    Sudeep Holla <sudeep.holla@arm.com>
          Fri, 21 May 2021 15:10:33 +0000 (16:10 +0100)
committer Sudeep Holla <sudeep.holla@arm.com>
          Wed, 26 May 2021 21:38:43 +0000 (22:38 +0100)
Most of the MEM_* APIs share the same parameters, so they can be
generalised. Currently only MEM_SHARE is implemented; the user-space
interface for it is not added yet.
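
For illustration only, here is a sketch of how an in-kernel client
could drive the new ops once it holds a valid ffa_device.
ffa_dev_ops_get(), FFA_MEM_RW, FFA_PAGE_SIZE and struct
ffa_mem_ops_args are all part of this series; ffa_dev and buf (a
page-aligned, page-sized kernel allocation) are hypothetical:

    #include <linux/arm_ffa.h>
    #include <linux/scatterlist.h>

    static int example_share_and_reclaim(struct ffa_device *ffa_dev, void *buf)
    {
            struct scatterlist sg;
            struct ffa_mem_region_attributes mem_attr = {
                    .receiver = ffa_dev->vm_id, /* partition to share with */
                    .attrs = FFA_MEM_RW,
            };
            struct ffa_mem_ops_args args = {
                    .use_txbuf = true,  /* borrow the driver's Tx buffer */
                    .nattrs = 1,
                    .attrs = &mem_attr,
                    .sg = &sg,
            };
            const struct ffa_dev_ops *ops = ffa_dev_ops_get(ffa_dev);
            int ret;

            if (!ops)
                    return -ENODEV;

            sg_init_one(&sg, buf, FFA_PAGE_SIZE);
            ret = ops->memory_share(ffa_dev, &args);
            if (ret)
                    return ret;

            /* args.g_handle now identifies the shared region globally */
            return ops->memory_reclaim(args.g_handle, 0);
    }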

Link: https://lore.kernel.org/r/20210521151033.181846-6-sudeep.holla@arm.com
Tested-by: Jens Wiklander <jens.wiklander@linaro.org>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
drivers/firmware/arm_ffa/driver.c
include/linux/arm_ffa.h

diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 85b9bbdbeabe50bc79354d14e3ccdf0d83f00d29..b1edb4b2e94aaf32af5d7eddc459aac71e22634c 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -28,6 +28,8 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/uuid.h>
 
@@ -349,6 +351,197 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
        return -EINVAL;
 }
 
+static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
+                             u32 frag_len, u32 len, u64 *handle)
+{
+       ffa_value_t ret;
+
+       invoke_ffa_fn((ffa_value_t){
+                     .a0 = func_id, .a1 = len, .a2 = frag_len,
+                     .a3 = buf, .a4 = buf_sz,
+                     }, &ret);
+
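+       /* The operation may be time sliced by the firmware; resume until done */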
+       while (ret.a0 == FFA_MEM_OP_PAUSE)
+               invoke_ffa_fn((ffa_value_t){
+                             .a0 = FFA_MEM_OP_RESUME,
+                             .a1 = ret.a1, .a2 = ret.a2,
+                             }, &ret);
+
+       if (ret.a0 == FFA_ERROR)
+               return ffa_to_linux_errno((int)ret.a2);
+
+       if (ret.a0 != FFA_SUCCESS)
+               return -EOPNOTSUPP;
+
+       if (handle)
+               *handle = PACK_HANDLE(ret.a2, ret.a3);
+
+       return frag_len;
+}
+
+static int ffa_mem_next_frag(u64 handle, u32 frag_len)
+{
+       ffa_value_t ret;
+
+       invoke_ffa_fn((ffa_value_t){
+                     .a0 = FFA_MEM_FRAG_TX,
+                     .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle),
+                     .a3 = frag_len,
+                     }, &ret);
+
+       while (ret.a0 == FFA_MEM_OP_PAUSE)
+               invoke_ffa_fn((ffa_value_t){
+                             .a0 = FFA_MEM_OP_RESUME,
+                             .a1 = ret.a1, .a2 = ret.a2,
+                             }, &ret);
+
+       if (ret.a0 == FFA_ERROR)
+               return ffa_to_linux_errno((int)ret.a2);
+
+       if (ret.a0 != FFA_MEM_FRAG_RX)
+               return -EOPNOTSUPP;
+
+       return ret.a3;
+}
+
+static int
+ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
+                     u32 len, u64 *handle, bool first)
+{
+       if (!first)
+               return ffa_mem_next_frag(*handle, frag_len);
+
+       return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle);
+}
+
+static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
+{
+       u32 num_pages = 0;
+
+       do {
+               num_pages += sg->length / FFA_PAGE_SIZE;
+       } while ((sg = sg_next(sg)));
+
+       return num_pages;
+}
+
+static int
+ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
+                      struct ffa_mem_ops_args *args)
+{
+       int rc = 0;
+       bool first = true;
+       phys_addr_t addr = 0;
+       struct ffa_composite_mem_region *composite;
+       struct ffa_mem_region_addr_range *constituents;
+       struct ffa_mem_region_attributes *ep_mem_access;
+       struct ffa_mem_region *mem_region = buffer;
+       u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);
+
+       mem_region->tag = args->tag;
+       mem_region->flags = args->flags;
+       mem_region->sender_id = drv_info->vm_id;
+       mem_region->attributes = FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK |
+                                FFA_MEM_INNER_SHAREABLE;
+       ep_mem_access = &mem_region->ep_mem_access[0];
+
+       for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
+               ep_mem_access->receiver = args->attrs[idx].receiver;
+               ep_mem_access->attrs = args->attrs[idx].attrs;
+               ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs);
+       }
+       mem_region->ep_count = args->nattrs;
+
+       composite = buffer + COMPOSITE_OFFSET(args->nattrs);
+       composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
+       composite->addr_range_cnt = num_entries;
+
+       length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries);
+       frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0);
+       if (frag_len > max_fragsize)
+               return -ENXIO;
+
+       if (!args->use_txbuf) {
+               addr = virt_to_phys(buffer);
+               buf_sz = max_fragsize / FFA_PAGE_SIZE;
+       }
+
+       constituents = buffer + frag_len;
+       idx = 0;
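+       /*
+        * Stream the constituents into the buffer, transmitting a
+        * fragment each time the Tx buffer fills up.
+        */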
+       do {
+               if (frag_len == max_fragsize) {
+                       rc = ffa_transmit_fragment(func_id, addr, buf_sz,
+                                                  frag_len, length,
+                                                  &args->g_handle, first);
+                       if (rc < 0)
+                               return -ENXIO;
+
+                       first = false;
+                       idx = 0;
+                       frag_len = 0;
+                       constituents = buffer;
+               }
+
+               if ((void *)constituents - buffer > max_fragsize) {
+                       pr_err("Memory Region Fragment > Tx Buffer size\n");
+                       return -EFAULT;
+               }
+
+               constituents->address = sg_phys(args->sg);
+               constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
+               constituents++;
+               frag_len += sizeof(struct ffa_mem_region_addr_range);
+       } while ((args->sg = sg_next(args->sg)));
+
+       return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
+                                    length, &args->g_handle, first);
+}
+
+static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
+{
+       int ret;
+       void *buffer;
+
+       if (!args->use_txbuf) {
+               buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
+               if (!buffer)
+                       return -ENOMEM;
+       } else {
+               buffer = drv_info->tx_buffer;
+               mutex_lock(&drv_info->tx_lock);
+       }
+
+       ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args);
+
+       if (args->use_txbuf)
+               mutex_unlock(&drv_info->tx_lock);
+       else
+               free_pages_exact(buffer, RXTX_BUFFER_SIZE);
+
+       return ret < 0 ? ret : 0;
+}
+
+static int ffa_memory_reclaim(u64 g_handle, u32 flags)
+{
+       ffa_value_t ret;
+
+       invoke_ffa_fn((ffa_value_t){
+                     .a0 = FFA_MEM_RECLAIM,
+                     .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle),
+                     .a3 = flags,
+                     }, &ret);
+
+       if (ret.a0 == FFA_ERROR)
+               return ffa_to_linux_errno((int)ret.a2);
+
+       return 0;
+}
+
 static u32 ffa_api_version_get(void)
 {
        return drv_info->version;
@@ -387,11 +580,22 @@ static int ffa_sync_send_receive(struct ffa_device *dev,
                                       dev->mode_32bit, data);
 }
 
+static int
+ffa_memory_share(struct ffa_device *dev, struct ffa_mem_ops_args *args)
+{
+       if (dev->mode_32bit)
+               return ffa_memory_ops(FFA_MEM_SHARE, args);
+
+       return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
+}
+
 static const struct ffa_dev_ops ffa_ops = {
        .api_version_get = ffa_api_version_get,
        .partition_info_get = ffa_partition_info_get,
        .mode_32bit_set = ffa_mode_32bit_set,
        .sync_send_receive = ffa_sync_send_receive,
+       .memory_reclaim = ffa_memory_reclaim,
+       .memory_share = ffa_memory_share,
 };
 
 const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev)
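
The 64-bit global handle travels to and from the firmware as two 32-bit
register values: it is returned in w2/w3 by the call that creates it and
passed back in w1/w2 for FFA_MEM_FRAG_TX and FFA_MEM_RECLAIM. A
self-contained, plain-C sketch of the arithmetic behind the
HANDLE_LOW()/HANDLE_HIGH()/PACK_HANDLE() macros from the header hunk
below, with shifts standing in for FIELD_GET()/FIELD_PREP():

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t handle_low(uint64_t h)  { return (uint32_t)h; }
    static uint32_t handle_high(uint64_t h) { return (uint32_t)(h >> 32); }

    static uint64_t pack_handle(uint32_t lo, uint32_t hi)
    {
            return (uint64_t)lo | ((uint64_t)hi << 32);
    }

    int main(void)
    {
            uint64_t g_handle = 0x1122334455667788ULL;

            /* Split as done for FFA_MEM_RECLAIM: a1 = low, a2 = high */
            printf("low=%#x high=%#x\n",
                   (unsigned int)handle_low(g_handle),
                   (unsigned int)handle_high(g_handle));

            /* Re-packed as ffa_mem_first_frag() does from ret.a2/ret.a3 */
            printf("repacked=%#llx\n", (unsigned long long)
                   pack_handle(handle_low(g_handle), handle_high(g_handle)));
            return 0;
    }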
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index d672673fc621dcad789c753cd7884cf638a67252..505c679b6a9b726021e613a637de73bfd1596363 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -116,6 +116,142 @@ struct ffa_send_direct_data {
        unsigned long data4; /* w7/x7 */
 };
 
+struct ffa_mem_region_addr_range {
+       /* The base IPA of the constituent memory region, aligned to 4 kiB */
+       u64 address;
+       /* The number of 4 kiB pages in the constituent memory region. */
+       u32 pg_cnt;
+       u32 reserved;
+};
+
+struct ffa_composite_mem_region {
+       /*
+        * The total number of 4 kiB pages included in this memory region. This
+        * must be equal to the sum of page counts specified in each
+        * `struct ffa_mem_region_addr_range`.
+        */
+       u32 total_pg_cnt;
+       /* The number of constituents included in this memory region range */
+       u32 addr_range_cnt;
+       u64 reserved;
+       /* An array of `addr_range_cnt` memory region constituents. */
+       struct ffa_mem_region_addr_range constituents[];
+};
+
+struct ffa_mem_region_attributes {
+       /* The ID of the VM to which the memory is being given or shared. */
+       u16 receiver;
+       /*
+        * The permissions with which the memory region should be mapped in the
+        * receiver's page table.
+        */
+#define FFA_MEM_EXEC           BIT(3)
+#define FFA_MEM_NO_EXEC                BIT(2)
+#define FFA_MEM_RW             BIT(1)
+#define FFA_MEM_RO             BIT(0)
+       u8 attrs;
+       /*
+        * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
+        * for memory regions with multiple borrowers.
+        */
+#define FFA_MEM_RETRIEVE_SELF_BORROWER BIT(0)
+       u8 flag;
+       /*
+        * Offset in bytes from the start of the outer `struct ffa_mem_region`
+        * to a `struct ffa_composite_mem_region`.
+        */
+       u32 composite_off;
+       u64 reserved;
+};
+
+struct ffa_mem_region {
+       /* The ID of the VM/owner which originally sent the memory region */
+       u16 sender_id;
+#define FFA_MEM_NORMAL         BIT(5)
+#define FFA_MEM_DEVICE         BIT(4)
+
+#define FFA_MEM_WRITE_BACK     (3 << 2)
+#define FFA_MEM_NON_CACHEABLE  (1 << 2)
+
+#define FFA_DEV_nGnRnE         (0 << 2)
+#define FFA_DEV_nGnRE          (1 << 2)
+#define FFA_DEV_nGRE           (2 << 2)
+#define FFA_DEV_GRE            (3 << 2)
+
+#define FFA_MEM_NON_SHAREABLE  (0)
+#define FFA_MEM_OUTER_SHAREABLE        (2)
+#define FFA_MEM_INNER_SHAREABLE        (3)
+       u8 attributes;
+       u8 reserved_0;
+/*
+ * Clear memory region contents after unmapping it from the sender and
+ * before mapping it for any receiver.
+ */
+#define FFA_MEM_CLEAR                  BIT(0)
+/*
+ * Whether the hypervisor may time slice the memory sharing or retrieval
+ * operation.
+ */
+#define FFA_TIME_SLICE_ENABLE          BIT(1)
+
+#define FFA_MEM_RETRIEVE_TYPE_IN_RESP  (0 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_SHARE    (1 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_LEND     (2 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_DONATE   (3 << 3)
+
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT       BIT(9)
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x)         ((x) << 5)
+       /* Flags to control behaviour of the transaction. */
+       u32 flags;
+#define HANDLE_LOW_MASK                GENMASK_ULL(31, 0)
+#define HANDLE_HIGH_MASK       GENMASK_ULL(63, 32)
+#define HANDLE_LOW(x)          ((u32)(FIELD_GET(HANDLE_LOW_MASK, (x))))
+#define HANDLE_HIGH(x)         ((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x))))
+
+#define PACK_HANDLE(l, h)              \
+       (FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h)))
+       /*
+        * A globally-unique ID assigned by the hypervisor for a region
+        * of memory being sent between VMs.
+        */
+       u64 handle;
+       /*
+        * An implementation-defined value associated with the receiver and the
+        * memory region.
+        */
+       u64 tag;
+       u32 reserved_1;
+       /*
+        * The number of `ffa_mem_region_attributes` entries included in this
+        * transaction.
+        */
+       u32 ep_count;
+       /*
+        * An array of endpoint memory access descriptors.
+        * Each one specifies a memory region offset, an endpoint and the
+        * attributes with which this memory region should be mapped in that
+        * endpoint's page table.
+        */
+       struct ffa_mem_region_attributes ep_mem_access[];
+};
+
+#define COMPOSITE_OFFSET(x)    \
+       (offsetof(struct ffa_mem_region, ep_mem_access[x]))
+#define CONSTITUENTS_OFFSET(x) \
+       (offsetof(struct ffa_composite_mem_region, constituents[x]))
+#define COMPOSITE_CONSTITUENTS_OFFSET(x, y)    \
+       (COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y))
+
+struct ffa_mem_ops_args {
+       bool use_txbuf;
+       u32 nattrs;
+       u32 flags;
+       u64 tag;
+       u64 g_handle;
+       struct scatterlist *sg;
+       struct ffa_mem_region_attributes *attrs;
+};
+
 struct ffa_dev_ops {
        u32 (*api_version_get)(void);
        int (*partition_info_get)(const char *uuid_str,
@@ -123,6 +259,9 @@ struct ffa_dev_ops {
        void (*mode_32bit_set)(struct ffa_device *dev);
        int (*sync_send_receive)(struct ffa_device *dev,
                                 struct ffa_send_direct_data *data);
+       int (*memory_reclaim)(u64 g_handle, u32 flags);
+       int (*memory_share)(struct ffa_device *dev,
+                           struct ffa_mem_ops_args *args);
 };
 
 #endif /* _LINUX_ARM_FFA_H */
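
To make the offset macros concrete: the descriptor that
ffa_setup_and_transmit() builds starts with one struct ffa_mem_region,
followed by `nattrs` endpoint access descriptors, the composite region
header and finally the constituent array. A minimal kernel-context
sketch (the helper name is hypothetical; the values follow directly
from the struct definitions above):

    #include <linux/arm_ffa.h>
    #include <linux/printk.h>

    static void ffa_descriptor_layout_example(void)
    {
            u32 nattrs = 1, num_entries = 2;

            /* Fixed part: region header + EP descriptors + composite header */
            u32 frag_len = COMPOSITE_CONSTITUENTS_OFFSET(nattrs, 0);
            /* Total length once both constituents have been appended */
            u32 length = COMPOSITE_CONSTITUENTS_OFFSET(nattrs, num_entries);

            pr_info("composite at %zu, fixed part %u, total %u bytes\n",
                    COMPOSITE_OFFSET(nattrs), frag_len, length);
    }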