* @handle_layer: returned youngest layer handling a subset of @masks. Not set
* if the function returns NULL.
*
- * Returns: landlock_cred(@cred) if any access rights specified in @masks is
+ * Return: landlock_cred(@cred) if any access rights specified in @masks are
* handled, or NULL otherwise.
*/
static inline const struct landlock_cred_security *
* @exe_size: Returned size of @exe_str (including the trailing null
* character), if any.
*
- * Returns: A pointer to an allocated buffer where @exe_str point to, %NULL if
+ * Return: A pointer to an allocated buffer where @exe_str points to, %NULL if
* there is no executable path, or an error otherwise.
*/
static const void *get_current_exe(const char **const exe_str,
}
/*
- * Returns: A newly allocated object describing a domain, or an error
+ * Return: A newly allocated object describing a domain, or an error
* otherwise.
*/
static struct landlock_details *get_current_details(void)
* Any new IOCTL commands that are implemented in fs/ioctl.c's do_vfs_ioctl()
* should be considered for inclusion here.
*
- * Returns: true if the IOCTL @cmd can not be restricted with Landlock for
- * device files.
+ * Return: True if the IOCTL @cmd cannot be restricted with Landlock for
+ * device files, false otherwise.
*/
static __attribute_const__ bool is_masked_device_ioctl(const unsigned int cmd)
{
* Check that a destination file hierarchy has more restrictions than a source
* file hierarchy. This is only used for link and rename actions.
*
- * Returns: true if child1 may be moved from parent1 to parent2 without
- * increasing its access rights. If child2 is set, an additional condition is
+ * Return: True if child1 may be moved from parent1 to parent2 without
+ * increasing its access rights (if child2 is set, an additional condition is
* that child2 may be used from parent2 to parent1 without increasing its access
- * rights.
+ * rights), false otherwise.
*/
static bool no_more_access(const struct layer_access_masks *const parent1,
const struct layer_access_masks *const child1,
* checks that the collected accesses and the remaining ones are enough to
* allow the request.
*
- * Returns:
- * - true if the access request is granted;
- * - false otherwise.
+ * Return: True if the access request is granted, false otherwise.
*/
static bool
is_access_to_paths_allowed(const struct landlock_ruleset *const domain,
* only handles walking on the same mount point and only checks one set of
* accesses.
*
- * Returns:
- * - true if all the domain access rights are allowed for @dir;
- * - false if the walk reached @mnt_root.
+ * Return: True if all the domain access rights are allowed for @dir, false if
+ * the walk reached @mnt_root.
*/
static bool collect_domain_accesses(const struct landlock_ruleset *const domain,
const struct dentry *const mnt_root,
* ephemeral matrices take some space on the stack, which limits the number of
* layers to a deemed reasonable number: 16.
*
- * Returns:
- * - 0 if access is allowed;
- * - -EXDEV if @old_dentry would inherit new access rights from @new_dir;
- * - -EACCES if file removal or creation is denied.
+ * Return: 0 if access is allowed, -EXDEV if @old_dentry would inherit new
+ * access rights from @new_dir, or -EACCES if file removal or creation is
+ * denied.
*/
static int current_check_refer_path(struct dentry *const old_dentry,
const struct path *const new_dir,
*
* @number_of_ids: Number of IDs to hold. Must be greater than one.
*
- * Returns: The first ID in the range.
+ * Return: The first ID in the range.
*/
u64 landlock_get_id_range(size_t number_of_ids)
{
* @masks: Layer access masks to populate.
* @key_type: The key type to switch between access masks of different types.
*
- * Returns: An access mask where each access right bit is set which is handled
+ * Return: An access mask where each access right bit is set which is handled
* in any of the active layers in @domain.
*/
access_mask_t
*
* @domain: Landlock ruleset (used as a domain)
*
- * Returns: an access_masks result of the OR of all the domain's access masks.
+ * Return: An access_masks result of the OR of all the domain's access masks.
*/
static inline struct access_masks
landlock_union_access_masks(const struct landlock_ruleset *const domain)
* @server: IPC receiver domain.
* @scope: The scope restriction criteria.
*
- * Returns: True if @server is in a different domain from @client, and @client
- * is scoped to access @server (i.e. access should be denied).
+ * Return: True if @server is in a different domain from @client and @client
+ * is scoped to access @server (i.e. access should be denied), false otherwise.
*/
static bool domain_is_scoped(const struct landlock_ruleset *const client,
const struct landlock_ruleset *const server,
* capacity. This can legitimately happen if new threads get started after we
* grew the capacity.
*
- * Returns:
- * A pointer to the preallocated context struct, with task filled in.
- *
- * NULL, if we ran out of preallocated context structs.
+ * Return: A pointer to the preallocated context struct with task filled in, or
+ * NULL if the preallocated context structs have run out.
*/
static struct tsync_work *tsync_works_provide(struct tsync_works *s,
struct task_struct *task)
* On a successful return, the subsequent n calls to tsync_works_provide() are
* guaranteed to succeed. (size + n <= capacity)
*
- * Returns:
- * -ENOMEM if the (re)allocation fails
-
- * 0 if the allocation succeeds, partially succeeds, or no reallocation
- * was needed
+ * Return: 0 if sufficient space for n more elements could be provided, -ENOMEM
+ * on allocation errors, or -EOVERFLOW in case of integer overflow.
*/
static int tsync_works_grow_by(struct tsync_works *s, size_t n, gfp_t flags)
{
* For each added task_work, atomically increments shared_ctx->num_preparing and
* shared_ctx->num_unfinished.
*
- * Returns:
- * true, if at least one eligible sibling thread was found
+ * Return: True if at least one eligible sibling thread was found, false
+ * otherwise.
*/
static bool schedule_task_work(struct tsync_works *works,
struct tsync_shared_context *shared_ctx)