/**
* af_alg_count_tsgl - Count number of TX SG entries
*
- * The counting starts from the beginning of the SGL to @bytes. If
- * an @offset is provided, the counting of the SG entries starts at the @offset.
+ * The counting starts from the beginning of the SGL to @bytes.
*
* @sk: socket of connection to user space
* @bytes: Count the number of SG entries holding given number of bytes.
- * @offset: Start the counting of SG entries from the given offset.
* Return: Number of TX SG entries found given the constraints
*/
-unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes)
{
const struct alg_sock *ask = alg_sk(sk);
const struct af_alg_ctx *ctx = ask->private;
const struct scatterlist *sg = sgl->sg;
for (i = 0; i < sgl->cur; i++) {
- size_t bytes_count;
-
- /* Skip offset */
- if (offset >= sg[i].length) {
- offset -= sg[i].length;
- bytes -= sg[i].length;
- continue;
- }
-
- bytes_count = sg[i].length - offset;
-
- offset = 0;
sgl_count++;
-
- /* If we have seen requested number of bytes, stop */
- if (bytes_count >= bytes)
+ if (sg[i].length >= bytes)
return sgl_count;
- bytes -= bytes_count;
+ bytes -= sg[i].length;
}
}
* af_alg_pull_tsgl - Release the specified buffers from TX SGL
*
* If @dst is non-null, reassign the pages to @dst. The caller must release
- * the pages. If @dst_offset is given only reassign the pages to @dst starting
- * at the @dst_offset (byte). The caller must ensure that @dst is large
- * enough (e.g. by using af_alg_count_tsgl with the same offset).
+ * the pages.
*
* @sk: socket of connection to user space
* @used: Number of bytes to pull from TX SGL
* @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
* caller must release the buffers in dst.
- * @dst_offset: Reassign the TX SGL from given offset. All buffers before
- * reaching the offset is released.
*/
-void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
- size_t dst_offset)
+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
* SG entries in dst.
*/
if (dst) {
- if (dst_offset >= plen) {
- /* discard page before offset */
- dst_offset -= plen;
- } else {
- /* reassign page to dst after offset */
- get_page(page);
- sg_set_page(dst + j, page,
- plen - dst_offset,
- sg[i].offset + dst_offset);
- dst_offset = 0;
- j++;
- }
+			/* reassign page to dst */
+ get_page(page);
+ sg_set_page(dst + j, page, plen, sg[i].offset);
+ j++;
}
sg[i].length -= plen;
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
-#include <crypto/skcipher.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
struct crypto_aead *tfm = pask->private;
- unsigned int i, as = crypto_aead_authsize(tfm);
+ unsigned int as = crypto_aead_authsize(tfm);
struct af_alg_async_req *areq;
- struct af_alg_tsgl *tsgl, *tmp;
struct scatterlist *rsgl_src, *tsgl_src = NULL;
int err = 0;
size_t used = 0; /* [in] TX bufs to be en/decrypted */
outlen -= less;
}
+ /*
+ * Create a per request TX SGL for this request which tracks the
+ * SG entries from the global TX SGL.
+ */
processed = used + ctx->aead_assoclen;
- list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
- for (i = 0; i < tsgl->cur; i++) {
- struct scatterlist *process_sg = tsgl->sg + i;
-
- if (!(process_sg->length) || !sg_page(process_sg))
- continue;
- tsgl_src = process_sg;
- break;
- }
- if (tsgl_src)
- break;
- }
- if (processed && !tsgl_src) {
- err = -EFAULT;
+ areq->tsgl_entries = af_alg_count_tsgl(sk, processed);
+ if (!areq->tsgl_entries)
+ areq->tsgl_entries = 1;
+ areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
+ areq->tsgl_entries),
+ GFP_KERNEL);
+ if (!areq->tsgl) {
+ err = -ENOMEM;
goto free;
}
+ sg_init_table(areq->tsgl, areq->tsgl_entries);
+ af_alg_pull_tsgl(sk, processed, areq->tsgl);
+ tsgl_src = areq->tsgl;
/*
* Copy of AAD from source to destination
* when user space uses an in-place cipher operation, the kernel
* will copy the data as it does not see whether such in-place operation
* is initiated.
- *
- * To ensure efficiency, the following implementation ensure that the
- * ciphers are invoked to perform a crypto operation in-place. This
- * is achieved by memory management specified as follows.
*/
/* Use the RX SGL as source (and destination) for crypto op. */
rsgl_src = areq->first_rsgl.sgl.sgt.sgl;
- if (ctx->enc) {
- /*
- * Encryption operation - The in-place cipher operation is
- * achieved by the following operation:
- *
- * TX SGL: AAD || PT
- * | |
- * | copy |
- * v v
- * RX SGL: AAD || PT || Tag
- */
- memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src,
- processed);
- af_alg_pull_tsgl(sk, processed, NULL, 0);
- } else {
- /*
- * Decryption operation - To achieve an in-place cipher
- * operation, the following SGL structure is used:
- *
- * TX SGL: AAD || CT || Tag
- * | | ^
- * | copy | | Create SGL link.
- * v v |
- * RX SGL: AAD || CT ----+
- */
-
- /* Copy AAD || CT to RX SGL buffer for in-place operation. */
- memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, outlen);
-
- /* Create TX SGL for tag and chain it to RX SGL. */
- areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
- processed - as);
- if (!areq->tsgl_entries)
- areq->tsgl_entries = 1;
- areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
- areq->tsgl_entries),
- GFP_KERNEL);
- if (!areq->tsgl) {
- err = -ENOMEM;
- goto free;
- }
- sg_init_table(areq->tsgl, areq->tsgl_entries);
-
- /* Release TX SGL, except for tag data and reassign tag data. */
- af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
-
- /* chain the areq TX SGL holding the tag with RX SGL */
- if (usedpages) {
- /* RX SGL present */
- struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
- struct scatterlist *sg = sgl_prev->sgt.sgl;
-
- sg_unmark_end(sg + sgl_prev->sgt.nents - 1);
- sg_chain(sg, sgl_prev->sgt.nents + 1, areq->tsgl);
- } else
- /* no RX SGL present (e.g. authentication only) */
- rsgl_src = areq->tsgl;
- }
+ memcpy_sglist(rsgl_src, tsgl_src, ctx->aead_assoclen);
/* Initialize the crypto operation */
- aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
+ aead_request_set_crypt(&areq->cra_u.aead_req, tsgl_src,
areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
struct crypto_aead *tfm = pask->private;
unsigned int ivlen = crypto_aead_ivsize(tfm);
- af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
+ af_alg_pull_tsgl(sk, ctx->used, NULL);
sock_kzfree_s(sk, ctx->iv, ivlen);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
* Create a per request TX SGL for this request which tracks the
* SG entries from the global TX SGL.
*/
- areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
+ areq->tsgl_entries = af_alg_count_tsgl(sk, len);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
goto free;
}
sg_init_table(areq->tsgl, areq->tsgl_entries);
- af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
+ af_alg_pull_tsgl(sk, len, areq->tsgl);
/* Initialize the crypto operation */
skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
struct alg_sock *pask = alg_sk(psk);
struct crypto_skcipher *tfm = pask->private;
- af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
+ af_alg_pull_tsgl(sk, ctx->used, NULL);
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
if (ctx->state)
sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));