Commit 64648a5

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:

   - racy use of ctx->rcvused in af_alg

   - algif_aead crash in chacha20poly1305

   - freeing bogus pointer in pcrypt

   - build error on MIPS in mpi

   - memory leak in inside-secure

   - memory overwrite in inside-secure

   - NULL pointer dereference in inside-secure

   - state corruption in inside-secure

   - build error without CRYPTO_GF128MUL in chelsio

   - use after free in n2"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: inside-secure - do not use areq->result for partial results
  crypto: inside-secure - fix request allocations in invalidation path
  crypto: inside-secure - free requests even if their handling failed
  crypto: inside-secure - per request invalidation
  lib/mpi: Fix umul_ppmm() for MIPS64r6
  crypto: pcrypt - fix freeing pcrypt instances
  crypto: n2 - cure use after free
  crypto: af_alg - Fix race around ctx->rcvused by making it atomic_t
  crypto: chacha20poly1305 - validate the digest size
  crypto: chelsio - select CRYPTO_GF128MUL
torvalds committed Jan 5, 2018
2 parents d8887f1 + 2973633 commit 64648a5
Showing 12 changed files with 173 additions and 62 deletions.
4 changes: 2 additions & 2 deletions crypto/af_alg.c
@@ -664,7 +664,7 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
unsigned int i;

list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
-ctx->rcvused -= rsgl->sg_num_bytes;
+atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused);
af_alg_free_sg(&rsgl->sgl);
list_del(&rsgl->list);
if (rsgl != &areq->first_rsgl)
@@ -1163,7 +1163,7 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,

areq->last_rsgl = rsgl;
len += err;
-ctx->rcvused += err;
+atomic_add(err, &ctx->rcvused);
rsgl->sg_num_bytes = err;
iov_iter_advance(&msg->msg_iter, err);
}
2 changes: 1 addition & 1 deletion crypto/algif_aead.c
@@ -571,7 +571,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
ctx->used = 0;
-ctx->rcvused = 0;
+atomic_set(&ctx->rcvused, 0);
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
2 changes: 1 addition & 1 deletion crypto/algif_skcipher.c
@@ -390,7 +390,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
ctx->used = 0;
-ctx->rcvused = 0;
+atomic_set(&ctx->rcvused, 0);
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
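
The ctx->rcvused changes in all three files above fix the same lost-update race: the recvmsg path and the async completion path can update the counter concurrently, and a plain `+=`/`-=` is a read-modify-write that can drop one of the two updates. Converting the field to atomic_t makes each update a single atomic operation. Below is a minimal userspace illustration of the failure mode, using C11 atomics as a stand-in for the kernel's atomic_t; the names and the iteration count are invented for the demo, this is not kernel code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int plain_counter;          /* like the old "int rcvused": racy */
static atomic_int atomic_counter;  /* like the new "atomic_t rcvused": safe */

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		plain_counter += 1;                   /* load, add, store: updates can be lost */
		atomic_fetch_add(&atomic_counter, 1); /* one atomic read-modify-write */
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	/* plain_counter typically prints < 200000; atomic_counter is exactly 200000 */
	printf("plain=%d atomic=%d\n", plain_counter, atomic_counter);
	return 0;
}
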
6 changes: 5 additions & 1 deletion crypto/chacha20poly1305.c
@@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
algt->mask));
if (IS_ERR(poly))
return PTR_ERR(poly);
+poly_hash = __crypto_hash_alg_common(poly);
+
+err = -EINVAL;
+if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
+goto out_put_poly;

err = -ENOMEM;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
@@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,

ctx = aead_instance_ctx(inst);
ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
-poly_hash = __crypto_hash_alg_common(poly);
err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
aead_crypto_instance(inst));
if (err)
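
This is the algif_aead crash fix from the pull message: chacha20poly1305 assumes in several places that the underlying "poly1305" hash produces a 16-byte (POLY1305_DIGEST_SIZE) digest, so instantiating it over a hash with any other digest size corrupted state sized for 16 bytes. The new check rejects such a hash with -EINVAL before the instance is allocated, which also keeps the error path trivial: only the hash reference has to be dropped. A compact sketch of this validate-before-allocate shape, with hypothetical stand-in types rather than the real crypto API:

#include <errno.h>
#include <stdlib.h>

#define POLY1305_DIGEST_SIZE 16

/* hypothetical stand-ins for the crypto API types used above */
struct hash_alg  { unsigned int digestsize; };
struct aead_inst { struct hash_alg *poly; };

static int create_instance(struct hash_alg *poly, struct aead_inst **out)
{
	struct aead_inst *inst;

	/* validate the looked-up hash before building anything on top of it */
	if (poly->digestsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	inst = calloc(1, sizeof(*inst));
	if (!inst)
		return -ENOMEM;

	inst->poly = poly;
	*out = inst;
	return 0;
}
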
19 changes: 10 additions & 9 deletions crypto/pcrypt.c
@@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
crypto_free_aead(ctx->child);
}

+static void pcrypt_free(struct aead_instance *inst)
+{
+struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
+
+crypto_drop_aead(&ctx->spawn);
+kfree(inst);
+}

static int pcrypt_init_instance(struct crypto_instance *inst,
struct crypto_alg *alg)
{
@@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.encrypt = pcrypt_aead_encrypt;
inst->alg.decrypt = pcrypt_aead_decrypt;

+inst->free = pcrypt_free;
+
err = aead_register_instance(tmpl, inst);
if (err)
goto out_drop_aead;
@@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
return -EINVAL;
}

-static void pcrypt_free(struct crypto_instance *inst)
-{
-struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-crypto_drop_aead(&ctx->spawn);
-kfree(inst);
-}

static int pcrypt_cpumask_change_notify(struct notifier_block *self,
unsigned long val, void *data)
{
@@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
static struct crypto_template pcrypt_tmpl = {
.name = "pcrypt",
.create = pcrypt_create,
-.free = pcrypt_free,
.module = THIS_MODULE,
};

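
The pcrypt bug was a wrong-pointer kfree: instances are allocated as struct aead_instance, but the old free hook was registered on the crypto_template and received the embedded crypto_instance pointer, which does not sit at offset 0 inside the aead_instance, so kfree() was handed a pointer the allocator never returned. Registering the hook on the instance itself (inst->free) passes the correct outer pointer. A standalone sketch of the hazard, with a hypothetical layout standing in for the kernel structs:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* hypothetical stand-ins: the inner "base" deliberately not at offset 0 */
struct crypto_instance_s { char name[64]; };
struct aead_instance_s {
	void (*free)(struct aead_instance_s *inst); /* per-instance free hook */
	struct crypto_instance_s base;
};

static void aead_free(struct aead_instance_s *inst)
{
	free(inst); /* matches the calloc() below: the outer pointer */
}

int main(void)
{
	struct aead_instance_s *inst = calloc(1, sizeof(*inst));

	if (!inst)
		return 1;
	inst->free = aead_free;

	printf("outer=%p inner=%p (inner is %zu bytes in)\n",
	       (void *)inst, (void *)&inst->base,
	       offsetof(struct aead_instance_s, base));

	inst->free(inst); /* correct: free through the outer pointer */
	/* the old code effectively did free(&inst->base): a bogus pointer */
	return 0;
}
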
1 change: 1 addition & 0 deletions drivers/crypto/chelsio/Kconfig
@@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_AUTHENC
+select CRYPTO_GF128MUL
---help---
The Chelsio Crypto Co-processor driver for T6 adapters.

1 change: 1 addition & 0 deletions drivers/crypto/inside-secure/safexcel.c
@@ -607,6 +607,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
ndesc = ctx->handle_result(priv, ring, sreq->req,
&should_complete, &ret);
if (ndesc < 0) {
+kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
return;
}
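
This one-line change plugs the "memory leak in inside-secure" from the pull message: the result-handling loop owns the dequeued struct safexcel_request and frees it after normal completion further down, but the early return on ndesc < 0 skipped that, leaking one allocation per failed request. A small sketch of the rule being enforced, with hypothetical names rather than the driver's API — every exit path of the owner releases the object:

#include <stdlib.h>

/* hypothetical stand-in for the driver's per-request bookkeeping struct */
struct tracked_req { int id; };

/* ndesc mimics the handler's result count: negative means failure */
static int handle_one(struct tracked_req *sreq, int ndesc)
{
	if (ndesc < 0) {
		free(sreq); /* the previously missing release on the error path */
		return ndesc;
	}

	/* ... consume the completed descriptors ... */

	free(sreq); /* the normal path already released it */
	return 0;
}

int main(void)
{
	handle_one(calloc(1, sizeof(struct tracked_req)), 1);   /* success path */
	handle_one(calloc(1, sizeof(struct tracked_req)), -22); /* error path */
	return 0;
}
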
85 changes: 65 additions & 20 deletions drivers/crypto/inside-secure/safexcel_cipher.c
@@ -14,6 +14,7 @@

#include <crypto/aes.h>
#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>

#include "safexcel.h"

@@ -33,6 +34,10 @@ struct safexcel_cipher_ctx {
unsigned int key_len;
};

+struct safexcel_cipher_req {
+bool needs_inv;
+};

static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
struct crypto_async_request *async,
struct safexcel_command_desc *cdesc,
@@ -126,9 +131,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
return 0;
}

-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
-struct crypto_async_request *async,
-bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+struct crypto_async_request *async,
+bool *should_complete, int *ret)
{
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_result_desc *rdesc;
@@ -265,7 +270,6 @@ static int safexcel_aes_send(struct crypto_async_request *async,
spin_unlock_bh(&priv->ring[ring].egress_lock);

request->req = &req->base;
-ctx->base.handle_result = safexcel_handle_result;

*commands = n_cdesc;
*results = n_rdesc;
@@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,

ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
-ctx->base.needs_inv = false;
-ctx->base.send = safexcel_aes_send;

spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -359,6 +361,26 @@
return ndesc;
}

+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+struct crypto_async_request *async,
+bool *should_complete, int *ret)
+{
+struct skcipher_request *req = skcipher_request_cast(async);
+struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+int err;
+
+if (sreq->needs_inv) {
+sreq->needs_inv = false;
+err = safexcel_handle_inv_result(priv, ring, async,
+should_complete, ret);
+} else {
+err = safexcel_handle_req_result(priv, ring, async,
+should_complete, ret);
+}
+
+return err;
+}

static int safexcel_cipher_send_inv(struct crypto_async_request *async,
int ring, struct safexcel_request *request,
int *commands, int *results)
@@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;

-ctx->base.handle_result = safexcel_handle_inv_result;

ret = safexcel_invalidate_cache(async, &ctx->base, priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
@@ -381,28 +401,46 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
return 0;
}

+static int safexcel_send(struct crypto_async_request *async,
+int ring, struct safexcel_request *request,
+int *commands, int *results)
+{
+struct skcipher_request *req = skcipher_request_cast(async);
+struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+int ret;
+
+if (sreq->needs_inv)
+ret = safexcel_cipher_send_inv(async, ring, request,
+commands, results);
+else
+ret = safexcel_aes_send(async, ring, request,
+commands, results);
+return ret;
+}

static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
-struct skcipher_request req;
+SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;

-memset(&req, 0, sizeof(struct skcipher_request));
+memset(req, 0, sizeof(struct skcipher_request));

/* create invalidation request */
init_completion(&result.completion);
-skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-safexcel_inv_complete, &result);
+skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+safexcel_inv_complete, &result);

-skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
-ctx = crypto_tfm_ctx(req.base.tfm);
+skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
-ctx->base.send = safexcel_cipher_send_inv;
+sreq->needs_inv = true;

spin_lock_bh(&priv->ring[ring].queue_lock);
-crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);

if (!priv->ring[ring].need_dequeue)
@@ -424,19 +462,21 @@ static int safexcel_aes(struct skcipher_request *req,
enum safexcel_cipher_direction dir, u32 mode)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_crypto_priv *priv = ctx->priv;
int ret, ring;

+sreq->needs_inv = false;
ctx->direction = dir;
ctx->mode = mode;

if (ctx->base.ctxr) {
-if (ctx->base.needs_inv)
-ctx->base.send = safexcel_cipher_send_inv;
+if (ctx->base.needs_inv) {
+sreq->needs_inv = true;
+ctx->base.needs_inv = false;
+}
} else {
ctx->base.ring = safexcel_select_ring(priv);
-ctx->base.send = safexcel_aes_send;

ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
EIP197_GFP_FLAGS(req->base),
&ctx->base.ctxr_dma);
@@ -476,6 +516,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
alg.skcipher.base);

ctx->priv = tmpl->priv;
+ctx->base.send = safexcel_send;
+ctx->base.handle_result = safexcel_handle_result;
+
+crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+sizeof(struct safexcel_cipher_req));

return 0;
}
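
The cipher-side rework above is the heart of the remaining inside-secure fixes: previously the per-tfm context's send and handle_result function pointers were rewritten on every operation (switched to the invalidation handlers and back), so a request already in flight on another CPU could run with pointers meant for a different request — the state-corruption and NULL-dereference items in the pull message. Now the pointers are installed once in cra_init() and never change; what varies is a per-request needs_inv flag, stored in request context made room for by crypto_skcipher_set_reqsize(). A standalone sketch of that fixed-dispatcher-plus-per-request-flag pattern (hypothetical names, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

/* per-request state: each request carries its own flag */
struct cipher_req { bool needs_inv; };

static int send_inv(struct cipher_req *r)  { (void)r; puts("invalidate context"); return 0; }
static int send_data(struct cipher_req *r) { (void)r; puts("encrypt/decrypt");   return 0; }

/*
 * Installed once at init time and never rewritten, so concurrent
 * requests can never observe a half-updated function pointer;
 * the branch is driven purely by the request's own state.
 */
static int dispatch_send(struct cipher_req *r)
{
	return r->needs_inv ? send_inv(r) : send_data(r);
}

int main(void)
{
	struct cipher_req normal = { .needs_inv = false };
	struct cipher_req inval  = { .needs_inv = true  };

	dispatch_send(&normal);
	dispatch_send(&inval);
	return 0;
}
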
