This patch backports several patches that went upstream into Herbert Xu's cryptodev-2.6 tree: crypto: Use zeroing memory allocator instead of allocator/memset crypto: crypto4xx - performance optimizations crypto: crypto4xx - convert to skcipher crypto: crypto4xx - avoid VLA use crypto: crypto4xx - add aes-ctr support crypto: crypto4xx - properly set IV after de- and encrypt crypto: crypto4xx - extend aead fallback checks crypto: crypto4xx - put temporary dst sg into request ctx The older, outstanding patches from 120-wxyz series have been upstreamed as well and therefore they have been reassigned to fit into the series. Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
		
			
				
	
	
		
			103 lines
		
	
	
		
			3.5 KiB
		
	
	
	
		
			Diff
		
	
	
	
	
	
			
		
		
	
	
			103 lines
		
	
	
		
			3.5 KiB
		
	
	
	
		
			Diff
		
	
	
	
	
	
From 584201f1895d915c1aa523bc86afdc126e94beca Mon Sep 17 00:00:00 2001
 | 
						|
From: Christian Lamparter <chunkeey@gmail.com>
 | 
						|
Date: Thu, 19 Apr 2018 18:41:56 +0200
 | 
						|
Subject: [PATCH 7/8] crypto: crypto4xx - extend aead fallback checks
 | 
						|
 | 
						|
1020 bytes is the limit for associated data. Any more
 | 
						|
and it will no longer fit into hash_crypto_offset.
 | 
						|
 | 
						|
The hardware will not process aead requests with plaintext
 | 
						|
that have less than AES_BLOCK_SIZE bytes. When decrypting
 | 
						|
aead requests the authsize has to be taken into account as
 | 
						|
well, as it is part of the cryptlen. Otherwise the hardware
 | 
						|
will think it has been misconfigured and will return:
 | 
						|
 | 
						|
aead return err status = 0x98
 | 
						|
 | 
						|
For rfc4543(gcm(aes)), the hardware has a dedicated GMAC
 | 
						|
mode as part of the hash function set.
 | 
						|
 | 
						|
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
 | 
						|
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
 | 
						|
---
 | 
						|
 drivers/crypto/amcc/crypto4xx_alg.c | 30 +++++++++++++++--------------
 | 
						|
 1 file changed, 16 insertions(+), 14 deletions(-)
 | 
						|
 | 
						|
--- a/drivers/crypto/amcc/crypto4xx_alg.c
 | 
						|
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
 | 
						|
@@ -321,6 +321,7 @@ int crypto4xx_decrypt_ctr(struct skciphe
 | 
						|
 }
 | 
						|
 
 | 
						|
 static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
 | 
						|
+						unsigned int len,
 | 
						|
 						bool is_ccm, bool decrypt)
 | 
						|
 {
 | 
						|
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 | 
						|
@@ -330,14 +331,14 @@ static inline bool crypto4xx_aead_need_f
 | 
						|
 		return true;
 | 
						|
 
 | 
						|
 	/*
 | 
						|
-	 * hardware does not handle cases where cryptlen
 | 
						|
-	 * is less than a block
 | 
						|
+	 * hardware does not handle cases where plaintext
 | 
						|
+	 * is less than a block.
 | 
						|
 	 */
 | 
						|
-	if (req->cryptlen < AES_BLOCK_SIZE)
 | 
						|
+	if (len < AES_BLOCK_SIZE)
 | 
						|
 		return true;
 | 
						|
 
 | 
						|
-	/* assoc len needs to be a multiple of 4 */
 | 
						|
-	if (req->assoclen & 0x3)
 | 
						|
+	/* assoc len needs to be a multiple of 4 and <= 1020 */
 | 
						|
+	if (req->assoclen & 0x3 || req->assoclen > 1020)
 | 
						|
 		return true;
 | 
						|
 
 | 
						|
 	/* CCM supports only counter field length of 2 and 4 bytes */
 | 
						|
@@ -449,17 +450,17 @@ static int crypto4xx_crypt_aes_ccm(struc
 | 
						|
 {
 | 
						|
 	struct crypto4xx_ctx *ctx  = crypto_tfm_ctx(req->base.tfm);
 | 
						|
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 | 
						|
-	unsigned int len = req->cryptlen;
 | 
						|
 	__le32 iv[16];
 | 
						|
 	u32 tmp_sa[SA_AES128_CCM_LEN + 4];
 | 
						|
 	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
 | 
						|
-
 | 
						|
-	if (crypto4xx_aead_need_fallback(req, true, decrypt))
 | 
						|
-		return crypto4xx_aead_fallback(req, ctx, decrypt);
 | 
						|
+	unsigned int len = req->cryptlen;
 | 
						|
 
 | 
						|
 	if (decrypt)
 | 
						|
 		len -= crypto_aead_authsize(aead);
 | 
						|
 
 | 
						|
+	if (crypto4xx_aead_need_fallback(req, len, true, decrypt))
 | 
						|
+		return crypto4xx_aead_fallback(req, ctx, decrypt);
 | 
						|
+
 | 
						|
 	memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, ctx->sa_len * 4);
 | 
						|
 	sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2;
 | 
						|
 
 | 
						|
@@ -605,18 +606,19 @@ static inline int crypto4xx_crypt_aes_gc
 | 
						|
 					  bool decrypt)
 | 
						|
 {
 | 
						|
 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
 | 
						|
-	unsigned int len = req->cryptlen;
 | 
						|
+	struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
 | 
						|
 	__le32 iv[4];
 | 
						|
+	unsigned int len = req->cryptlen;
 | 
						|
+
 | 
						|
+	if (decrypt)
 | 
						|
+		len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
 | 
						|
 
 | 
						|
-	if (crypto4xx_aead_need_fallback(req, false, decrypt))
 | 
						|
+	if (crypto4xx_aead_need_fallback(req, len, false, decrypt))
 | 
						|
 		return crypto4xx_aead_fallback(req, ctx, decrypt);
 | 
						|
 
 | 
						|
 	crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE);
 | 
						|
 	iv[3] = cpu_to_le32(1);
 | 
						|
 
 | 
						|
-	if (decrypt)
 | 
						|
-		len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
 | 
						|
-
 | 
						|
 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
 | 
						|
 				  len, iv, sizeof(iv),
 | 
						|
 				  decrypt ? ctx->sa_in : ctx->sa_out,
 |