Thanks for the report. Please try this patch:
---8<---
When we perform a walk in the completion function, we need to ensure
that it is atomic.
Reported-by: syzbot+6f72c2...@syzkaller.appspotmail.com
Fixes: 78105c7e769b ("crypto: xts - Drop use of auxiliary buffer")
Cc: <sta...@vger.kernel.org>
Signed-off-by: Herbert Xu <her...@gondor.apana.org.au>
diff --git a/crypto/xts.c b/crypto/xts.c
index 847f54f76789..c915a45711f5 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -88,7 +88,8 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
* mutliple calls to the 'ecb(..)' instance, which usually would be slower than
* just doing the gf128mul_x_ble() calls again.
*/
-static int xor_tweak(struct skcipher_request *req, bool second_pass)
+static int xor_tweak(struct skcipher_request *req, bool second_pass,
+ bool atomic)
{
struct rctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -102,7 +103,7 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
/* set to our TFM to enforce correct alignment: */
skcipher_request_set_tfm(req, tfm);
}
- err = skcipher_walk_virt(&w, req, false);
+ err = skcipher_walk_virt(&w, req, atomic);
while (w.nbytes) {
unsigned int avail = w.nbytes;
@@ -125,12 +126,12 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
static int xor_tweak_pre(struct skcipher_request *req)
{
- return xor_tweak(req, false);
+ return xor_tweak(req, false, false);
}
-static int xor_tweak_post(struct skcipher_request *req)
+static int xor_tweak_post(struct skcipher_request *req, bool atomic)
{
- return xor_tweak(req, true);
+ return xor_tweak(req, true, atomic);
}
static void crypt_done(struct crypto_async_request *areq, int err)
@@ -138,7 +139,7 @@ static void crypt_done(struct crypto_async_request *areq, int err)
struct skcipher_request *req = areq->data;
if (!err)
- err = xor_tweak_post(req);
+ err = xor_tweak_post(req, true);
skcipher_request_complete(req, err);
}
@@ -166,7 +167,7 @@ static int encrypt(struct skcipher_request *req)
init_crypt(req);
return xor_tweak_pre(req) ?:
crypto_skcipher_encrypt(subreq) ?:
- xor_tweak_post(req);
+ xor_tweak_post(req, false);
}
static int decrypt(struct skcipher_request *req)
@@ -177,7 +178,7 @@ static int decrypt(struct skcipher_request *req)
init_crypt(req);
return xor_tweak_pre(req) ?:
crypto_skcipher_decrypt(subreq) ?:
- xor_tweak_post(req);
+ xor_tweak_post(req, false);
}
static int init_tfm(struct crypto_skcipher *tfm)
--
Email: Herbert Xu <her...@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt