mirror of
https://github.com/nxp-imx/linux-imx.git
synced 2025-12-18 08:26:08 +01:00
Merge branch 'tls-a-few-more-fixes-for-async-decrypt'
Sabrina Dubroca says:

====================
tls: a few more fixes for async decrypt

The previous patchset [1] took care of "full async". This adds a few
fixes for cases where only part of the crypto operations go the async
route, found by extending my previous debug patch [2] to do N
synchronous operations followed by M asynchronous ops (with N and M
configurable).

[1] https://patchwork.kernel.org/project/netdevbpf/list/?series=823784&state=*
[2] https://lore.kernel.org/all/9d664093b1bf7f47497b2c40b3a085b45f3274a2.1694021240.git.sd@queasysnail.net/
====================

Link: https://lore.kernel.org/r/cover.1709132643.git.sd@queasysnail.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
commit
8f5afe4114
|
|
@@ -52,6 +52,7 @@ struct tls_decrypt_arg {
|
|||
struct_group(inargs,
|
||||
bool zc;
|
||||
bool async;
|
||||
bool async_done;
|
||||
u8 tail;
|
||||
);
|
||||
|
||||
|
|
@@ -274,22 +275,30 @@ static int tls_do_decryption(struct sock *sk,
|
|||
DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
|
||||
atomic_inc(&ctx->decrypt_pending);
|
||||
} else {
|
||||
DECLARE_CRYPTO_WAIT(wait);
|
||||
|
||||
aead_request_set_callback(aead_req,
|
||||
CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||
crypto_req_done, &ctx->async_wait);
|
||||
crypto_req_done, &wait);
|
||||
ret = crypto_aead_decrypt(aead_req);
|
||||
if (ret == -EINPROGRESS || ret == -EBUSY)
|
||||
ret = crypto_wait_req(ret, &wait);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = crypto_aead_decrypt(aead_req);
|
||||
if (ret == -EINPROGRESS)
|
||||
return 0;
|
||||
|
||||
if (ret == -EBUSY) {
|
||||
ret = tls_decrypt_async_wait(ctx);
|
||||
ret = ret ?: -EINPROGRESS;
|
||||
darg->async_done = true;
|
||||
/* all completions have run, we're not doing async anymore */
|
||||
darg->async = false;
|
||||
return ret;
|
||||
}
|
||||
if (ret == -EINPROGRESS) {
|
||||
if (darg->async)
|
||||
return 0;
|
||||
|
||||
ret = crypto_wait_req(ret, &ctx->async_wait);
|
||||
}
|
||||
atomic_dec(&ctx->decrypt_pending);
|
||||
darg->async = false;
|
||||
|
||||
return ret;
|
||||
|
|
@@ -1588,8 +1597,11 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
|
|||
/* Prepare and submit AEAD request */
|
||||
err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
|
||||
data_len + prot->tail_size, aead_req, darg);
|
||||
if (err)
|
||||
if (err) {
|
||||
if (darg->async_done)
|
||||
goto exit_free_skb;
|
||||
goto exit_free_pages;
|
||||
}
|
||||
|
||||
darg->skb = clear_skb ?: tls_strp_msg(ctx);
|
||||
clear_skb = NULL;
|
||||
|
|
@@ -1601,6 +1613,9 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
|
|||
return err;
|
||||
}
|
||||
|
||||
if (unlikely(darg->async_done))
|
||||
return 0;
|
||||
|
||||
if (prot->tail_size)
|
||||
darg->tail = dctx->tail;
|
||||
|
||||
|
|
@@ -1948,6 +1963,7 @@ int tls_sw_recvmsg(struct sock *sk,
|
|||
struct strp_msg *rxm;
|
||||
struct tls_msg *tlm;
|
||||
ssize_t copied = 0;
|
||||
ssize_t peeked = 0;
|
||||
bool async = false;
|
||||
int target, err;
|
||||
bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
|
||||
|
|
@@ -2095,8 +2111,10 @@ put_on_rx_list:
|
|||
if (err < 0)
|
||||
goto put_on_rx_list_err;
|
||||
|
||||
if (is_peek)
|
||||
if (is_peek) {
|
||||
peeked += chunk;
|
||||
goto put_on_rx_list;
|
||||
}
|
||||
|
||||
if (partially_consumed) {
|
||||
rxm->offset += chunk;
|
||||
|
|
@@ -2135,8 +2153,8 @@ recv_end:
|
|||
|
||||
/* Drain records from the rx_list & copy if required */
|
||||
if (is_peek || is_kvec)
|
||||
err = process_rx_list(ctx, msg, &control, copied,
|
||||
decrypted, is_peek, NULL);
|
||||
err = process_rx_list(ctx, msg, &control, copied + peeked,
|
||||
decrypted - peeked, is_peek, NULL);
|
||||
else
|
||||
err = process_rx_list(ctx, msg, &control, 0,
|
||||
async_copy_bytes, is_peek, NULL);
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user