From 9c0e3d3a534c3e3e7f6bfce0a150ed2a0841685a Mon Sep 17 00:00:00 2001
From: John Baldwin
Date: Mon, 25 May 2020 22:12:04 +0000
Subject: [PATCH] Add support for optional separate output buffers to
 in-kernel crypto.

Some crypto consumers such as GELI and KTLS for file-backed sendfile
need to store their output in a separate buffer from the input.
Currently these consumers copy the contents of the input buffer into
the output buffer and queue an in-place crypto operation on the output
buffer.  Using a separate output buffer avoids this copy.

- Create a new 'struct crypto_buffer' describing a crypto buffer
  containing a type and type-specific fields.  crp_ilen is gone;
  instead, buffers that use a flat kernel buffer have a cb_buf_len
  field for their length.  The length of other buffer types is
  inferred from the backing store (e.g. uio_resid for a uio).
  Requests now have two such structures: crp_buf for the input
  buffer, and crp_obuf for the output buffer.

- Consumers now use helper functions (crypto_use_*, e.g.
  crypto_use_mbuf()) to configure the input buffer.  If an output
  buffer is not configured, the request still modifies the input
  buffer in-place.  A consumer uses a second set of helper functions
  (crypto_use_output_*) to configure an output buffer.

- Consumers must request support for separate output buffers when
  creating a crypto session via the CSP_F_SEPARATE_OUTPUT flag and are
  only permitted to queue a request with a separate output buffer on
  sessions with this flag set.  Existing drivers already reject
  sessions with unknown flags, so this permits drivers to be modified
  to support this extension without requiring all drivers to change.

- Several data-related functions now have matching versions that
  operate on an explicit buffer (e.g. crypto_apply_buf,
  crypto_buffer_contiguous_subsegment, bus_dmamap_load_crp_buffer).

- Most of the existing data-related functions operate on the input
  buffer.  However, crypto_copyback always writes to the output buffer
  if a request uses a separate output buffer.

- For the regions in input/output buffers, the following conventions
  are followed:
  - AAD and IV are always present in the input only and their fields
    are offsets into the input buffer.
  - The payload is always present in both buffers.  If a request uses
    a separate output buffer, it must set a new
    crp_payload_output_start field to the offset of the payload in the
    output buffer.
  - The digest is in the input buffer for verify operations, and in
    the output buffer for compute operations.  crp_digest_start is
    relative to the appropriate buffer.

- Add a crypto buffer cursor abstraction.  This is a more general form
  of some bits in the cryptosoft driver that tried to always use
  uio's.  However, compared to the original code, this avoids
  rewalking the uio iovec array for requests with multiple vectors.
  It also avoids allocating an iovec array for mbufs and populating
  it, instead walking the mbuf chain directly.

- Update the cryptosoft(4) driver to support separate output buffers,
  making use of the cursor abstraction.
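As a rough consumer-side sketch of the new interface (the function
names example_encrypt() and example_done() are illustrative, and
session setup, key material, and IV handling are omitted), a request
that encrypts an input mbuf chain into a separate output mbuf chain
might look like:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <opencrypto/cryptodev.h>

static int
example_done(struct cryptop *crp)
{
        /* Inspect crp->crp_etype for errors, then release the request. */
        crypto_freereq(crp);
        return (0);
}

static int
example_encrypt(crypto_session_t sid, struct mbuf *m_in,
    struct mbuf *m_out, int len)
{
        struct cryptop *crp;

        /*
         * 'sid' must have been created with CSP_F_SEPARATE_OUTPUT set
         * in csp_flags; otherwise only in-place requests are allowed.
         */
        crp = crypto_getreq(sid, M_WAITOK);
        crp->crp_op = CRYPTO_OP_ENCRYPT;
        crp->crp_flags = CRYPTO_F_CBIFSYNC;

        /* Input buffer: the payload starts at offset 0 of 'm_in'. */
        crypto_use_mbuf(crp, m_in);
        crp->crp_payload_start = 0;
        crp->crp_payload_length = len;

        /* Separate output buffer: ciphertext is written into 'm_out'. */
        crypto_use_output_mbuf(crp, m_out);
        crp->crp_payload_output_start = 0;

        crp->crp_callback = example_done;
        return (crypto_dispatch(crp));
}

Continuing the sketch on the driver side, a software driver might walk
the data buffers with the new cursor roughly as in the following
fragment, which copies the first 16 payload bytes from the input
buffer to wherever the payload output region lives (assuming the
payload is at least 16 bytes and the mbufs are mapped):

static void
example_copy_first_block(struct cryptop *crp)
{
        struct crypto_buffer_cursor cin, cout;
        char block[16];

        /* Read the first 16 payload bytes from the input buffer. */
        crypto_cursor_init(&cin, &crp->crp_buf);
        crypto_cursor_advance(&cin, crp->crp_payload_start);
        crypto_cursor_copydata(&cin, sizeof(block), block);

        /*
         * Write them to the payload output region, which lives in
         * crp_obuf for separate-output requests and in crp_buf
         * otherwise.
         */
        if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
                crypto_cursor_init(&cout, &crp->crp_obuf);
                crypto_cursor_advance(&cout,
                    crp->crp_payload_output_start);
        } else {
                crypto_cursor_init(&cout, &crp->crp_buf);
                crypto_cursor_advance(&cout, crp->crp_payload_start);
        }
        crypto_cursor_copyback(&cout, sizeof(block), block);
}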
Sponsored by: Netflix Differential Revision: https://reviews.freebsd.org/D24545 --- share/man/man9/Makefile | 28 ++- share/man/man9/bus_dma.9 | 22 +- share/man/man9/crypto_buffer.9 | 307 +++++++++++++++++++++++ share/man/man9/crypto_driver.9 | 73 +----- share/man/man9/crypto_request.9 | 168 ++++++++----- share/man/man9/crypto_session.9 | 16 +- sys/crypto/ccp/ccp.c | 12 +- sys/dev/cxgbe/crypto/t4_crypto.c | 16 +- sys/dev/hifn/hifn7751.c | 36 +-- sys/dev/safe/safe.c | 33 +-- sys/geom/eli/g_eli_crypto.c | 4 +- sys/geom/eli/g_eli_integrity.c | 6 +- sys/geom/eli/g_eli_privacy.c | 6 +- sys/kern/subr_bus_dma.c | 25 +- sys/kgssapi/krb5/kcrypto_aes.c | 11 +- sys/netipsec/xform_ah.c | 12 +- sys/netipsec/xform_esp.c | 12 +- sys/netipsec/xform_ipcomp.c | 12 +- sys/opencrypto/criov.c | 407 ++++++++++++++++++++++++------- sys/opencrypto/crypto.c | 97 ++++++-- sys/opencrypto/cryptodev.c | 9 +- sys/opencrypto/cryptodev.h | 153 +++++++++--- sys/opencrypto/cryptosoft.c | 401 +++++++++++++++--------------- sys/opencrypto/ktls_ocf.c | 16 +- sys/sys/bus_dma.h | 5 + 25 files changed, 1288 insertions(+), 599 deletions(-) create mode 100644 share/man/man9/crypto_buffer.9 diff --git a/share/man/man9/Makefile b/share/man/man9/Makefile index a27ef130d1f8..f0babc78350a 100644 --- a/share/man/man9/Makefile +++ b/share/man/man9/Makefile @@ -72,6 +72,7 @@ MAN= accept_filter.9 \ cr_seeotheruids.9 \ crypto.9 \ crypto_asym.9 \ + crypto_buffer.9 \ crypto_driver.9 \ crypto_request.9 \ crypto_session.9 \ @@ -648,6 +649,8 @@ MLINKS+=bus_dma.9 busdma.9 \ bus_dma.9 bus_dmamap_load.9 \ bus_dma.9 bus_dmamap_load_bio.9 \ bus_dma.9 bus_dmamap_load_ccb.9 \ + bus_dma.9 bus_dmamap_load_crp.9 \ + bus_dma.9 bus_dmamap_load_crp_buffer.9 \ bus_dma.9 bus_dmamap_load_mbuf.9 \ bus_dma.9 bus_dmamap_load_mbuf_sg.9 \ bus_dma.9 bus_dmamap_load_uio.9 \ @@ -897,9 +900,20 @@ MLINKS+=crypto_asym.9 crypto_kdispatch.9 \ crypto_asym.9 crypto_kdone.9 \ crypto_asym.9 crypto_kregister.9 \ crypto_asym.9 CRYPTODEV_KPROCESS.9 -MLINKS+=crypto_driver.9 crypto_apply.9 \ - crypto_driver.9 crypto_contiguous_segment.9 \ - crypto_driver.9 crypto_copyback.9 \ +MLINKS+=crypto_buffer.9 crypto_apply.9 \ + crypto_buffer.9 crypto_apply_buf.9 \ + crypto_buffer.9 crypto_buffer_contiguous_segment.9 \ + crypto_buffer.9 crypto_buffer_len.9 \ + crypto_buffer.9 crypto_contiguous_segment.9 \ + crypto_buffer.9 crypto_cursor_init.9 \ + crypto_buffer.9 crypto_cursor_advance.9 \ + crypto_buffer.9 crypto_cursor_copyback.9 \ + crypto_buffer.9 crypto_cursor_copydata.9 \ + crypto_buffer.9 crypto_cursor_copydata_noadv.9 \ + crypto_buffer.9 crypto_cursor_segbase.9 \ + crypto_buffer.9 crypto_cursor_seglen.9 \ + crypto_buffer.9 CRYPTO_HAS_OUTPUT_BUFFER.9 +MLINKS+=crypto_driver.9 crypto_copyback.9 \ crypto_driver.9 crypto_copydata.9 \ crypto_driver.9 crypto_done.9 \ crypto_driver.9 crypto_get_driverid.9 \ @@ -915,7 +929,13 @@ MLINKS+=crypto_driver.9 crypto_apply.9 \ crypto_driver.9 hmac_init_opad.9 MLINKS+=crypto_request.9 crypto_dispatch.9 \ crypto_request.9 crypto_freereq.9 \ - crypto_request.9 crypto_getreq.9 + crypto_request.9 crypto_getreq.9 \ + crypto_request.9 crypto_use_buf.9 \ + crypto_request.9 crypto_use_mbuf.9 \ + crypto_request.9 crypto_use_output_buf.9 \ + crypto_request.9 crypto_use_output_mbuf.9 \ + crypto_request.9 crypto_use_output_uio.9 \ + crypto_request.9 crypto_use_uio.9 \ MLINKS+=crypto_session.9 crypto_auth_hash.9 \ crypto_session.9 crypto_cipher.9 \ crypto_session.9 crypto_get_params.9 \ diff --git a/share/man/man9/bus_dma.9 b/share/man/man9/bus_dma.9 index 
d29266d94ca2..55893efdf60e 100644 --- a/share/man/man9/bus_dma.9 +++ b/share/man/man9/bus_dma.9 @@ -53,7 +53,7 @@ .\" $FreeBSD$ .\" $NetBSD: bus_dma.9,v 1.25 2002/10/14 13:43:16 wiz Exp $ .\" -.Dd April 14, 2020 +.Dd May 25, 2020 .Dt BUS_DMA 9 .Os .Sh NAME @@ -69,6 +69,7 @@ .Nm bus_dmamap_load_bio , .Nm bus_dmamap_load_ccb , .Nm bus_dmamap_load_crp , +.Nm bus_dmamap_load_crp_buffer , .Nm bus_dmamap_load_mbuf , .Nm bus_dmamap_load_mbuf_sg , .Nm bus_dmamap_load_uio , @@ -123,6 +124,10 @@ "struct crypto *crp" "bus_dmamap_callback_t *callback" "void *callback_arg" \ "int flags" .Ft int +.Fn bus_dmamap_load_crp_buffer "bus_dma_tag_t dmat" "bus_dmamap_t map" \ +"struct crypto_buffer *cb" "bus_dmamap_callback_t *callback" \ +"void *callback_arg" "int flags" +.Ft int .Fn bus_dmamap_load_mbuf "bus_dma_tag_t dmat" "bus_dmamap_t map" \ "struct mbuf *mbuf" "bus_dmamap_callback2_t *callback" "void *callback_arg" \ "int flags" @@ -394,8 +399,9 @@ via .Fn bus_dmamap_load , .Fn bus_dmamap_load_bio , .Fn bus_dmamap_load_ccb , +.Fn bus_dmamap_load_crp , or -.Fn bus_dmamap_load_crp . +.Fn bus_dmamap_load_crp_buffer . Callbacks are of the format: .Bl -tag -width indent .It Ft void @@ -885,12 +891,22 @@ XPT_SCSI_IO .It Fn bus_dmamap_load_crp "dmat" "map" "crp" "callback" "callback_arg" "flags" This is a variation of .Fn bus_dmamap_load -which maps buffers pointed to by +which maps the input buffer pointed to by .Fa crp for DMA transfers. The .Dv BUS_DMA_NOWAIT flag is implied, thus no callback deferral will happen. +.It Fn bus_dmamap_load_crp_buffer "dmat" "map" "cb" "callback" "callback_arg" \ +"flags" +This is a variation of +.Fn bus_dmamap_load +which maps the crypto data buffer pointed to by +.Fa cb +for DMA transfers. +The +.Dv BUS_DMA_NOWAIT +flag is implied, thus no callback deferral will happen. .It Fn bus_dmamap_load_mbuf "dmat" "map" "mbuf" "callback2" "callback_arg" \ "flags" This is a variation of diff --git a/share/man/man9/crypto_buffer.9 b/share/man/man9/crypto_buffer.9 new file mode 100644 index 000000000000..9ba77b860636 --- /dev/null +++ b/share/man/man9/crypto_buffer.9 @@ -0,0 +1,307 @@ +.\" Copyright (c) 2020, Chelsio Inc +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions are met: +.\" +.\" 1. Redistributions of source code must retain the above copyright notice, +.\" this list of conditions and the following disclaimer. +.\" +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" 3. Neither the name of the Chelsio Inc nor the names of its +.\" contributors may be used to endorse or promote products derived from +.\" this software without specific prior written permission. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +.\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +.\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +.\" POSSIBILITY OF SUCH DAMAGE. +.\" +.\" * Other names and brands may be claimed as the property of others. +.\" +.\" $FreeBSD$ +.\" +.Dd May 25, 2020 +.Dt CRYPTO_BUFFER 9 +.Os +.Sh NAME +.Nm crypto_buffer +.Nd symmetric cryptographic request buffers +.Sh SYNOPSIS +.In opencrypto/cryptodev.h +.Ft int +.Fo crypto_apply +.Fa "struct cryptop *crp" +.Fa "int off" +.Fa "int len" +.Fa "int (*f)(void *, void *, u_int)" +.Fa "void *arg" +.Fc +.Ft int +.Fo crypto_apply_buf +.Fa "struct crypto_buffer *cb" +.Fa "int off" +.Fa "int len" +.Fa "int (*f)(void *, void *, u_int)" +.Fa "void *arg" +.Fc +.Ft void * +.Fo crypto_buffer_contiguous_subsegment +.Fa "struct crypto_buffer *cb" +.Fa "size_t skip" +.Fa "size_t len" +.Fc +.Ft size_t +.Fn crypto_buffer_len "struct crypto_buffer *cb" +.Ft void * +.Fo crypto_contiguous_subsegment +.Fa "struct cryptop *crp" +.Fa "size_t skip" +.Fa "size_t len" +.Fc +.Ft void +.Fo crypto_cursor_init +.Fa "struct crypto_buffer_cursor *cc" +.Fa "const struct crypto_buffer *cb" +.Fc +.Ft void +.Fn crypto_cursor_advance "struct crypto_buffer_cursor *cc" "size_t amount" +.Ft void +.Fo crypto_cursor_copyback +.Fa "struct crypto_buffer_cursor *cc" +.Fa "int size" +.Fa "const void *src" +.Fc +.Ft void +.Fo crypto_cursor_copydata +.Fa "struct crypto_buffer_cursor *cc" +.Fa "int size" +.Fa "void *dst" +.Fc +.Ft void +.Fo crypto_cursor_copydata_noadv +.Fa "struct crypto_buffer_cursor *cc" +.Fa "int size" +.Fa "void *dst" +.Fc +.Ft void * +.Fn crypto_cursor_segbase "struct crypto_buffer_cursor *cc" +.Ft size_t +.Fn crypto_cursor_seglen "struct crypto_buffer_cursor *cc" +.Ft bool +.Fn CRYPTO_HAS_OUTPUT_BUFFER "struct cryptop *crp" +.Sh DESCRIPTION +Symmetric cryptographic requests use data buffers to describe the data to +be modified. +Requests can either specify a single data buffer whose contents are modified +in place, +or requests may specify separate data buffers for input and output. +.Vt struct crypto_buffer +provides an abstraction that permits cryptographic requests to operate on +different types of buffers. +.Vt struct crypto_cursor +allows cryptographic drivers to iterate over a data buffer. +.Pp +.Fn CRYPTO_HAS_OUTPUT_BUFFER +returns true if +.Fa crp +uses separate buffers for input and output and false if +.Fa crp +uses a single buffer. +.Pp +.Fn crypto_buffer_len +returns the length of data buffer +.Fa cb +in bytes. +.Pp +.Fn crypto_apply_buf +invokes a caller-supplied function +to a region of the data buffer +.Fa cb . +The function +.Fa f +is called one or more times. +For each invocation, +the first argument to +.Fa f +is the value of +.Fa arg +passed to +.Fn crypto_apply_buf . +The second and third arguments to +.Fa f +are a pointer and length to a segment of the buffer mapped into the kernel. +The function is called enough times to cover the +.Fa len +bytes of the data buffer which starts at an offset +.Fa off . 
+If any invocation of +.Fa f +returns a non-zero value, +.Fn crypto_apply_buf +immediately returns that value without invoking +.Fa f +on any remaining segments of the region, +otherwise +.Fn crypto_apply_buf +returns the value from the final call to +.Fa f . +.Fn crypto_apply +invokes the callback +.Fa f +on a region of the input data buffer for +.Fa crp . +.Pp +.Fn crypto_buffer_contiguous_subsegment +attempts to locate a single, virtually-contiguous segment of the data buffer +.Fa cb . +The segment must be +.Fa len +bytes long and start at an offset of +.Fa skip +bytes. +If a segment is found, +a pointer to the start of the segment is returned. +Otherwise, +.Dv NULL +is returned. +.Fn crypto_contiguous_subsegment +attempts to locate a single, virtually-contiguous segment in the input data +buffer for +.Fa crp . +.Ss Data Buffers +Data buffers are described by an instance of +.Vt struct crypto buffer . +The +.Fa cb_type +member contains the type of the data buffer. +The following types are supported: +.Bl -tag -width " CRYPTO_BUF_CONTIG" +.It Dv CRYPTO_BUF_NONE +An invalid buffer. +Used to mark the output buffer when a crypto request uses a single data buffer. +.It Dv CRYPTO_BUF_CONTIG +An array of bytes mapped into the kernel's address space. +.It Dv CRYPTO_BUF_UIO +A scatter/gather list of kernel buffers as described in +.Xr uio 9 . +.It Dv CRYPTO_BUF_MBUF +A network memory buffer as described in +.Xr mbuf 9 . +.El +.Pp +The structure also contains the following type-specific fields: +.Bl -tag -width " cb_buf_len" +.It Fa cb_buf +A pointer to the start of a +.Dv CRYPTO_BUF_CONTIG +data buffer. +.It Fa cb_buf_len +The length of a +.Dv CRYPTO_BUF_CONTIG +data buffer +.It Fa cb_mbuf +A pointer to a +.Vt struct mbuf +for +.Dv CRYPTO_BUF_MBUF . +.It Fa cb_uio +A pointer to a +.Vt struct uio +for +.Dv CRYPTO_BUF_UIO . +.El +.Ss Cursors +Cursors provide a mechanism for iterating over a data buffer. +They are primarily intended for use in software drivers which access data +buffers via virtual addresses. +.Pp +.Fn crypto_cursor_init +initializes the cursor +.Fa cc +to reference the start of the data buffer +.Fa cb . +.Pp +.Fn crypto_cursor_advance +advances the cursor +.Fa amount +bytes forward in the data buffer. +.Pp +.Fn crypto_cursor_copyback +copies +.Fa size +bytes from the local buffer pointed to by +.Fa src +into the data buffer associated with +.Fa cc . +The bytes are written to the current position of +.Fa cc , +and the cursor is then advanced by +.Fa size +bytes. +.Pp +.Fn crypto_cursor_copydata +copies +.Fa size +bytes out of the data buffer associated with +.Fa cc +into a local buffer pointed to by +.Fa dst . +The bytes are read from the current position of +.Fa cc , +and the cursor is then advanced by +.Fa size +bytes. +.Pp +.Fn crypto_cursor_copydata_noadv +is similar to +.Fn crypto_cursor_copydata +except that it does not change the current position of +.Fa cc . +.Pp +.Fn crypto_cursor_segbase +and +.Fn crypto_cursor_seglen +return the start and length, respectively, +of the virtually-contiguous segment at the current position of +.Fa cc . +.Sh RETURN VALUES +.Fn crypto_apply +and +.Fn crypto_apply_buf +return the return value from the caller-supplied callback function. +.Pp +.Fn crypto_buffer_contiguous_subsegment , +.Fn crypto_contiguous_subsegment , +and +.Fn crypto_cursor_segbase , +return a pointer to a contiguous segment or +.Dv NULL . +.Pp +.Fn crypto_buffer_len +returns the length of a buffer in bytes. 
+.Pp +.Fn crypto_cursor_seglen +returns the length in bytes of a contiguous segment. +.Pp +.Fn CRYPTO_HAS_OUTPUT_BUFFER +returns true if the request uses a separate output buffer. +.Sh SEE ALSO +.Xr ipsec 4 , +.Xr bus_dma 9 , +.Xr crypto 7 , +.Xr crypto 9 , +.Xr crypto_request 9 , +.Xr crypto_driver 9 , +.Xr crypto_session 9 , +.Xr mbuf 9 +.Xr uio 9 diff --git a/share/man/man9/crypto_driver.9 b/share/man/man9/crypto_driver.9 index 095a76778183..5a205ee4a094 100644 --- a/share/man/man9/crypto_driver.9 +++ b/share/man/man9/crypto_driver.9 @@ -30,7 +30,7 @@ .\" .\" $FreeBSD$ .\" -.Dd April 20, 2020 +.Dd May 25, 2020 .Dt CRYPTO_DRIVER 9 .Os .Sh NAME @@ -38,20 +38,6 @@ .Nd interface for symmetric cryptographic drivers .Sh SYNOPSIS .In opencrypto/cryptodev.h -.Ft int -.Fo crypto_apply -.Fa "struct cryptop *crp" -.Fa "int off" -.Fa "int len" -.Fa "int (*f)(void *, void *, u_int)" -.Fa "void *arg" -.Fc -.Ft void * -.Fo crypto_contiguous_subsegment -.Fa "struct cryptop *crp" -.Fa "size_t skip" -.Fa "size_t len" -.Fc .Ft void .Fn crypto_copyback "struct cryptop *crp" "int off" "int size" "const void *src" .Ft void @@ -244,29 +230,29 @@ callbacks. .Fn crypto_copydata copies .Fa size -bytes out of the data buffer for +bytes out of the input buffer for .Fa crp into a local buffer pointed to by .Fa dst . The bytes are read starting at an offset of .Fa off -bytes in the request's data buffer. +bytes in the request's input buffer. .Pp .Fn crypto_copyback copies .Fa size bytes from the local buffer pointed to by .Fa src -into the data buffer for +into the output buffer for .Fa crp . The bytes are written starting at an offset of .Fa off -bytes in the request's data buffer. +bytes in the request's output buffer. .Pp .Fn crypto_read_iv copies the IV or nonce for .Fa crp -into the the local buffer pointed to by +into the local buffer pointed to by .Fa iv . .Pp A driver calls @@ -306,52 +292,6 @@ indicates that the driver is able to handle asymmetric requests passed to .Fn CRYPTODEV_KPROCESS . .El .Pp -.Fn crypto_apply -is a helper routine that can be used to invoke a caller-supplied function -to a region of the data buffer for -.Fa crp . -The function -.Fa f -is called one or more times. -For each invocation, -the first argument to -.Fa f -is the value of -.Fa arg passed to -.Fn crypto_apply . -The second and third arguments to -.Fa f -are a pointer and length to a segment of the buffer mapped into the kernel. -The function is called enough times to cover the -.Fa len -bytes of the data buffer which starts at an offset -.Fa off . -If any invocation of -.Fa f -returns a non-zero value, -.Fn crypto_apply -immediately returns that value without invoking -.Fa f -on any remaining segments of the region, -otherwise -.Fn crypto_apply -returns the value from the final call to -.Fa f . -.Pp -.Fn crypto_contiguous_subsegment -attempts to locate a single, virtually-contiguous segment of the data buffer -for -.Fa crp . -The segment must be -.Fa len -bytes long and start at an offset of -.Fa skip -bytes. -If a segment is found, -a pointer to the start of the segment is returned. -Otherwise, -.Dv NULL -is returned. .Pp .Fn hmac_init_ipad prepares an authentication context to generate the inner hash of an HMAC. @@ -396,5 +336,6 @@ returns a negative value on success or an error on failure. 
.Sh SEE ALSO .Xr crypto 7 , .Xr crypto 9 , +.Xr crypto_buffer 9 , .Xr crypto_request 9 , .Xr crypto_session 9 diff --git a/share/man/man9/crypto_request.9 b/share/man/man9/crypto_request.9 index 008581d69bbd..e8b8b374f31c 100644 --- a/share/man/man9/crypto_request.9 +++ b/share/man/man9/crypto_request.9 @@ -30,7 +30,7 @@ .\" .\" $FreeBSD$ .\" -.Dd April 20, 2020 +.Dd May 25, 2020 .Dt CRYPTO_REQUEST 9 .Os .Sh NAME @@ -44,6 +44,18 @@ .Fn crypto_freereq "struct cryptop *crp" .Ft "struct cryptop *" .Fn crypto_getreq "crypto_session_t cses" "int how" +.Ft void +.Fn crypto_use_buf "struct cryptop *crp" "void *buf" "int len" +.Ft void +.Fn crypto_use_mbuf "struct cryptop *crp" "struct mbuf *m" +.Ft void +.Fn crypto_use_uio "struct cryptop *crp" "struct uio *uio" +.Ft void +.Fn crypto_use_output_buf "struct cryptop *crp" "void *buf" "int len" +.Ft void +.Fn crypto_use_output_mbuf "struct cryptop *crp" "struct mbuf *m" +.Ft void +.Fn crypto_use_output_uio "struct cryptop *crp" "struct uio *uio" .Sh DESCRIPTION Each symmetric cryptographic operation in the kernel is described by an instance of @@ -84,57 +96,65 @@ it should be feed via .Fn crypto_freereq . .Pp Cryptographic operations include several fields to describe the request. -.Ss Buffer Types -Requests are associated with a single data buffer that is modified in place. -The type of the data buffer and the buffer itself are described by the -following fields: -.Bl -tag -width crp_buf_type -.It Fa crp_buf_type -The type of the data buffer. -The following types are supported: -.Bl -tag -width CRYPTO_BUF_CONTIG -.It Dv CRYPTO_BUF_CONTIG -An array of bytes mapped into the kernel's address space. -.It Dv CRYPTO_BUF_UIO -A scatter/gather list of kernel buffers as described in -.Xr uio 9 . -.It Dv CRYPTO_BUF_MBUF -A network memory buffer as described in -.Xr mbuf 9 . -.El -.It Fa crp_buf -A pointer to the start of a -.Dv CRYPTO_BUF_CONTIG -data buffer. -.It Fa crp_ilen -The length of a -.Dv CRYPTO_BUF_CONTIG -data buffer -.It Fa crp_mbuf -A pointer to a -.Vt struct mbuf -for -.Dv CRYPTO_BUF_MBUF . -.It Fa crp_uio -A pointer to a -.Vt struct uio -for -.Dv CRYPTO_BUF_UIO . -.It Fa crp_olen -Used with compression and decompression requests to describe the updated -length of the payload region in the data buffer. +.Ss Request Buffers +Requests can either specify a single data buffer that is modified in place +.Po +.Fa crp_buf +.Pc +or separate input +.Po +.Fa crp_buf +.Pc +and output +.Po +.Fa crp_obuf +.Pc +buffers. +Note that separate input and output buffers are not supported for compression +mode requests. .Pp -If a compression request increases the size of the payload, -then the data buffer is unmodified, the request completes successfully, -and -.Fa crp_olen -is set to the size the compressed data would have used. -Callers can compare this to the payload region length to determine if -the compressed data was discarded. +All requests must have a valid +.Fa crp_buf +initialized by one of the following functions: +.Bl -tag -width "Fn crypto_use_mbuf" +.It Fn crypto_use_buf +Uses an array of +.Fa len +bytes pointed to by +.Fa buf +as the data buffer. +.It Fn crypto_use_mbuf +Uses the network memory buffer +.Fa m +as the data buffer. +.It Fn crypto_use_uio +Uses the scatter/gather list +.Fa uio +as the data buffer. 
+.El +.Pp +One of the following functions should be used to initialize +.Fa crp_obuf +for requests that use separate input and output buffers: +.Bl -tag -width "Fn crypto_use_output_mbuf" +.It Fn crypto_use_output_buf +Uses an array of +.Fa len +bytes pointed to by +.Fa buf +as the output buffer. +.It Fn crypto_use_output_mbuf +Uses the network memory buffer +.Fa m +as the output buffer. +.It Fn crypto_use_output_uio +Uses the scatter/gather list +.Fa uio +as the output buffer. .El .Ss Request Regions -Each request describes one or more regions in the data buffer using. -Each region is described by an offset relative to the start of the +Each request describes one or more regions in the data buffers. +Each region is described by an offset relative to the start of a data buffer and a length. The length of some regions is the same for all requests belonging to a session. @@ -142,18 +162,43 @@ Those lengths are set in the session parameters of the associated session. All requests must define a payload region. Other regions are only required for specific session modes. +.Pp +For requests with separate input and output data buffers, +the AAD, IV, and payload regions are always defined as regions in the +input buffer, +and a separate payload output region is defined to hold the output of +encryption or decryption in the output buffer. +The digest region describes a region in the input data buffer for +requests that verify an existing digest. +For requests that compute a digest, +the digest region describes a region in the output data buffer. +Note that the only data written to the output buffer is the encryption +or decryption result and any computed digest. +AAD and IV regions are not copied from the input buffer into the output +buffer but are only used as inputs. +.Pp The following regions are defined: -.Bl -column "Payload" "crp_payload_start" "crp_payload_length" -.It Sy Region Ta Sy Start Ta Sy Length Ta Sy Description -.It AAD Ta Fa crp_aad_start Ta Fa crp_aad_length Ta +.Bl -column "Payload Output" "Input/Output" +.It Sy Region Ta Sy Buffer Ta Sy Description +.It AAD Ta Input Ta Additional Authenticated Data -.It IV Ta Fa crp_iv_start Ta Fa csp_ivlen Ta +.It IV Ta Input Ta Embedded IV or nonce -.It Payload Ta Fa crp_payload_start Ta Fa crp_payload_length Ta +.It Payload Ta Input Ta Data to encrypt, decrypt, compress, or decompress -.It Digest Ta Fa crp_digest_start Ta Fa csp_auth_mlen Ta +.It Payload Output Ta Output Ta +Encrypted or decrypted data +.It Digest Ta Input/Output Ta Authentication digest, hash, or tag .El +.Bl -column "Payload Output" ".Fa crp_payload_output_start" +.It Sy Region Ta Sy Start Ta Sy Length +.It AAD Ta Fa crp_aad_start Ta Fa crp_aad_length +.It IV Ta Fa crp_iv_start Ta Fa csp_ivlen +.It Payload Ta Fa crp_payload_start Ta Fa crp_payload_length +.It Payload Output Ta Fa crp_payload_output_start Ta Fa crp_payload_length +.It Digest Ta Fa crp_digest_start Ta Fa csp_auth_mlen +.El .Pp Requests are permitted to operate on only a subset of the data buffer. For example, @@ -223,7 +268,7 @@ If the IV is stored in should be set in .Fa crp_flags and -.Fa crp_digest_start +.Fa crp_iv_start should be left as zero. .Pp Requests that store part, but not all, of the IV in the data buffer should @@ -380,6 +425,17 @@ The callback function should inspect to determine the status of the completed operation. It should also arrange for the request to be freed via .Fn crypto_freereq . 
+.It Fa crp_olen +Used with compression and decompression requests to describe the updated +length of the payload region in the data buffer. +.Pp +If a compression request increases the size of the payload, +then the data buffer is unmodified, the request completes successfully, +and +.Fa crp_olen +is set to the size the compressed data would have used. +Callers can compare this to the payload region length to determine if +the compressed data was discarded. .El .Sh RETURN VALUES .Fn crypto_dispatch diff --git a/share/man/man9/crypto_session.9 b/share/man/man9/crypto_session.9 index bb89afa93d63..1acbb1bea0c4 100644 --- a/share/man/man9/crypto_session.9 +++ b/share/man/man9/crypto_session.9 @@ -30,7 +30,7 @@ .\" .\" $FreeBSD$ .\" -.Dd March 27, 2020 +.Dd May 25, 2020 .Dt CRYPTO_SESSION 9 .Os .Sh NAME @@ -183,8 +183,18 @@ and the authentication algorithm is specified in .Fa csp_auth_alg . .El .It Fa csp_flags -Currently, no additional flags are defined and this field should be set to -zero. +A mask of optional driver features. +Drivers will only attach to a session if they support all of the +requested features. +.Bl -tag -width CSP_F_SEPARATE_OUTPUT +.It Dv CSP_F_SEPARATE_OUTPUT +Support requests that use separate input and output buffers. +Sessions with this flag set permit requests with either a single buffer +that is modified in-place, or requests with separate input and output +buffers. +Sessions without this flag only permit requests with a single buffer that +is modified in-place. +.El .It Fa csp_ivlen If either the cipher or authentication algorithms require an explicit initialization vector (IV) or nonce, diff --git a/sys/crypto/ccp/ccp.c b/sys/crypto/ccp/ccp.c index e0a5f010b0c6..f569d18caa54 100644 --- a/sys/crypto/ccp/ccp.c +++ b/sys/crypto/ccp/ccp.c @@ -92,20 +92,20 @@ static struct random_source random_ccp = { * crypto operation buffer. */ static int -ccp_populate_sglist(struct sglist *sg, struct cryptop *crp) +ccp_populate_sglist(struct sglist *sg, struct crypto_buffer *cb) { int error; sglist_reset(sg); - switch (crp->crp_buf_type) { + switch (cb->cb_type) { case CRYPTO_BUF_MBUF: - error = sglist_append_mbuf(sg, crp->crp_mbuf); + error = sglist_append_mbuf(sg, cb->cb_mbuf); break; case CRYPTO_BUF_UIO: - error = sglist_append_uio(sg, crp->crp_uio); + error = sglist_append_uio(sg, cb->cb_uio); break; case CRYPTO_BUF_CONTIG: - error = sglist_append(sg, crp->crp_buf, crp->crp_ilen); + error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len); break; default: error = EINVAL; @@ -547,7 +547,7 @@ ccp_process(device_t dev, struct cryptop *crp, int hint) goto out; qpheld = true; - error = ccp_populate_sglist(qp->cq_sg_crp, crp); + error = ccp_populate_sglist(qp->cq_sg_crp, &crp->crp_buf); if (error != 0) goto out; diff --git a/sys/dev/cxgbe/crypto/t4_crypto.c b/sys/dev/cxgbe/crypto/t4_crypto.c index 06a34e899b53..ae1e4986fe62 100644 --- a/sys/dev/cxgbe/crypto/t4_crypto.c +++ b/sys/dev/cxgbe/crypto/t4_crypto.c @@ -247,26 +247,26 @@ struct ccr_softc { * requests. * * These scatter/gather lists can describe different subsets of the - * buffer described by the crypto operation. ccr_populate_sglist() - * generates a scatter/gather list that covers the entire crypto + * buffers described by the crypto operation. ccr_populate_sglist() + * generates a scatter/gather list that covers an entire crypto * operation buffer that is then used to construct the other * scatter/gather lists. 
*/ static int -ccr_populate_sglist(struct sglist *sg, struct cryptop *crp) +ccr_populate_sglist(struct sglist *sg, struct crypto_buffer *cb) { int error; sglist_reset(sg); - switch (crp->crp_buf_type) { + switch (cb->cb_type) { case CRYPTO_BUF_MBUF: - error = sglist_append_mbuf(sg, crp->crp_mbuf); + error = sglist_append_mbuf(sg, cb->cb_mbuf); break; case CRYPTO_BUF_UIO: - error = sglist_append_uio(sg, crp->crp_uio); + error = sglist_append_uio(sg, cb->cb_uio); break; case CRYPTO_BUF_CONTIG: - error = sglist_append(sg, crp->crp_buf, crp->crp_ilen); + error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len); break; default: error = EINVAL; @@ -2576,7 +2576,7 @@ ccr_process(device_t dev, struct cryptop *crp, int hint) sc = device_get_softc(dev); mtx_lock(&sc->lock); - error = ccr_populate_sglist(sc->sg_crp, crp); + error = ccr_populate_sglist(sc->sg_crp, &crp->crp_buf); if (error) { sc->stats_sglist_error++; goto out; diff --git a/sys/dev/hifn/hifn7751.c b/sys/dev/hifn/hifn7751.c index 2e4da1ea48ff..38b192d27c57 100644 --- a/sys/dev/hifn/hifn7751.c +++ b/sys/dev/hifn/hifn7751.c @@ -1760,22 +1760,6 @@ hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd) return (idx); } -static bus_size_t -hifn_crp_length(struct cryptop *crp) -{ - - switch (crp->crp_buf_type) { - case CRYPTO_BUF_MBUF: - return (crp->crp_mbuf->m_pkthdr.len); - case CRYPTO_BUF_UIO: - return (crp->crp_uio->uio_resid); - case CRYPTO_BUF_CONTIG: - return (crp->crp_ilen); - default: - panic("bad crp buffer type"); - } -} - static void hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, int error) { @@ -1831,12 +1815,12 @@ hifn_crypto( err = ENOMEM; goto err_srcmap1; } - cmd->src_mapsize = hifn_crp_length(crp); + cmd->src_mapsize = crypto_buffer_len(&crp->crp_buf); if (hifn_dmamap_aligned(&cmd->src)) { cmd->sloplen = cmd->src_mapsize & 3; cmd->dst = cmd->src; - } else if (crp->crp_buf_type == CRYPTO_BUF_MBUF) { + } else if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) { int totlen, len; struct mbuf *m, *m0, *mlast; @@ -1854,10 +1838,11 @@ hifn_crypto( * have no guarantee that we'll be re-entered. 
*/ totlen = cmd->src_mapsize; - if (crp->crp_mbuf->m_flags & M_PKTHDR) { + if (crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR) { len = MHLEN; MGETHDR(m0, M_NOWAIT, MT_DATA); - if (m0 && !m_dup_pkthdr(m0, crp->crp_mbuf, M_NOWAIT)) { + if (m0 && !m_dup_pkthdr(m0, crp->crp_buf.cb_mbuf, + M_NOWAIT)) { m_free(m0); m0 = NULL; } @@ -2084,7 +2069,7 @@ hifn_crypto( if (cmd->src_map != cmd->dst_map) bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); err_srcmap: - if (crp->crp_buf_type == CRYPTO_BUF_MBUF) { + if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) { if (cmd->dst_m != NULL) m_freem(cmd->dst_m); } @@ -2626,7 +2611,7 @@ hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf) BUS_DMASYNC_POSTREAD); } - if (crp->crp_buf_type == CRYPTO_BUF_MBUF) { + if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) { if (cmd->dst_m != NULL) { totlen = cmd->src_mapsize; for (m = cmd->dst_m; m != NULL; m = m->m_next) { @@ -2636,9 +2621,10 @@ hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf) } else totlen -= m->m_len; } - cmd->dst_m->m_pkthdr.len = crp->crp_mbuf->m_pkthdr.len; - m_freem(crp->crp_mbuf); - crp->crp_mbuf = cmd->dst_m; + cmd->dst_m->m_pkthdr.len = + crp->crp_buf.cb_mbuf->m_pkthdr.len; + m_freem(crp->crp_buf.cb_mbuf); + crp->crp_buf.cb_mbuf = cmd->dst_m; } } diff --git a/sys/dev/safe/safe.c b/sys/dev/safe/safe.c index 10f90a1ea51f..12636c0eccf5 100644 --- a/sys/dev/safe/safe.c +++ b/sys/dev/safe/safe.c @@ -752,22 +752,6 @@ safe_newsession(device_t dev, crypto_session_t cses, return (0); } -static bus_size_t -safe_crp_length(struct cryptop *crp) -{ - - switch (crp->crp_buf_type) { - case CRYPTO_BUF_MBUF: - return (crp->crp_mbuf->m_pkthdr.len); - case CRYPTO_BUF_UIO: - return (crp->crp_uio->uio_resid); - case CRYPTO_BUF_CONTIG: - return (crp->crp_ilen); - default: - panic("bad crp buffer type"); - } -} - static void safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error) { @@ -996,7 +980,7 @@ safe_process(device_t dev, struct cryptop *crp, int hint) err = ENOMEM; goto errout; } - re->re_src_mapsize = safe_crp_length(crp); + re->re_src_mapsize = crypto_buffer_len(&crp->crp_buf); nicealign = safe_dmamap_aligned(&re->re_src); uniform = safe_dmamap_uniform(&re->re_src); @@ -1063,7 +1047,7 @@ safe_process(device_t dev, struct cryptop *crp, int hint) err = ENOMEM; goto errout; } - } else if (crp->crp_buf_type == CRYPTO_BUF_MBUF) { + } else if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) { int totlen, len; struct mbuf *m, *top, **mp; @@ -1080,10 +1064,10 @@ safe_process(device_t dev, struct cryptop *crp, int hint) if (!uniform) safestats.st_notuniform++; totlen = re->re_src_mapsize; - if (crp->crp_mbuf->m_flags & M_PKTHDR) { + if (crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR) { len = MHLEN; MGETHDR(m, M_NOWAIT, MT_DATA); - if (m && !m_dup_pkthdr(m, crp->crp_mbuf, + if (m && !m_dup_pkthdr(m, crp->crp_buf.cb_mbuf, M_NOWAIT)) { m_free(m); m = NULL; @@ -1168,8 +1152,8 @@ safe_process(device_t dev, struct cryptop *crp, int hint) if (!(csp->csp_mode == CSP_MODE_ETA && (re->re_src.mapsize-oplen) == ses->ses_mlen && crp->crp_digest_start == oplen)) - safe_mcopy(crp->crp_mbuf, re->re_dst_m, - oplen); + safe_mcopy(crp->crp_buf.cb_mbuf, + re->re_dst_m, oplen); else safestats.st_noicvcopy++; } @@ -1305,7 +1289,10 @@ safe_callback(struct safe_softc *sc, struct safe_ringentry *re) crp->crp_etype = EIO; /* something more meaningful? */ } - /* XXX: Should crp_mbuf be updated to re->re_dst_m if it is non-NULL? 
*/ + /* + * XXX: Should crp_buf.cb_mbuf be updated to re->re_dst_m if + * it is non-NULL? + */ if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) { bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map, diff --git a/sys/geom/eli/g_eli_crypto.c b/sys/geom/eli/g_eli_crypto.c index b38ed7ee1db3..ca8170db7e4d 100644 --- a/sys/geom/eli/g_eli_crypto.c +++ b/sys/geom/eli/g_eli_crypto.c @@ -92,9 +92,7 @@ g_eli_crypto_cipher(u_int algo, int enc, u_char *data, size_t datasize, crp->crp_opaque = NULL; crp->crp_callback = g_eli_crypto_done; - crp->crp_buf_type = CRYPTO_BUF_CONTIG; - crp->crp_ilen = datasize; - crp->crp_buf = (void *)data; + crypto_use_buf(crp, data, datasize); error = crypto_dispatch(crp); if (error == 0) { diff --git a/sys/geom/eli/g_eli_integrity.c b/sys/geom/eli/g_eli_integrity.c index 7ec1b5662e6e..ae3ad52d13ff 100644 --- a/sys/geom/eli/g_eli_integrity.c +++ b/sys/geom/eli/g_eli_integrity.c @@ -159,7 +159,7 @@ g_eli_auth_read_done(struct cryptop *crp) /* Number of sectors from encrypted provider, eg. 18. */ nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; /* Which relative sector this request decrypted. */ - rel_sec = ((crp->crp_buf + crp->crp_payload_start) - + rel_sec = ((crp->crp_buf.cb_buf + crp->crp_payload_start) - (char *)bp->bio_driver2) / encr_secsize; errorp = (int *)((char *)bp->bio_driver2 + encr_secsize * nsec + @@ -517,10 +517,8 @@ g_eli_auth_run(struct g_eli_worker *wr, struct bio *bp) plaindata += data_secsize; } - crp->crp_ilen = sc->sc_alen + data_secsize; + crypto_use_buf(crp, data, sc->sc_alen + data_secsize); crp->crp_opaque = (void *)bp; - crp->crp_buf_type = CRYPTO_BUF_CONTIG; - crp->crp_buf = (void *)data; data += encr_secsize; crp->crp_flags = CRYPTO_F_CBIFSYNC; if (g_eli_batch) diff --git a/sys/geom/eli/g_eli_privacy.c b/sys/geom/eli/g_eli_privacy.c index bfa1b800266b..7ec73968394a 100644 --- a/sys/geom/eli/g_eli_privacy.c +++ b/sys/geom/eli/g_eli_privacy.c @@ -82,7 +82,7 @@ g_eli_crypto_read_done(struct cryptop *crp) if (crp->crp_etype == 0) { G_ELI_DEBUG(3, "Crypto READ request done (%d/%d).", bp->bio_inbed, bp->bio_children); - bp->bio_completed += crp->crp_ilen; + bp->bio_completed += crp->crp_payload_length; } else { G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.", bp->bio_inbed, bp->bio_children, crp->crp_etype); @@ -265,10 +265,8 @@ g_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp) for (i = 0, dstoff = bp->bio_offset; i < nsec; i++, dstoff += secsize) { crp = crypto_getreq(wr->w_sid, M_WAITOK); - crp->crp_ilen = secsize; + crypto_use_buf(crp, data, secsize); crp->crp_opaque = (void *)bp; - crp->crp_buf_type = CRYPTO_BUF_CONTIG; - crp->crp_buf = (void *)data; data += secsize; if (bp->bio_cmd == BIO_WRITE) { crp->crp_op = CRYPTO_OP_ENCRYPT; diff --git a/sys/kern/subr_bus_dma.c b/sys/kern/subr_bus_dma.c index bf524a059d05..896bd8c1c346 100644 --- a/sys/kern/subr_bus_dma.c +++ b/sys/kern/subr_bus_dma.c @@ -637,8 +637,9 @@ bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map, } int -bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp, - bus_dmamap_callback_t *callback, void *callback_arg, int flags) +bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, + struct crypto_buffer *cb, bus_dmamap_callback_t *callback, + void *callback_arg, int flags) { bus_dma_segment_t *segs; int error; @@ -647,19 +648,21 @@ bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp, flags |= BUS_DMA_NOWAIT; nsegs = -1; error = 0; - switch (crp->crp_buf_type) { + switch 
(cb->cb_type) { case CRYPTO_BUF_CONTIG: - error = _bus_dmamap_load_buffer(dmat, map, crp->crp_buf, - crp->crp_ilen, kernel_pmap, flags, NULL, &nsegs); + error = _bus_dmamap_load_buffer(dmat, map, cb->cb_buf, + cb->cb_buf_len, kernel_pmap, flags, NULL, &nsegs); break; case CRYPTO_BUF_MBUF: - error = _bus_dmamap_load_mbuf_sg(dmat, map, crp->crp_mbuf, + error = _bus_dmamap_load_mbuf_sg(dmat, map, cb->cb_mbuf, NULL, &nsegs, flags); break; case CRYPTO_BUF_UIO: - error = _bus_dmamap_load_uio(dmat, map, crp->crp_uio, &nsegs, + error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs, flags); break; + default: + error = EINVAL; } nsegs++; @@ -684,3 +687,11 @@ bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp, return (0); } + +int +bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp, + bus_dmamap_callback_t *callback, void *callback_arg, int flags) +{ + return (bus_dmamap_load_crp_buffer(dmat, map, &crp->crp_buf, callback, + callback_arg, flags)); +} diff --git a/sys/kgssapi/krb5/kcrypto_aes.c b/sys/kgssapi/krb5/kcrypto_aes.c index 451f6d77a72f..04b68357be73 100644 --- a/sys/kgssapi/krb5/kcrypto_aes.c +++ b/sys/kgssapi/krb5/kcrypto_aes.c @@ -156,9 +156,10 @@ aes_encrypt_1(const struct krb5_key_state *ks, int buftype, void *buf, memset(crp->crp_iv, 0, 16); } - crp->crp_buf_type = buftype; - crp->crp_buf = buf; - crp->crp_ilen = skip + len; + if (buftype == CRYPTO_BUF_MBUF) + crypto_use_mbuf(crp, buf); + else + crypto_use_buf(crp, buf, skip + len); crp->crp_opaque = as; crp->crp_callback = aes_crypto_cb; @@ -328,9 +329,7 @@ aes_checksum(const struct krb5_key_state *ks, int usage, crp->crp_payload_length = inlen; crp->crp_digest_start = skip + inlen; crp->crp_flags = CRYPTO_F_CBIFSYNC; - crp->crp_buf_type = CRYPTO_BUF_MBUF; - crp->crp_mbuf = inout; - crp->crp_ilen = skip + inlen + 12; + crypto_use_mbuf(crp, inout); crp->crp_opaque = as; crp->crp_callback = aes_crypto_cb; diff --git a/sys/netipsec/xform_ah.c b/sys/netipsec/xform_ah.c index 456ba7e85865..8c6032a070c1 100644 --- a/sys/netipsec/xform_ah.c +++ b/sys/netipsec/xform_ah.c @@ -654,13 +654,11 @@ ah_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff) } /* Crypto operation descriptor. */ - crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */ crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST; crp->crp_flags = CRYPTO_F_CBIFSYNC; if (V_async_crypto) crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; - crp->crp_mbuf = m; - crp->crp_buf_type = CRYPTO_BUF_MBUF; + crypto_use_mbuf(crp, m); crp->crp_callback = ah_input_cb; crp->crp_opaque = xd; @@ -695,7 +693,7 @@ ah_input_cb(struct cryptop *crp) int authsize, rplen, ahsize, error, skip, protoff; uint8_t nxt; - m = crp->crp_mbuf; + m = crp->crp_buf.cb_mbuf; xd = crp->crp_opaque; CURVNET_SET(xd->vnet); sav = xd->sav; @@ -1031,13 +1029,11 @@ ah_output(struct mbuf *m, struct secpolicy *sp, struct secasvar *sav, } /* Crypto operation descriptor. */ - crp->crp_ilen = m->m_pkthdr.len; /* Total input length. 
*/ crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST; crp->crp_flags = CRYPTO_F_CBIFSYNC; if (V_async_crypto) crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; - crp->crp_mbuf = m; - crp->crp_buf_type = CRYPTO_BUF_MBUF; + crypto_use_mbuf(crp, m); crp->crp_callback = ah_output_cb; crp->crp_opaque = xd; @@ -1073,7 +1069,7 @@ ah_output_cb(struct cryptop *crp) u_int idx; int skip, error; - m = (struct mbuf *) crp->crp_buf; + m = crp->crp_buf.cb_mbuf; xd = (struct xform_data *) crp->crp_opaque; CURVNET_SET(xd->vnet); sp = xd->sp; diff --git a/sys/netipsec/xform_esp.c b/sys/netipsec/xform_esp.c index ba1cb7044390..214df48157c9 100644 --- a/sys/netipsec/xform_esp.c +++ b/sys/netipsec/xform_esp.c @@ -366,12 +366,10 @@ esp_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff) } /* Crypto operation descriptor */ - crp->crp_ilen = m->m_pkthdr.len; /* Total input length */ crp->crp_flags = CRYPTO_F_CBIFSYNC; if (V_async_crypto) crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; - crp->crp_mbuf = m; - crp->crp_buf_type = CRYPTO_BUF_MBUF; + crypto_use_mbuf(crp, m); crp->crp_callback = esp_input_cb; crp->crp_opaque = xd; @@ -446,7 +444,7 @@ esp_input_cb(struct cryptop *crp) crypto_session_t cryptoid; int hlen, skip, protoff, error, alen; - m = crp->crp_mbuf; + m = crp->crp_buf.cb_mbuf; xd = crp->crp_opaque; CURVNET_SET(xd->vnet); sav = xd->sav; @@ -840,12 +838,10 @@ esp_output(struct mbuf *m, struct secpolicy *sp, struct secasvar *sav, xd->vnet = curvnet; /* Crypto operation descriptor. */ - crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */ crp->crp_flags |= CRYPTO_F_CBIFSYNC; if (V_async_crypto) crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; - crp->crp_mbuf = m; - crp->crp_buf_type = CRYPTO_BUF_MBUF; + crypto_use_mbuf(crp, m); crp->crp_callback = esp_output_cb; crp->crp_opaque = xd; @@ -884,7 +880,7 @@ esp_output_cb(struct cryptop *crp) xd = (struct xform_data *) crp->crp_opaque; CURVNET_SET(xd->vnet); - m = (struct mbuf *) crp->crp_buf; + m = crp->crp_buf.cb_mbuf; sp = xd->sp; sav = xd->sav; idx = xd->idx; diff --git a/sys/netipsec/xform_ipcomp.c b/sys/netipsec/xform_ipcomp.c index 0529b2dda7c5..b2d98b82bf87 100644 --- a/sys/netipsec/xform_ipcomp.c +++ b/sys/netipsec/xform_ipcomp.c @@ -249,10 +249,8 @@ ipcomp_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff) crp->crp_payload_length = m->m_pkthdr.len - (skip + hlen); /* Crypto operation descriptor */ - crp->crp_ilen = m->m_pkthdr.len - (skip + hlen); crp->crp_flags = CRYPTO_F_CBIFSYNC; - crp->crp_mbuf = m; - crp->crp_buf_type = CRYPTO_BUF_MBUF; + crypto_use_mbuf(crp, m); crp->crp_callback = ipcomp_input_cb; crp->crp_opaque = xd; @@ -291,7 +289,7 @@ ipcomp_input_cb(struct cryptop *crp) int skip, protoff; uint8_t nproto; - m = crp->crp_mbuf; + m = crp->crp_buf.cb_mbuf; xd = crp->crp_opaque; CURVNET_SET(xd->vnet); sav = xd->sav; @@ -506,10 +504,8 @@ ipcomp_output(struct mbuf *m, struct secpolicy *sp, struct secasvar *sav, xd->cryptoid = cryptoid; /* Crypto operation descriptor */ - crp->crp_ilen = m->m_pkthdr.len; /* Total input length */ crp->crp_flags = CRYPTO_F_CBIFSYNC; - crp->crp_mbuf = m; - crp->crp_buf_type = CRYPTO_BUF_MBUF; + crypto_use_mbuf(crp, m); crp->crp_callback = ipcomp_output_cb; crp->crp_opaque = xd; @@ -537,7 +533,7 @@ ipcomp_output_cb(struct cryptop *crp) u_int idx; int error, skip, protoff; - m = crp->crp_mbuf; + m = crp->crp_buf.cb_mbuf; xd = crp->crp_opaque; CURVNET_SET(xd->vnet); idx = xd->idx; diff --git a/sys/opencrypto/criov.c b/sys/opencrypto/criov.c index 
e097a22713b3..2f0d9bfdb3c9 100644 --- a/sys/opencrypto/criov.c +++ b/sys/opencrypto/criov.c @@ -60,7 +60,7 @@ __FBSDID("$FreeBSD$"); } \ } while (0) -void +static void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp) { struct iovec *iov = uio->uio_iov; @@ -80,7 +80,7 @@ cuio_copydata(struct uio* uio, int off, int len, caddr_t cp) } } -void +static void cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp) { struct iovec *iov = uio->uio_iov; @@ -103,7 +103,7 @@ cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp) /* * Return the index and offset of location in iovec list. */ -int +static int cuio_getptr(struct uio *uio, int loc, int *off) { int ind, len; @@ -128,11 +128,263 @@ cuio_getptr(struct uio *uio, int loc, int *off) return (-1); } +void +crypto_cursor_init(struct crypto_buffer_cursor *cc, + const struct crypto_buffer *cb) +{ + memset(cc, 0, sizeof(*cc)); + cc->cc_type = cb->cb_type; + switch (cc->cc_type) { + case CRYPTO_BUF_CONTIG: + cc->cc_buf = cb->cb_buf; + cc->cc_buf_len = cb->cb_buf_len; + break; + case CRYPTO_BUF_MBUF: + cc->cc_mbuf = cb->cb_mbuf; + break; + case CRYPTO_BUF_UIO: + cc->cc_iov = cb->cb_uio->uio_iov; + break; + default: +#ifdef INVARIANTS + panic("%s: invalid buffer type %d", __func__, cb->cb_type); +#endif + break; + } +} + +void +crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount) +{ + size_t remain; + + switch (cc->cc_type) { + case CRYPTO_BUF_CONTIG: + MPASS(cc->cc_buf_len >= amount); + cc->cc_buf += amount; + cc->cc_buf_len -= amount; + break; + case CRYPTO_BUF_MBUF: + for (;;) { + remain = cc->cc_mbuf->m_len - cc->cc_offset; + if (amount < remain) { + cc->cc_offset += amount; + break; + } + amount -= remain; + cc->cc_mbuf = cc->cc_mbuf->m_next; + cc->cc_offset = 0; + if (amount == 0) + break; + } + break; + case CRYPTO_BUF_UIO: + for (;;) { + remain = cc->cc_iov->iov_len - cc->cc_offset; + if (amount < remain) { + cc->cc_offset += amount; + break; + } + amount -= remain; + cc->cc_iov++; + cc->cc_offset = 0; + if (amount == 0) + break; + } + break; + default: +#ifdef INVARIANTS + panic("%s: invalid buffer type %d", __func__, cc->cc_type); +#endif + break; + } +} + +void * +crypto_cursor_segbase(struct crypto_buffer_cursor *cc) +{ + switch (cc->cc_type) { + case CRYPTO_BUF_CONTIG: + return (cc->cc_buf); + case CRYPTO_BUF_MBUF: + if (cc->cc_mbuf == NULL) + return (NULL); + KASSERT((cc->cc_mbuf->m_flags & M_EXTPG) == 0, + ("%s: not supported for unmapped mbufs", __func__)); + return (mtod(cc->cc_mbuf, char *) + cc->cc_offset); + case CRYPTO_BUF_UIO: + return ((char *)cc->cc_iov->iov_base + cc->cc_offset); + default: +#ifdef INVARIANTS + panic("%s: invalid buffer type %d", __func__, cc->cc_type); +#endif + return (NULL); + } +} + +size_t +crypto_cursor_seglen(struct crypto_buffer_cursor *cc) +{ + switch (cc->cc_type) { + case CRYPTO_BUF_CONTIG: + return (cc->cc_buf_len); + case CRYPTO_BUF_MBUF: + if (cc->cc_mbuf == NULL) + return (0); + return (cc->cc_mbuf->m_len - cc->cc_offset); + case CRYPTO_BUF_UIO: + return (cc->cc_iov->iov_len - cc->cc_offset); + default: +#ifdef INVARIANTS + panic("%s: invalid buffer type %d", __func__, cc->cc_type); +#endif + return (0); + } +} + +void +crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size, + const void *vsrc) +{ + size_t remain, todo; + const char *src; + char *dst; + + src = vsrc; + switch (cc->cc_type) { + case CRYPTO_BUF_CONTIG: + MPASS(cc->cc_buf_len >= size); + memcpy(cc->cc_buf, src, size); + cc->cc_buf += size; + cc->cc_buf_len -= size; + break; + case 
CRYPTO_BUF_MBUF: + for (;;) { + KASSERT((cc->cc_mbuf->m_flags & M_EXTPG) == 0, + ("%s: not supported for unmapped mbufs", __func__)); + dst = mtod(cc->cc_mbuf, char *) + cc->cc_offset; + remain = cc->cc_mbuf->m_len - cc->cc_offset; + todo = MIN(remain, size); + memcpy(dst, src, todo); + dst += todo; + if (todo < remain) { + cc->cc_offset += todo; + break; + } + size -= todo; + cc->cc_mbuf = cc->cc_mbuf->m_next; + cc->cc_offset = 0; + if (size == 0) + break; + } + break; + case CRYPTO_BUF_UIO: + for (;;) { + dst = (char *)cc->cc_iov->iov_base + cc->cc_offset; + remain = cc->cc_iov->iov_len - cc->cc_offset; + todo = MIN(remain, size); + memcpy(dst, src, todo); + dst += todo; + if (todo < remain) { + cc->cc_offset += todo; + break; + } + size -= todo; + cc->cc_iov++; + cc->cc_offset = 0; + if (size == 0) + break; + } + break; + default: +#ifdef INVARIANTS + panic("%s: invalid buffer type %d", __func__, cc->cc_type); +#endif + break; + } +} + +void +crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size, void *vdst) +{ + size_t remain, todo; + const char *src; + char *dst; + + dst = vdst; + switch (cc->cc_type) { + case CRYPTO_BUF_CONTIG: + MPASS(cc->cc_buf_len >= size); + memcpy(dst, cc->cc_buf, size); + cc->cc_buf += size; + cc->cc_buf_len -= size; + break; + case CRYPTO_BUF_MBUF: + for (;;) { + KASSERT((cc->cc_mbuf->m_flags & M_EXTPG) == 0, + ("%s: not supported for unmapped mbufs", __func__)); + src = mtod(cc->cc_mbuf, const char *) + cc->cc_offset; + remain = cc->cc_mbuf->m_len - cc->cc_offset; + todo = MIN(remain, size); + memcpy(dst, src, todo); + dst += todo; + if (todo < remain) { + cc->cc_offset += todo; + break; + } + size -= todo; + cc->cc_mbuf = cc->cc_mbuf->m_next; + cc->cc_offset = 0; + if (size == 0) + break; + } + break; + case CRYPTO_BUF_UIO: + for (;;) { + src = (const char *)cc->cc_iov->iov_base + + cc->cc_offset; + remain = cc->cc_iov->iov_len - cc->cc_offset; + todo = MIN(remain, size); + memcpy(dst, src, todo); + dst += todo; + if (todo < remain) { + cc->cc_offset += todo; + break; + } + size -= todo; + cc->cc_iov++; + cc->cc_offset = 0; + if (size == 0) + break; + } + break; + default: +#ifdef INVARIANTS + panic("%s: invalid buffer type %d", __func__, cc->cc_type); +#endif + break; + } +} + +/* + * To avoid advancing 'cursor', make a local copy that gets advanced + * instead. + */ +void +crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size, + void *vdst) +{ + struct crypto_buffer_cursor copy; + + copy = *cc; + crypto_cursor_copydata(©, size, vdst); +} + /* * Apply function f to the data in an iovec list starting "off" bytes from * the beginning, continuing for "len" bytes. 
*/ -int +static int cuio_apply(struct uio *uio, int off, int len, int (*f)(void *, void *, u_int), void *arg) { @@ -159,19 +411,28 @@ cuio_apply(struct uio *uio, int off, int len, int (*f)(void *, void *, u_int), void crypto_copyback(struct cryptop *crp, int off, int size, const void *src) { + struct crypto_buffer *cb; - switch (crp->crp_buf_type) { + if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) + cb = &crp->crp_obuf; + else + cb = &crp->crp_buf; + switch (cb->cb_type) { case CRYPTO_BUF_MBUF: - m_copyback(crp->crp_mbuf, off, size, src); + m_copyback(cb->cb_mbuf, off, size, src); break; case CRYPTO_BUF_UIO: - cuio_copyback(crp->crp_uio, off, size, src); + cuio_copyback(cb->cb_uio, off, size, src); break; case CRYPTO_BUF_CONTIG: - bcopy(src, crp->crp_buf + off, size); + MPASS(off + size <= cb->cb_buf_len); + bcopy(src, cb->cb_buf + off, size); break; default: - panic("invalid crp buf type %d", crp->crp_buf_type); +#ifdef INVARIANTS + panic("invalid crp buf type %d", cb->cb_type); +#endif + break; } } @@ -179,88 +440,57 @@ void crypto_copydata(struct cryptop *crp, int off, int size, void *dst) { - switch (crp->crp_buf_type) { + switch (crp->crp_buf.cb_type) { case CRYPTO_BUF_MBUF: - m_copydata(crp->crp_mbuf, off, size, dst); + m_copydata(crp->crp_buf.cb_mbuf, off, size, dst); break; case CRYPTO_BUF_UIO: - cuio_copydata(crp->crp_uio, off, size, dst); + cuio_copydata(crp->crp_buf.cb_uio, off, size, dst); break; case CRYPTO_BUF_CONTIG: - bcopy(crp->crp_buf + off, dst, size); + MPASS(off + size <= crp->crp_buf.cb_buf_len); + bcopy(crp->crp_buf.cb_buf + off, dst, size); break; default: - panic("invalid crp buf type %d", crp->crp_buf_type); +#ifdef INVARIANTS + panic("invalid crp buf type %d", crp->crp_buf.cb_type); +#endif + break; } } +int +crypto_apply_buf(struct crypto_buffer *cb, int off, int len, + int (*f)(void *, void *, u_int), void *arg) +{ + int error; + + switch (cb->cb_type) { + case CRYPTO_BUF_MBUF: + error = m_apply(cb->cb_mbuf, off, len, f, arg); + break; + case CRYPTO_BUF_UIO: + error = cuio_apply(cb->cb_uio, off, len, f, arg); + break; + case CRYPTO_BUF_CONTIG: + MPASS(off + len <= cb->cb_buf_len); + error = (*f)(arg, cb->cb_buf + off, len); + break; + default: +#ifdef INVARIANTS + panic("invalid crypto buf type %d", cb->cb_type); +#endif + error = 0; + break; + } + return (error); +} + int crypto_apply(struct cryptop *crp, int off, int len, int (*f)(void *, void *, u_int), void *arg) { - int error; - - switch (crp->crp_buf_type) { - case CRYPTO_BUF_MBUF: - error = m_apply(crp->crp_mbuf, off, len, f, arg); - break; - case CRYPTO_BUF_UIO: - error = cuio_apply(crp->crp_uio, off, len, f, arg); - break; - case CRYPTO_BUF_CONTIG: - error = (*f)(arg, crp->crp_buf + off, len); - break; - default: - panic("invalid crp buf type %d", crp->crp_buf_type); - } - return (error); -} - -int -crypto_mbuftoiov(struct mbuf *mbuf, struct iovec **iovptr, int *cnt, - int *allocated) -{ - struct iovec *iov; - struct mbuf *m, *mtmp; - int i, j; - - *allocated = 0; - iov = *iovptr; - if (iov == NULL) - *cnt = 0; - - m = mbuf; - i = 0; - while (m != NULL) { - if (i == *cnt) { - /* we need to allocate a larger array */ - j = 1; - mtmp = m; - while ((mtmp = mtmp->m_next) != NULL) - j++; - iov = malloc(sizeof *iov * (i + j), M_CRYPTO_DATA, - M_NOWAIT); - if (iov == NULL) - return ENOMEM; - *allocated = 1; - *cnt = i + j; - memcpy(iov, *iovptr, sizeof *iov * i); - } - - iov[i].iov_base = m->m_data; - iov[i].iov_len = m->m_len; - - i++; - m = m->m_next; - } - - if (*allocated) - KASSERT(*cnt == i, ("did not 
allocate correct amount: %d != %d", - *cnt, i)); - - *iovptr = iov; - *cnt = i; - return 0; + return (crypto_apply_buf(&crp->crp_buf, off, len, f, arg)); } static inline void * @@ -300,17 +530,28 @@ cuio_contiguous_segment(struct uio *uio, size_t skip, size_t len) } void * -crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len) +crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb, size_t skip, + size_t len) { - switch (crp->crp_buf_type) { + switch (cb->cb_type) { case CRYPTO_BUF_MBUF: - return (m_contiguous_subsegment(crp->crp_mbuf, skip, len)); + return (m_contiguous_subsegment(cb->cb_mbuf, skip, len)); case CRYPTO_BUF_UIO: - return (cuio_contiguous_segment(crp->crp_uio, skip, len)); + return (cuio_contiguous_segment(cb->cb_uio, skip, len)); case CRYPTO_BUF_CONTIG: - return (crp->crp_buf + skip); + MPASS(skip + len <= cb->cb_buf_len); + return (cb->cb_buf + skip); default: - panic("invalid crp buf type %d", crp->crp_buf_type); +#ifdef INVARIANTS + panic("invalid crp buf type %d", cb->cb_type); +#endif + return (NULL); } } + +void * +crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len) +{ + return (crypto_buffer_contiguous_subsegment(&crp->crp_buf, skip, len)); +} diff --git a/sys/opencrypto/crypto.c b/sys/opencrypto/crypto.c index b4aafdfb6914..73a38ea399d4 100644 --- a/sys/opencrypto/crypto.c +++ b/sys/opencrypto/crypto.c @@ -69,12 +69,14 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include #include +#include #include @@ -753,7 +755,7 @@ check_csp(const struct crypto_session_params *csp) struct auth_hash *axf; /* Mode-independent checks. */ - if (csp->csp_flags != 0) + if ((csp->csp_flags & ~CSP_F_SEPARATE_OUTPUT) != 0) return (false); if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 || csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0) @@ -767,7 +769,7 @@ check_csp(const struct crypto_session_params *csp) case CSP_MODE_COMPRESS: if (!alg_is_compression(csp->csp_cipher_alg)) return (false); - if (csp->csp_flags != 0) + if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) return (false); if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 || csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || @@ -1206,20 +1208,66 @@ crypto_unblock(u_int32_t driverid, int what) return err; } +size_t +crypto_buffer_len(struct crypto_buffer *cb) +{ + switch (cb->cb_type) { + case CRYPTO_BUF_CONTIG: + return (cb->cb_buf_len); + case CRYPTO_BUF_MBUF: + if (cb->cb_mbuf->m_flags & M_PKTHDR) + return (cb->cb_mbuf->m_pkthdr.len); + return (m_length(cb->cb_mbuf, NULL)); + case CRYPTO_BUF_UIO: + return (cb->cb_uio->uio_resid); + default: + return (0); + } +} + #ifdef INVARIANTS /* Various sanity checks on crypto requests. 
*/ +static void +cb_sanity(struct crypto_buffer *cb, const char *name) +{ + KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST, + ("incoming crp with invalid %s buffer type", name)); + if (cb->cb_type == CRYPTO_BUF_CONTIG) + KASSERT(cb->cb_buf_len >= 0, + ("incoming crp with -ve %s buffer length", name)); +} + static void crp_sanity(struct cryptop *crp) { struct crypto_session_params *csp; + struct crypto_buffer *out; + size_t ilen, len, olen; KASSERT(crp->crp_session != NULL, ("incoming crp without a session")); - KASSERT(crp->crp_ilen >= 0, ("incoming crp with -ve input length")); + KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE && + crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST, + ("incoming crp with invalid output buffer type")); KASSERT(crp->crp_etype == 0, ("incoming crp with error")); KASSERT(!(crp->crp_flags & CRYPTO_F_DONE), ("incoming crp already done")); csp = &crp->crp_session->csp; + cb_sanity(&crp->crp_buf, "input"); + ilen = crypto_buffer_len(&crp->crp_buf); + olen = ilen; + out = NULL; + if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) { + if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) { + cb_sanity(&crp->crp_obuf, "output"); + out = &crp->crp_obuf; + olen = crypto_buffer_len(out); + } + } else + KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE, + ("incoming crp with separate output buffer " + "but no session support")); + switch (csp->csp_mode) { case CSP_MODE_COMPRESS: KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS || @@ -1257,17 +1305,14 @@ crp_sanity(struct cryptop *crp) ("invalid ETA op %x", crp->crp_op)); break; } - KASSERT(crp->crp_buf_type >= CRYPTO_BUF_CONTIG && - crp->crp_buf_type <= CRYPTO_BUF_MBUF, - ("invalid crp buffer type %d", crp->crp_buf_type)); if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { KASSERT(crp->crp_aad_start == 0 || - crp->crp_aad_start < crp->crp_ilen, + crp->crp_aad_start < ilen, ("invalid AAD start")); KASSERT(crp->crp_aad_length != 0 || crp->crp_aad_start == 0, ("AAD with zero length and non-zero start")); KASSERT(crp->crp_aad_length == 0 || - crp->crp_aad_start + crp->crp_aad_length <= crp->crp_ilen, + crp->crp_aad_start + crp->crp_aad_length <= ilen, ("AAD outside input length")); } else { KASSERT(crp->crp_aad_start == 0 && crp->crp_aad_length == 0, @@ -1282,25 +1327,39 @@ crp_sanity(struct cryptop *crp) KASSERT(crp->crp_iv_start == 0, ("IV_SEPARATE used with non-zero IV start")); } else { - KASSERT(crp->crp_iv_start < crp->crp_ilen, + KASSERT(crp->crp_iv_start < ilen, ("invalid IV start")); - KASSERT(crp->crp_iv_start + csp->csp_ivlen <= crp->crp_ilen, - ("IV outside input length")); + KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen, + ("IV outside buffer length")); } + /* XXX: payload_start of 0 should always be < ilen? 
*/ KASSERT(crp->crp_payload_start == 0 || - crp->crp_payload_start < crp->crp_ilen, + crp->crp_payload_start < ilen, ("invalid payload start")); KASSERT(crp->crp_payload_start + crp->crp_payload_length <= - crp->crp_ilen, ("payload outside input length")); + ilen, ("payload outside input buffer")); + if (out == NULL) { + KASSERT(crp->crp_payload_output_start == 0, + ("payload output start non-zero without output buffer")); + } else { + KASSERT(crp->crp_payload_output_start < olen, + ("invalid payload output start")); + KASSERT(crp->crp_payload_output_start + + crp->crp_payload_length <= olen, + ("payload outside output buffer")); + } if (csp->csp_mode == CSP_MODE_DIGEST || csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { + if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) + len = ilen; + else + len = olen; KASSERT(crp->crp_digest_start == 0 || - crp->crp_digest_start < crp->crp_ilen, + crp->crp_digest_start < len, ("invalid digest start")); /* XXX: For the mlen == 0 case this check isn't perfect. */ - KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= - crp->crp_ilen, - ("digest outside input length")); + KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len, + ("digest outside buffer")); } else { KASSERT(crp->crp_digest_start == 0, ("non-zero digest start for request without a digest")); @@ -2143,10 +2202,10 @@ DB_SHOW_COMMAND(crypto, db_show_crypto) "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", "Device", "Callback"); TAILQ_FOREACH(crp, &crp_q, crp_next) { - db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n" + db_printf("%4u %08x %4u %4u %04x %8p %8p\n" , crp->crp_session->cap->cc_hid , (int) crypto_ses2caps(crp->crp_session) - , crp->crp_ilen, crp->crp_olen + , crp->crp_olen , crp->crp_etype , crp->crp_flags , device_get_nameunit(crp->crp_session->cap->cc_dev) diff --git a/sys/opencrypto/cryptodev.c b/sys/opencrypto/cryptodev.c index 653c951e2da1..4046d1b3561a 100644 --- a/sys/opencrypto/cryptodev.c +++ b/sys/opencrypto/cryptodev.c @@ -948,10 +948,8 @@ cryptodev_op( goto bail; } - crp->crp_ilen = cop->len + cse->hashsize; crp->crp_flags = CRYPTO_F_CBIMM | (cop->flags & COP_F_BATCH); - crp->crp_buf = cod->buf; - crp->crp_buf_type = CRYPTO_BUF_CONTIG; + crypto_use_buf(crp, cod->buf, cop->len + cse->hashsize); crp->crp_callback = cryptodev_cb; crp->crp_opaque = cod; @@ -1129,10 +1127,9 @@ cryptodev_aead( goto bail; } - crp->crp_ilen = caead->aadlen + caead->len + cse->hashsize; crp->crp_flags = CRYPTO_F_CBIMM | (caead->flags & COP_F_BATCH); - crp->crp_buf = cod->buf; - crp->crp_buf_type = CRYPTO_BUF_CONTIG; + crypto_use_buf(crp, cod->buf, caead->aadlen + caead->len + + cse->hashsize); crp->crp_callback = cryptodev_cb; crp->crp_opaque = cod; diff --git a/sys/opencrypto/cryptodev.h b/sys/opencrypto/cryptodev.h index 76553184bc6a..4483fc122f28 100644 --- a/sys/opencrypto/cryptodev.h +++ b/sys/opencrypto/cryptodev.h @@ -383,7 +383,9 @@ struct crypto_session_params { int csp_flags; - int csp_ivlen; /* IV length in bytes. */ +#define CSP_F_SEPARATE_OUTPUT 0x0001 /* Requests can use separate output */ + + int csp_ivlen; /* IV length in bytes. */ int csp_cipher_alg; int csp_cipher_klen; /* Key length in bytes. */ @@ -396,6 +398,47 @@ struct crypto_session_params { 0 means all. */ }; +enum crypto_buffer_type { + CRYPTO_BUF_NONE = 0, + CRYPTO_BUF_CONTIG, + CRYPTO_BUF_UIO, + CRYPTO_BUF_MBUF, + CRYPTO_BUF_LAST = CRYPTO_BUF_MBUF +}; + +/* + * Description of a data buffer for a request. 
Requests can either + * have a single buffer that is modified in place or separate input + * and output buffers. + */ +struct crypto_buffer { + union { + struct { + char *cb_buf; + int cb_buf_len; + }; + struct mbuf *cb_mbuf; + struct uio *cb_uio; + }; + enum crypto_buffer_type cb_type; +}; + +/* + * A cursor is used to iterate through a crypto request data buffer. + */ +struct crypto_buffer_cursor { + union { + char *cc_buf; + struct mbuf *cc_mbuf; + struct iovec *cc_iov; + }; + union { + int cc_buf_len; + size_t cc_offset; + }; + enum crypto_buffer_type cc_type; +}; + /* Structure describing complete operation */ struct cryptop { TAILQ_ENTRY(cryptop) crp_next; @@ -403,7 +446,6 @@ struct cryptop { struct task crp_task; crypto_session_t crp_session; /* Session */ - int crp_ilen; /* Input data total length */ int crp_olen; /* Result total length */ int crp_etype; /* @@ -434,12 +476,8 @@ struct cryptop { int crp_op; - union { - caddr_t crp_buf; /* Data to be processed */ - struct mbuf *crp_mbuf; - struct uio *crp_uio; - }; - int crp_buf_type; /* Which union member describes data. */ + struct crypto_buffer crp_buf; + struct crypto_buffer crp_obuf; int crp_aad_start; /* Location of AAD. */ int crp_aad_length; /* 0 => no AAD. */ @@ -447,6 +485,7 @@ struct cryptop { * the session. */ int crp_payload_start; /* Location of ciphertext. */ + int crp_payload_output_start; int crp_payload_length; int crp_digest_start; /* Location of MAC/tag. Length is * from the session. @@ -469,16 +508,72 @@ struct cryptop { */ }; -#define CRYPTOP_ASYNC(crp) \ +static __inline void +_crypto_use_buf(struct crypto_buffer *cb, void *buf, int len) +{ + cb->cb_buf = buf; + cb->cb_buf_len = len; + cb->cb_type = CRYPTO_BUF_CONTIG; +} + +static __inline void +_crypto_use_mbuf(struct crypto_buffer *cb, struct mbuf *m) +{ + cb->cb_mbuf = m; + cb->cb_type = CRYPTO_BUF_MBUF; +} + +static __inline void +_crypto_use_uio(struct crypto_buffer *cb, struct uio *uio) +{ + cb->cb_uio = uio; + cb->cb_type = CRYPTO_BUF_UIO; +} + +static __inline void +crypto_use_buf(struct cryptop *crp, void *buf, int len) +{ + _crypto_use_buf(&crp->crp_buf, buf, len); +} + +static __inline void +crypto_use_mbuf(struct cryptop *crp, struct mbuf *m) +{ + _crypto_use_mbuf(&crp->crp_buf, m); +} + +static __inline void +crypto_use_uio(struct cryptop *crp, struct uio *uio) +{ + _crypto_use_uio(&crp->crp_buf, uio); +} + +static __inline void +crypto_use_output_buf(struct cryptop *crp, void *buf, int len) +{ + _crypto_use_buf(&crp->crp_obuf, buf, len); +} + +static __inline void +crypto_use_output_mbuf(struct cryptop *crp, struct mbuf *m) +{ + _crypto_use_mbuf(&crp->crp_obuf, m); +} + +static __inline void +crypto_use_output_uio(struct cryptop *crp, struct uio *uio) +{ + _crypto_use_uio(&crp->crp_obuf, uio); +} + +#define CRYPTOP_ASYNC(crp) \ (((crp)->crp_flags & CRYPTO_F_ASYNC) && \ crypto_ses2caps((crp)->crp_session) & CRYPTOCAP_F_SYNC) #define CRYPTOP_ASYNC_KEEPORDER(crp) \ (CRYPTOP_ASYNC(crp) && \ (crp)->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) - -#define CRYPTO_BUF_CONTIG 0x0 -#define CRYPTO_BUF_UIO 0x1 -#define CRYPTO_BUF_MBUF 0x2 +#define CRYPTO_HAS_OUTPUT_BUFFER(crp) \ + ((crp)->crp_obuf.cb_type != CRYPTO_BUF_NONE) /* Flags in crp_op. */ #define CRYPTO_OP_DECRYPT 0x0 @@ -559,26 +654,11 @@ void hmac_init_opad(struct auth_hash *axf, const char *key, int klen, /* * Crypto-related utility routines used mainly by drivers. * - * XXX these don't really belong here; but for now they're - * kept apart from the rest of the system. 
- * * Similar to m_copyback/data, *_copyback copy data from the 'src' * buffer into the crypto request's data buffer while *_copydata copy * data from the crypto request's data buffer into the the 'dst' * buffer. */ -struct uio; -extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp); -extern void cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp); -extern int cuio_getptr(struct uio *uio, int loc, int *off); -extern int cuio_apply(struct uio *uio, int off, int len, - int (*f)(void *, void *, u_int), void *arg); - -struct mbuf; -struct iovec; -extern int crypto_mbuftoiov(struct mbuf *mbuf, struct iovec **iovptr, - int *cnt, int *allocated); - void crypto_copyback(struct cryptop *crp, int off, int size, const void *src); void crypto_copydata(struct cryptop *crp, int off, int size, void *dst); @@ -587,6 +667,23 @@ int crypto_apply(struct cryptop *crp, int off, int len, void *crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len); +int crypto_apply_buf(struct crypto_buffer *cb, int off, int len, + int (*f)(void *, void *, u_int), void *arg); +void *crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb, + size_t skip, size_t len); +size_t crypto_buffer_len(struct crypto_buffer *cb); +void crypto_cursor_init(struct crypto_buffer_cursor *cc, + const struct crypto_buffer *cb); +void crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount); +void *crypto_cursor_segbase(struct crypto_buffer_cursor *cc); +size_t crypto_cursor_seglen(struct crypto_buffer_cursor *cc); +void crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size, + const void *vsrc); +void crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size, + void *vdst); +void crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size, + void *vdst); + static __inline void crypto_read_iv(struct cryptop *crp, void *iv) { diff --git a/sys/opencrypto/cryptosoft.c b/sys/opencrypto/cryptosoft.c index 4d37cd4998e9..db2f611db597 100644 --- a/sys/opencrypto/cryptosoft.c +++ b/sys/opencrypto/cryptosoft.c @@ -105,11 +105,10 @@ swcr_encdec(struct swcr_session *ses, struct cryptop *crp) const struct crypto_session_params *csp; struct swcr_encdec *sw; struct enc_xform *exf; - int i, j, k, blks, ind, count, ivlen; - struct uio *uio, uiolcl; - struct iovec iovlcl[4]; - struct iovec *iov; - int iovcnt, iovalloc; + int i, blks, inlen, ivlen, outlen, resid; + struct crypto_buffer_cursor cc_in, cc_out; + const char *inblk; + char *outblk; int error; bool encrypting; @@ -142,32 +141,6 @@ swcr_encdec(struct swcr_session *ses, struct cryptop *crp) return (error); } - iov = iovlcl; - iovcnt = nitems(iovlcl); - iovalloc = 0; - uio = &uiolcl; - switch (crp->crp_buf_type) { - case CRYPTO_BUF_MBUF: - error = crypto_mbuftoiov(crp->crp_mbuf, &iov, &iovcnt, - &iovalloc); - if (error) - return (error); - uio->uio_iov = iov; - uio->uio_iovcnt = iovcnt; - break; - case CRYPTO_BUF_UIO: - uio = crp->crp_uio; - break; - case CRYPTO_BUF_CONTIG: - iov[0].iov_base = crp->crp_buf; - iov[0].iov_len = crp->crp_ilen; - uio->uio_iov = iov; - uio->uio_iovcnt = 1; - break; - } - - ivp = iv; - if (exf->reinit) { /* * xforms that provide a reinit method perform all IV @@ -176,164 +149,135 @@ swcr_encdec(struct swcr_session *ses, struct cryptop *crp) exf->reinit(sw->sw_kschedule, iv); } - count = crp->crp_payload_start; - ind = cuio_getptr(uio, count, &k); - if (ind == -1) { - error = EINVAL; - goto out; - } + ivp = iv; - i = crp->crp_payload_length; + crypto_cursor_init(&cc_in, &crp->crp_buf); + 
crypto_cursor_advance(&cc_in, crp->crp_payload_start); + inlen = crypto_cursor_seglen(&cc_in); + inblk = crypto_cursor_segbase(&cc_in); + if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { + crypto_cursor_init(&cc_out, &crp->crp_obuf); + crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); + } else + cc_out = cc_in; + outlen = crypto_cursor_seglen(&cc_out); + outblk = crypto_cursor_segbase(&cc_out); + + resid = crp->crp_payload_length; encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op); - while (i >= blks) { + /* + * Loop through encrypting blocks. 'inlen' is the remaining + * length of the current segment in the input buffer. + * 'outlen' is the remaining length of current segment in the + * output buffer. + */ + while (resid >= blks) { /* - * If there's insufficient data at the end of - * an iovec, we have to do some copying. + * If the current block is not contained within the + * current input/output segment, use 'blk' as a local + * buffer. */ - if (uio->uio_iov[ind].iov_len < k + blks && - uio->uio_iov[ind].iov_len != k) { - cuio_copydata(uio, count, blks, blk); - - /* Actual encryption/decryption */ - if (exf->reinit) { - if (encrypting) { - exf->encrypt(sw->sw_kschedule, blk, - blk); - } else { - exf->decrypt(sw->sw_kschedule, blk, - blk); - } - } else if (encrypting) { - /* XOR with previous block */ - for (j = 0; j < blks; j++) - blk[j] ^= ivp[j]; - - exf->encrypt(sw->sw_kschedule, blk, blk); - - /* - * Keep encrypted block for XOR'ing - * with next block - */ - bcopy(blk, iv, blks); - ivp = iv; - } else { /* decrypt */ - /* - * Keep encrypted block for XOR'ing - * with next block - */ - nivp = (ivp == iv) ? iv2 : iv; - bcopy(blk, nivp, blks); - - exf->decrypt(sw->sw_kschedule, blk, blk); - - /* XOR with previous block */ - for (j = 0; j < blks; j++) - blk[j] ^= ivp[j]; - - ivp = nivp; - } - - /* Copy back decrypted block */ - cuio_copyback(uio, count, blks, blk); - - count += blks; - - /* Advance pointer */ - ind = cuio_getptr(uio, count, &k); - if (ind == -1) { - error = EINVAL; - goto out; - } - - i -= blks; - - /* Could be done... */ - if (i == 0) - break; - } - - while (uio->uio_iov[ind].iov_len >= k + blks && i >= blks) { - uint8_t *idat; - - idat = (uint8_t *)uio->uio_iov[ind].iov_base + k; - - if (exf->reinit) { - if (encrypting) - exf->encrypt(sw->sw_kschedule, - idat, idat); - else - exf->decrypt(sw->sw_kschedule, - idat, idat); - } else if (encrypting) { - /* XOR with previous block/IV */ - for (j = 0; j < blks; j++) - idat[j] ^= ivp[j]; - - exf->encrypt(sw->sw_kschedule, idat, idat); - ivp = idat; - } else { /* decrypt */ - /* - * Keep encrypted block to be used - * in next block's processing. - */ - nivp = (ivp == iv) ? iv2 : iv; - bcopy(idat, nivp, blks); - - exf->decrypt(sw->sw_kschedule, idat, idat); - - /* XOR with previous block/IV */ - for (j = 0; j < blks; j++) - idat[j] ^= ivp[j]; - - ivp = nivp; - } - - count += blks; - k += blks; - i -= blks; + if (inlen < blks) { + crypto_cursor_copydata(&cc_in, blks, blk); + inblk = blk; } + if (outlen < blks) + outblk = blk; /* - * Advance to the next iov if the end of the current iov - * is aligned with the end of a cipher block. - * Note that the code is equivalent to calling: - * ind = cuio_getptr(uio, count, &k); + * Ciphers without a 'reinit' hook are assumed to be + * used in CBC mode where the chaining is done here. 
*/ - if (i > 0 && k == uio->uio_iov[ind].iov_len) { - k = 0; - ind++; - if (ind >= uio->uio_iovcnt) { - error = EINVAL; - goto out; - } + if (exf->reinit != NULL) { + if (encrypting) + exf->encrypt(sw->sw_kschedule, inblk, outblk); + else + exf->decrypt(sw->sw_kschedule, inblk, outblk); + } else if (encrypting) { + /* XOR with previous block */ + for (i = 0; i < blks; i++) + outblk[i] = inblk[i] ^ ivp[i]; + + exf->encrypt(sw->sw_kschedule, outblk, outblk); + + /* + * Keep encrypted block for XOR'ing + * with next block + */ + memcpy(iv, outblk, blks); + ivp = iv; + } else { /* decrypt */ + /* + * Keep encrypted block for XOR'ing + * with next block + */ + nivp = (ivp == iv) ? iv2 : iv; + memcpy(nivp, inblk, blks); + + exf->decrypt(sw->sw_kschedule, inblk, outblk); + + /* XOR with previous block */ + for (i = 0; i < blks; i++) + outblk[i] ^= ivp[i]; + + ivp = nivp; } + + if (inlen < blks) { + inlen = crypto_cursor_seglen(&cc_in); + inblk = crypto_cursor_segbase(&cc_in); + } else { + crypto_cursor_advance(&cc_in, blks); + inlen -= blks; + inblk += blks; + } + + if (outlen < blks) { + crypto_cursor_copyback(&cc_out, blks, blk); + outlen = crypto_cursor_seglen(&cc_out); + outblk = crypto_cursor_segbase(&cc_out); + } else { + crypto_cursor_advance(&cc_out, blks); + outlen -= blks; + outblk += blks; + } + + resid -= blks; } /* Handle trailing partial block for stream ciphers. */ - if (i > 0) { + if (resid > 0) { KASSERT(exf->native_blocksize != 0, ("%s: partial block of %d bytes for cipher %s", __func__, i, exf->name)); KASSERT(exf->reinit != NULL, ("%s: partial block cipher %s without reinit hook", __func__, exf->name)); - KASSERT(i < blks, ("%s: partial block too big", __func__)); + KASSERT(resid < blks, ("%s: partial block too big", __func__)); - cuio_copydata(uio, count, i, blk); - if (encrypting) { - exf->encrypt_last(sw->sw_kschedule, blk, blk, i); - } else { - exf->decrypt_last(sw->sw_kschedule, blk, blk, i); - } - cuio_copyback(uio, count, i, blk); + inlen = crypto_cursor_seglen(&cc_in); + outlen = crypto_cursor_seglen(&cc_out); + if (inlen < resid) { + crypto_cursor_copydata(&cc_in, resid, blk); + inblk = blk; + } else + inblk = crypto_cursor_segbase(&cc_in); + if (outlen < resid) + outblk = blk; + else + outblk = crypto_cursor_segbase(&cc_out); + if (encrypting) + exf->encrypt_last(sw->sw_kschedule, inblk, outblk, + resid); + else + exf->decrypt_last(sw->sw_kschedule, inblk, outblk, + resid); + if (outlen < resid) + crypto_cursor_copyback(&cc_out, resid, blk); } -out: - if (iovalloc) - free(iov, M_CRYPTO_DATA); - - return (error); + return (0); } static void @@ -394,8 +338,15 @@ swcr_authcompute(struct swcr_session *ses, struct cryptop *crp) if (err) return err; - err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, - (int (*)(void *, void *, unsigned int))axf->Update, &ctx); + if (CRYPTO_HAS_OUTPUT_BUFFER(crp) && + CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) + err = crypto_apply_buf(&crp->crp_obuf, + crp->crp_payload_output_start, crp->crp_payload_length, + (int (*)(void *, void *, unsigned int))axf->Update, &ctx); + else + err = crypto_apply(crp, crp->crp_payload_start, + crp->crp_payload_length, + (int (*)(void *, void *, unsigned int))axf->Update, &ctx); if (err) return err; @@ -453,11 +404,12 @@ swcr_gmac(struct swcr_session *ses, struct cryptop *crp) u_char aalg[AALG_MAX_RESULT_LEN]; u_char uaalg[AALG_MAX_RESULT_LEN]; u_char iv[EALG_MAX_BLOCK_LEN]; + struct crypto_buffer_cursor cc; union authctx ctx; struct swcr_auth *swa; struct auth_hash *axf; uint32_t *blkp; - int 
blksz, i, ivlen, len; + int blksz, ivlen, len, resid; swa = &ses->swcr_auth; axf = swa->sw_axf; @@ -470,9 +422,11 @@ swcr_gmac(struct swcr_session *ses, struct cryptop *crp) crypto_read_iv(crp, iv); axf->Reinit(&ctx, iv, ivlen); - for (i = 0; i < crp->crp_payload_length; i += blksz) { - len = MIN(crp->crp_payload_length - i, blksz); - crypto_copydata(crp, crp->crp_payload_start + i, len, blk); + crypto_cursor_init(&cc, &crp->crp_buf); + crypto_cursor_advance(&cc, crp->crp_payload_start); + for (resid = crp->crp_payload_length; resid > 0; resid -= len) { + len = MIN(resid, blksz); + crypto_cursor_copydata(&cc, len, blk); bzero(blk + len, blksz - len); axf->Update(&ctx, blk, blksz); } @@ -506,13 +460,14 @@ swcr_gcm(struct swcr_session *ses, struct cryptop *crp) u_char aalg[AALG_MAX_RESULT_LEN]; u_char uaalg[AALG_MAX_RESULT_LEN]; u_char iv[EALG_MAX_BLOCK_LEN]; + struct crypto_buffer_cursor cc_in, cc_out; union authctx ctx; struct swcr_auth *swa; struct swcr_encdec *swe; struct auth_hash *axf; struct enc_xform *exf; uint32_t *blkp; - int blksz, i, ivlen, len, r; + int blksz, ivlen, len, r, resid; swa = &ses->swcr_auth; axf = swa->sw_axf; @@ -536,9 +491,11 @@ swcr_gcm(struct swcr_session *ses, struct cryptop *crp) axf->Reinit(&ctx, iv, ivlen); /* Supply MAC with AAD */ - for (i = 0; i < crp->crp_aad_length; i += blksz) { - len = MIN(crp->crp_aad_length - i, blksz); - crypto_copydata(crp, crp->crp_aad_start + i, len, blk); + crypto_cursor_init(&cc_in, &crp->crp_buf); + crypto_cursor_advance(&cc_in, crp->crp_aad_start); + for (resid = crp->crp_aad_length; resid > 0; resid -= len) { + len = MIN(resid, blksz); + crypto_cursor_copydata(&cc_in, len, blk); bzero(blk + len, blksz - len); axf->Update(&ctx, blk, blksz); } @@ -546,16 +503,22 @@ swcr_gcm(struct swcr_session *ses, struct cryptop *crp) exf->reinit(swe->sw_kschedule, iv); /* Do encryption with MAC */ - for (i = 0; i < crp->crp_payload_length; i += len) { - len = MIN(crp->crp_payload_length - i, blksz); + crypto_cursor_init(&cc_in, &crp->crp_buf); + crypto_cursor_advance(&cc_in, crp->crp_payload_start); + if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { + crypto_cursor_init(&cc_out, &crp->crp_obuf); + crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); + } else + cc_out = cc_in; + for (resid = crp->crp_payload_length; resid > 0; resid -= len) { + len = MIN(resid, blksz); if (len < blksz) bzero(blk, blksz); - crypto_copydata(crp, crp->crp_payload_start + i, len, blk); + crypto_cursor_copydata(&cc_in, len, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { exf->encrypt(swe->sw_kschedule, blk, blk); axf->Update(&ctx, blk, len); - crypto_copyback(crp, crp->crp_payload_start + i, len, - blk); + crypto_cursor_copyback(&cc_out, len, blk); } else { axf->Update(&ctx, blk, len); } @@ -582,15 +545,16 @@ swcr_gcm(struct swcr_session *ses, struct cryptop *crp) return (EBADMSG); /* tag matches, decrypt data */ - for (i = 0; i < crp->crp_payload_length; i += blksz) { - len = MIN(crp->crp_payload_length - i, blksz); + crypto_cursor_init(&cc_in, &crp->crp_buf); + crypto_cursor_advance(&cc_in, crp->crp_payload_start); + for (resid = crp->crp_payload_length; resid > 0; + resid -= len) { + len = MIN(resid, blksz); if (len < blksz) bzero(blk, blksz); - crypto_copydata(crp, crp->crp_payload_start + i, len, - blk); + crypto_cursor_copydata(&cc_in, len, blk); exf->decrypt(swe->sw_kschedule, blk, blk); - crypto_copyback(crp, crp->crp_payload_start + i, len, - blk); + crypto_cursor_copyback(&cc_out, len, blk); } } else { /* Inject the authentication data */ @@ -609,10 
+573,11 @@ swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp) u_char aalg[AALG_MAX_RESULT_LEN]; u_char uaalg[AALG_MAX_RESULT_LEN]; u_char iv[EALG_MAX_BLOCK_LEN]; + struct crypto_buffer_cursor cc; union authctx ctx; struct swcr_auth *swa; struct auth_hash *axf; - int blksz, i, ivlen, len; + int blksz, ivlen, len, resid; swa = &ses->swcr_auth; axf = swa->sw_axf; @@ -632,9 +597,11 @@ swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp) ctx.aes_cbc_mac_ctx.cryptDataLength = 0; axf->Reinit(&ctx, iv, ivlen); - for (i = 0; i < crp->crp_payload_length; i += blksz) { - len = MIN(crp->crp_payload_length - i, blksz); - crypto_copydata(crp, crp->crp_payload_start + i, len, blk); + crypto_cursor_init(&cc, &crp->crp_buf); + crypto_cursor_advance(&cc, crp->crp_aad_start); + for (resid = crp->crp_payload_length; resid > 0; resid -= len) { + len = MIN(resid, blksz); + crypto_cursor_copydata(&cc, len, blk); bzero(blk + len, blksz - len); axf->Update(&ctx, blk, blksz); } @@ -662,12 +629,13 @@ swcr_ccm(struct swcr_session *ses, struct cryptop *crp) u_char aalg[AALG_MAX_RESULT_LEN]; u_char uaalg[AALG_MAX_RESULT_LEN]; u_char iv[EALG_MAX_BLOCK_LEN]; + struct crypto_buffer_cursor cc_in, cc_out; union authctx ctx; struct swcr_auth *swa; struct swcr_encdec *swe; struct auth_hash *axf; struct enc_xform *exf; - int blksz, i, ivlen, len, r; + int blksz, ivlen, len, r, resid; swa = &ses->swcr_auth; axf = swa->sw_axf; @@ -698,9 +666,11 @@ swcr_ccm(struct swcr_session *ses, struct cryptop *crp) axf->Reinit(&ctx, iv, ivlen); /* Supply MAC with AAD */ - for (i = 0; i < crp->crp_aad_length; i += blksz) { - len = MIN(crp->crp_aad_length - i, blksz); - crypto_copydata(crp, crp->crp_aad_start + i, len, blk); + crypto_cursor_init(&cc_in, &crp->crp_buf); + crypto_cursor_advance(&cc_in, crp->crp_aad_start); + for (resid = crp->crp_aad_length; resid > 0; resid -= len) { + len = MIN(resid, blksz); + crypto_cursor_copydata(&cc_in, len, blk); bzero(blk + len, blksz - len); axf->Update(&ctx, blk, blksz); } @@ -708,16 +678,22 @@ swcr_ccm(struct swcr_session *ses, struct cryptop *crp) exf->reinit(swe->sw_kschedule, iv); /* Do encryption/decryption with MAC */ - for (i = 0; i < crp->crp_payload_length; i += len) { - len = MIN(crp->crp_payload_length - i, blksz); + crypto_cursor_init(&cc_in, &crp->crp_buf); + crypto_cursor_advance(&cc_in, crp->crp_payload_start); + if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { + crypto_cursor_init(&cc_out, &crp->crp_obuf); + crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); + } else + cc_out = cc_in; + for (resid = crp->crp_payload_length; resid > 0; resid -= len) { + len = MIN(resid, blksz); if (len < blksz) bzero(blk, blksz); - crypto_copydata(crp, crp->crp_payload_start + i, len, blk); + crypto_cursor_copydata(&cc_in, len, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { axf->Update(&ctx, blk, len); exf->encrypt(swe->sw_kschedule, blk, blk); - crypto_copyback(crp, crp->crp_payload_start + i, len, - blk); + crypto_cursor_copyback(&cc_out, len, blk); } else { /* * One of the problems with CCM+CBC is that @@ -746,15 +722,16 @@ swcr_ccm(struct swcr_session *ses, struct cryptop *crp) /* tag matches, decrypt data */ exf->reinit(swe->sw_kschedule, iv); - for (i = 0; i < crp->crp_payload_length; i += blksz) { - len = MIN(crp->crp_payload_length - i, blksz); + crypto_cursor_init(&cc_in, &crp->crp_buf); + crypto_cursor_advance(&cc_in, crp->crp_payload_start); + for (resid = crp->crp_payload_length; resid > 0; + resid -= len) { + len = MIN(resid, blksz); if (len < blksz) bzero(blk, 
blksz); - crypto_copydata(crp, crp->crp_payload_start + i, len, - blk); + crypto_cursor_copydata(&cc_in, len, blk); exf->decrypt(swe->sw_kschedule, blk, blk); - crypto_copyback(crp, crp->crp_payload_start + i, len, - blk); + crypto_cursor_copyback(&cc_out, len, blk); } } else { /* Inject the authentication data */ @@ -833,13 +810,13 @@ swcr_compdec(struct swcr_session *ses, struct cryptop *crp) */ crypto_copyback(crp, crp->crp_payload_start, result, out); if (result < crp->crp_payload_length) { - switch (crp->crp_buf_type) { + switch (crp->crp_buf.cb_type) { case CRYPTO_BUF_MBUF: adj = result - crp->crp_payload_length; - m_adj(crp->crp_mbuf, adj); + m_adj(crp->crp_buf.cb_mbuf, adj); break; case CRYPTO_BUF_UIO: { - struct uio *uio = crp->crp_uio; + struct uio *uio = crp->crp_buf.cb_uio; int ind; adj = crp->crp_payload_length - result; @@ -858,6 +835,8 @@ swcr_compdec(struct swcr_session *ses, struct cryptop *crp) } } break; + default: + break; } } free(out, M_CRYPTO_DATA); @@ -1134,7 +1113,7 @@ static int swcr_probesession(device_t dev, const struct crypto_session_params *csp) { - if (csp->csp_flags != 0) + if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT)) != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: diff --git a/sys/opencrypto/ktls_ocf.c b/sys/opencrypto/ktls_ocf.c index b607f2eead3d..435b68129d82 100644 --- a/sys/opencrypto/ktls_ocf.c +++ b/sys/opencrypto/ktls_ocf.c @@ -155,18 +155,16 @@ ktls_ocf_tls12_gcm_encrypt(struct ktls_session *tls, crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST; crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE; - crp->crp_buf_type = CRYPTO_BUF_UIO; - crp->crp_uio = &uio; - crp->crp_ilen = uio.uio_resid; + crypto_use_uio(crp, &uio); crp->crp_opaque = oo; crp->crp_callback = ktls_ocf_callback; crp->crp_aad_start = 0; crp->crp_aad_length = sizeof(ad); crp->crp_payload_start = sizeof(ad); - crp->crp_payload_length = crp->crp_ilen - + crp->crp_payload_length = uio.uio_resid - (sizeof(ad) + AES_GMAC_HASH_LEN); - crp->crp_digest_start = crp->crp_ilen - AES_GMAC_HASH_LEN; + crp->crp_digest_start = uio.uio_resid - AES_GMAC_HASH_LEN; counter_u64_add(ocf_tls12_gcm_crypts, 1); for (;;) { @@ -256,18 +254,16 @@ ktls_ocf_tls13_gcm_encrypt(struct ktls_session *tls, crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST; crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE; - crp->crp_buf_type = CRYPTO_BUF_UIO; - crp->crp_uio = &uio; - crp->crp_ilen = uio.uio_resid; + crypto_use_uio(crp, &uio); crp->crp_opaque = oo; crp->crp_callback = ktls_ocf_callback; crp->crp_aad_start = 0; crp->crp_aad_length = sizeof(ad); crp->crp_payload_start = sizeof(ad); - crp->crp_payload_length = crp->crp_ilen - + crp->crp_payload_length = uio.uio_resid - (sizeof(ad) + AES_GMAC_HASH_LEN); - crp->crp_digest_start = crp->crp_ilen - AES_GMAC_HASH_LEN; + crp->crp_digest_start = uio.uio_resid - AES_GMAC_HASH_LEN; memcpy(crp->crp_iv, nonce, sizeof(nonce)); counter_u64_add(ocf_tls13_gcm_crypts, 1); diff --git a/sys/sys/bus_dma.h b/sys/sys/bus_dma.h index 2c14251571cf..4b7396e6def1 100644 --- a/sys/sys/bus_dma.h +++ b/sys/sys/bus_dma.h @@ -111,6 +111,7 @@ /* Forwards needed by prototypes below. 
*/ union ccb; struct bio; +struct crypto_buffer; struct cryptop; struct mbuf; struct memdesc; @@ -270,6 +271,10 @@ int bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio, int bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp, bus_dmamap_callback_t *callback, void *callback_arg, int flags); +int bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, + struct crypto_buffer *cb, + bus_dmamap_callback_t *callback, + void *callback_arg, int flags); /* * Loads any memory descriptor.
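The following sketch is not part of the patch; it illustrates how a consumer might use the request-side additions above. It assumes a plain AES-CBC cipher session, flat input and output buffers with the payload at offset 0, and a separate IV; the function name, callback, and key handling are hypothetical.

/*
 * Hypothetical consumer sketch (illustration only, not part of the
 * patch): create a session that asks for separate output buffer
 * support and queue an encryption request whose ciphertext is written
 * to 'out' instead of overwriting 'in'.  The callback is expected to
 * call crypto_freereq() and, eventually, crypto_freesession().
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <opencrypto/cryptodev.h>

static int
example_encrypt(char *in, char *out, int len, const void *key, int klen,
    int (*done)(struct cryptop *), void *arg)
{
	struct crypto_session_params csp;
	crypto_session_t cses;
	struct cryptop *crp;
	int error;

	memset(&csp, 0, sizeof(csp));
	csp.csp_mode = CSP_MODE_CIPHER;
	csp.csp_flags = CSP_F_SEPARATE_OUTPUT;	/* permit crp_obuf on requests */
	csp.csp_cipher_alg = CRYPTO_AES_CBC;
	csp.csp_cipher_key = key;
	csp.csp_cipher_klen = klen;
	csp.csp_ivlen = 16;
	error = crypto_newsession(&cses, &csp,
	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
	if (error != 0)
		return (error);

	crp = crypto_getreq(cses, M_WAITOK);
	crp->crp_op = CRYPTO_OP_ENCRYPT;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	arc4rand(crp->crp_iv, csp.csp_ivlen, 0);

	/* Plaintext is read from 'in'; ciphertext is written to 'out'. */
	crypto_use_buf(crp, in, len);
	crypto_use_output_buf(crp, out, len);
	crp->crp_payload_start = 0;
	crp->crp_payload_output_start = 0;
	crp->crp_payload_length = len;

	crp->crp_callback = done;
	crp->crp_opaque = arg;
	return (crypto_dispatch(crp));
}

A consumer that cannot rely on CSP_F_SEPARATE_OUTPUT simply leaves crp_obuf unset, in which case the request is processed in place on crp_buf; crypto_copyback() above honours this by writing to crp_obuf only when its cb_type is not CRYPTO_BUF_NONE.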
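The cursor functions declared above can also be exercised on their own. The next sketch, modelled on the copy loops the patch adds to swcr_gcm() and swcr_ccm(), walks the payload region of a request chunk by chunk, reading from the input buffer and writing to the output buffer (or back in place when no output buffer is set). The function name, the 64-byte staging buffer, and the assumption that the payload is a simple copy are illustrative only.

/*
 * Hypothetical driver-side sketch (illustration only): copy the
 * payload of 'crp' from its input buffer to its output buffer using
 * the crypto buffer cursor API.  For in-place requests the output
 * cursor starts as a copy of the input cursor, so each chunk is
 * written back to the location it was just read from.
 */
#include <sys/param.h>
#include <opencrypto/cryptodev.h>

static void
example_copy_payload(struct cryptop *crp)
{
	struct crypto_buffer_cursor cc_in, cc_out;
	char tmp[64];
	int len, resid;

	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;

	for (resid = crp->crp_payload_length; resid > 0; resid -= len) {
		len = MIN(resid, (int)sizeof(tmp));
		crypto_cursor_copydata(&cc_in, len, tmp);
		crypto_cursor_copyback(&cc_out, len, tmp);
	}
}

The two cursors advance independently, which is the same pattern the GCM and CCM paths above rely on: cc_out is initialized from crp_obuf only when CRYPTO_HAS_OUTPUT_BUFFER() is true and otherwise aliases the input cursor.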