Notable upstream pull request merges:
 #16209 -multiple- icp: rip out everything we don't use
 #16230 20c8bdd85 FreeBSD: Update use of UMA-related symbols in
                  arc_available_memory
 #16242 121a2d335 FreeBSD: unregister mountroot eventhandler on unload
 #16258 5de3ac223 vdev_open: clear async fault flag after reopen
 #16270 436731276 zvol: Fix suspend lock leaks
 #16273 c87cb22ba head_errlog: fix use-after-free
 #16284 f72e081fb FreeBSD: Use a statement expression to implement
                  SET_ERROR()
 #16300 a10faf5ce FreeBSD: Use the new freeuio() helper to free dynamically
                  allocated UIOs
 #16302 a7fc4c85e zstd: don't call zstd_mempool_reap if there are no buffers
 #16334 dc91e7452 zdb: dump ZAP_FLAG_UINT64_KEY ZAPs properly

Obtained from:	OpenZFS
OpenZFS commit: 1147a27978
Martin Matuska 2024-07-18 09:44:47 +02:00
commit 75e1fea68a
70 changed files with 704 additions and 3329 deletions


@ -68,13 +68,10 @@ KERNEL_C = \
algs/blake3/blake3_impl.c \
algs/edonr/edonr.c \
algs/modes/modes.c \
algs/modes/cbc.c \
algs/modes/gcm_generic.c \
algs/modes/gcm_pclmulqdq.c \
algs/modes/gcm.c \
algs/modes/ctr.c \
algs/modes/ccm.c \
algs/modes/ecb.c \
algs/sha2/sha2_generic.c \
algs/sha2/sha256_impl.c \
algs/sha2/sha512_impl.c \
@ -84,7 +81,6 @@ KERNEL_C = \
illumos-crypto.c \
io/aes.c \
io/sha2_mod.c \
io/skein_mod.c \
core/kcf_sched.c \
core/kcf_prov_lib.c \
core/kcf_callprov.c \


@ -67,13 +67,10 @@ KERNEL_C = \
algs/blake3/blake3_impl.c \
algs/edonr/edonr.c \
algs/modes/modes.c \
algs/modes/cbc.c \
algs/modes/gcm_generic.c \
algs/modes/gcm_pclmulqdq.c \
algs/modes/gcm.c \
algs/modes/ctr.c \
algs/modes/ccm.c \
algs/modes/ecb.c \
algs/sha2/sha2_generic.c \
algs/sha2/sha256_impl.c \
algs/sha2/sha512_impl.c \
@ -81,7 +78,6 @@ KERNEL_C = \
illumos-crypto.c \
io/aes.c \
io/sha2_mod.c \
io/skein_mod.c \
core/kcf_sched.c \
core/kcf_prov_lib.c \
core/kcf_callprov.c \


@ -6,5 +6,5 @@ Release: 1
Release-Tags: relext
License: CDDL
Author: OpenZFS
Linux-Maximum: 6.8
Linux-Maximum: 6.9
Linux-Minimum: 3.10


@ -48,6 +48,7 @@
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/zap_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
@ -89,6 +90,7 @@
#include <libnvpair.h>
#include <libzutil.h>
#include <libzfs_core.h>
#include <libzdb.h>
@ -1125,16 +1127,33 @@ dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
boolean_t key64 =
!!(zap_getflags(zc.zc_zap) & ZAP_FLAG_UINT64_KEY);
if (key64)
(void) printf("\t\t0x%010lx = ",
*(uint64_t *)attr.za_name);
else
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
prop = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
(void) zap_lookup(os, object, attr.za_name,
attr.za_integer_length, attr.za_num_integers, prop);
if (attr.za_integer_length == 1) {
if (key64)
(void) zap_lookup_uint64(os, object,
(const uint64_t *)attr.za_name, 1,
attr.za_integer_length, attr.za_num_integers,
prop);
else
(void) zap_lookup(os, object, attr.za_name,
attr.za_integer_length, attr.za_num_integers,
prop);
if (attr.za_integer_length == 1 && !key64) {
if (strcmp(attr.za_name,
DSL_CRYPTO_KEY_MASTER_KEY) == 0 ||
strcmp(attr.za_name,
@ -1153,6 +1172,10 @@ dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
} else {
for (i = 0; i < attr.za_num_integers; i++) {
switch (attr.za_integer_length) {
case 1:
(void) printf("%u ",
((uint8_t *)prop)[i]);
break;
case 2:
(void) printf("%u ",
((uint16_t *)prop)[i]);
@ -8924,6 +8947,9 @@ main(int argc, char **argv)
boolean_t target_is_spa = B_TRUE, dataset_lookup = B_FALSE;
nvlist_t *cfg = NULL;
struct sigaction action;
boolean_t force_import = B_FALSE;
boolean_t config_path_console = B_FALSE;
char pbuf[MAXPATHLEN];
dprintf_setup(&argc, argv);
@ -9094,6 +9120,7 @@ main(int argc, char **argv)
}
break;
case 'U':
config_path_console = B_TRUE;
spa_config_path = optarg;
if (spa_config_path[0] != '/') {
(void) fprintf(stderr,
@ -9153,9 +9180,6 @@ main(int argc, char **argv)
*/
spa_mode_readable_spacemaps = B_TRUE;
kernel_init(SPA_MODE_READ);
kernel_init_done = B_TRUE;
if (dump_all)
verbose = MAX(verbose, 1);
@ -9174,6 +9198,53 @@ main(int argc, char **argv)
if (argc < 2 && dump_opt['R'])
usage();
target = argv[0];
/*
* Automate cachefile
*/
if (!spa_config_path_env && !config_path_console && target &&
libzfs_core_init() == 0) {
char *pname = strdup(target);
const char *value;
nvlist_t *pnvl = NULL;
nvlist_t *vnvl = NULL;
if (strpbrk(pname, "/@") != NULL)
*strpbrk(pname, "/@") = '\0';
if (pname && lzc_get_props(pname, &pnvl) == 0) {
if (nvlist_lookup_nvlist(pnvl, "cachefile",
&vnvl) == 0) {
value = fnvlist_lookup_string(vnvl,
ZPROP_VALUE);
} else {
value = "-";
}
strlcpy(pbuf, value, sizeof (pbuf));
if (pbuf[0] != '\0') {
if (pbuf[0] == '/') {
if (access(pbuf, F_OK) == 0)
spa_config_path = pbuf;
else
force_import = B_TRUE;
} else if ((strcmp(pbuf, "-") == 0 &&
access(ZPOOL_CACHE, F_OK) != 0) ||
strcmp(pbuf, "none") == 0) {
force_import = B_TRUE;
}
}
nvlist_free(vnvl);
}
free(pname);
nvlist_free(pnvl);
libzfs_core_fini();
}
kernel_init(SPA_MODE_READ);
kernel_init_done = B_TRUE;
if (dump_opt['E']) {
if (argc != 1)
usage();
@ -9210,7 +9281,6 @@ main(int argc, char **argv)
fatal("internal error: %s", strerror(ENOMEM));
error = 0;
target = argv[0];
if (strpbrk(target, "/@") != NULL) {
size_t targetlen;
@ -9256,9 +9326,17 @@ main(int argc, char **argv)
target_pool = target;
}
if (dump_opt['e']) {
if (dump_opt['e'] || force_import) {
importargs_t args = { 0 };
/*
* If path is not provided, search in /dev
*/
if (searchdirs == NULL) {
searchdirs = umem_alloc(sizeof (char *), UMEM_NOFAIL);
searchdirs[nsearch++] = (char *)ZFS_DEVDIR;
}
args.paths = nsearch;
args.path = searchdirs;
args.can_be_active = B_TRUE;
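For context on the #16334 change above: in a ZAP created with ZAP_FLAG_UINT64_KEY, the bytes zap_cursor_retrieve() returns in za_name are a raw 64-bit key rather than a NUL-terminated string, so they must be reinterpreted and looked up with zap_lookup_uint64() instead of zap_lookup(). A minimal sketch of the pattern the new dump_zap() follows (illustrative only, not a drop-in replacement for the hunk above):

	boolean_t key64 =
	    !!(zap_getflags(zc.zc_zap) & ZAP_FLAG_UINT64_KEY);

	if (key64) {
		/* za_name holds raw key bytes; never treat as a string */
		(void) zap_lookup_uint64(os, object,
		    (const uint64_t *)attr.za_name, 1,
		    attr.za_integer_length, attr.za_num_integers, prop);
	} else {
		(void) zap_lookup(os, object, attr.za_name,
		    attr.za_integer_length, attr.za_num_integers, prop);
	}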


@ -332,7 +332,7 @@ AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_MAX_HW_SECTORS], [
ZFS_LINUX_TEST_RESULT([blk_queue_max_hw_sectors], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([blk_queue_max_hw_sectors])
AC_MSG_RESULT(no)
])
])
@ -355,7 +355,7 @@ AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_MAX_SEGMENTS], [
ZFS_LINUX_TEST_RESULT([blk_queue_max_segments], [
AC_MSG_RESULT(yes)
], [
ZFS_LINUX_TEST_ERROR([blk_queue_max_segments])
AC_MSG_RESULT(no)
])
])


@ -534,6 +534,30 @@ AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_WHOLE], [
])
])
dnl #
dnl # 5.16 API change
dnl # Added bdev_nr_bytes() helper.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_NR_BYTES], [
ZFS_LINUX_TEST_SRC([bdev_nr_bytes], [
#include <linux/blkdev.h>
],[
struct block_device *bdev = NULL;
loff_t nr_bytes __attribute__ ((unused)) = 0;
nr_bytes = bdev_nr_bytes(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_NR_BYTES], [
AC_MSG_CHECKING([whether bdev_nr_bytes() is available])
ZFS_LINUX_TEST_RESULT([bdev_nr_bytes], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BDEV_NR_BYTES, 1, [bdev_nr_bytes() is available])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 5.20 API change,
dnl # Removed bdevname(), snprintf(.., %pg) should be used.
@ -747,6 +771,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV], [
ZFS_AC_KERNEL_SRC_BLKDEV_CHECK_DISK_CHANGE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_CHECK_MEDIA_CHANGE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_WHOLE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_NR_BYTES
ZFS_AC_KERNEL_SRC_BLKDEV_BDEVNAME
ZFS_AC_KERNEL_SRC_BLKDEV_ISSUE_DISCARD
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_KOBJ
@ -767,6 +792,7 @@ AC_DEFUN([ZFS_AC_KERNEL_BLKDEV], [
ZFS_AC_KERNEL_BLKDEV_CHECK_DISK_CHANGE
ZFS_AC_KERNEL_BLKDEV_BDEV_CHECK_MEDIA_CHANGE
ZFS_AC_KERNEL_BLKDEV_BDEV_WHOLE
ZFS_AC_KERNEL_BLKDEV_BDEV_NR_BYTES
ZFS_AC_KERNEL_BLKDEV_BDEVNAME
ZFS_AC_KERNEL_BLKDEV_GET_ERESTARTSYS
ZFS_AC_KERNEL_BLKDEV_ISSUE_DISCARD
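A sketch of how the new bdev_nr_bytes() check above is typically consumed by a compat shim (the zfs_bdev_nr_bytes name and the i_size_read() fallback are illustrative assumptions, not part of this diff):

	#ifdef HAVE_BDEV_NR_BYTES
	#define	zfs_bdev_nr_bytes(bdev)	bdev_nr_bytes(bdev)
	#else
	/* pre-5.16 fallback: a block device's inode size is its capacity */
	#define	zfs_bdev_nr_bytes(bdev)	i_size_read((bdev)->bd_inode)
	#endif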


@ -0,0 +1,23 @@
dnl #
dnl # 5.11 API change
dnl # kmap_atomic() was deprecated in favor of kmap_local_page()
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_KMAP_LOCAL_PAGE], [
ZFS_LINUX_TEST_SRC([kmap_local_page], [
#include <linux/highmem.h>
],[
struct page page;
kmap_local_page(&page);
])
])
AC_DEFUN([ZFS_AC_KERNEL_KMAP_LOCAL_PAGE], [
AC_MSG_CHECKING([whether kmap_local_page exists])
ZFS_LINUX_TEST_RESULT([kmap_local_page], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_KMAP_LOCAL_PAGE, 1,
[kernel has kmap_local_page])
],[
AC_MSG_RESULT(no)
])
])


@ -125,6 +125,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
ZFS_AC_KERNEL_SRC_VFS_DEDUPE_FILE_RANGE
ZFS_AC_KERNEL_SRC_VFS_FILE_OPERATIONS_EXTEND
ZFS_AC_KERNEL_SRC_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_SRC_KMAP_LOCAL_PAGE
ZFS_AC_KERNEL_SRC_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN
ZFS_AC_KERNEL_SRC_GENERIC_IO_ACCT
@ -276,6 +277,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
ZFS_AC_KERNEL_VFS_DEDUPE_FILE_RANGE
ZFS_AC_KERNEL_VFS_FILE_OPERATIONS_EXTEND
ZFS_AC_KERNEL_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_KMAP_LOCAL_PAGE
ZFS_AC_KERNEL_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_MAKE_REQUEST_FN
ZFS_AC_KERNEL_GENERIC_IO_ACCT


@ -516,6 +516,8 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
VENDOR=alpine ;
elif test -f /etc/arch-release ; then
VENDOR=arch ;
elif test -f /etc/artix-release ; then
VENDOR=artix ;
elif test -f /etc/fedora-release ; then
VENDOR=fedora ;
elif test -f /bin/freebsd-version ; then
@ -551,7 +553,7 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
AC_MSG_CHECKING([default package type])
case "$VENDOR" in
alpine|arch|gentoo|lunar|slackware)
alpine|arch|artix|gentoo|lunar|slackware)
DEFAULT_PACKAGE=tgz ;;
debian|ubuntu)
DEFAULT_PACKAGE=deb ;;
@ -576,6 +578,8 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
case "$VENDOR" in
alpine|gentoo) DEFAULT_INIT_SHELL=/sbin/openrc-run
IS_SYSV_RC=false ;;
artix) DEFAULT_INIT_SHELL=/usr/bin/openrc-run
IS_SYSV_RC=false ;;
*) DEFAULT_INIT_SHELL=/bin/sh
IS_SYSV_RC=true ;;
esac
@ -594,7 +598,7 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
AC_MSG_CHECKING([default init config directory])
case "$VENDOR" in
alpine|gentoo)
alpine|artix|gentoo)
initconfdir=/etc/conf.d
;;
fedora|openeuler|redhat|sles|toss)
@ -623,7 +627,7 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
AC_MSG_CHECKING([default bash completion directory])
case "$VENDOR" in
alpine|debian|gentoo|ubuntu)
alpine|artix|debian|gentoo|ubuntu)
bashcompletiondir=/usr/share/bash-completion/completions
;;
freebsd)


@ -155,7 +155,7 @@ __zfs_list_volumes()
__zfs_argument_chosen()
{
local word property
for word in $(seq $((COMP_CWORD-1)) -1 2)
for word in $(seq $((COMP_CWORD-1)) -1 2 2>/dev/null)
do
local prev="${COMP_WORDS[$word]}"
if [[ ${COMP_WORDS[$word-1]} != -[tos] ]]


@ -77,6 +77,7 @@ _LIBZFS_CORE_H int lzc_snaprange_space(const char *, const char *, uint64_t *);
_LIBZFS_CORE_H int lzc_hold(nvlist_t *, int, nvlist_t **);
_LIBZFS_CORE_H int lzc_release(nvlist_t *, nvlist_t **);
_LIBZFS_CORE_H int lzc_get_holds(const char *, nvlist_t **);
_LIBZFS_CORE_H int lzc_get_props(const char *, nvlist_t **);
enum lzc_send_flags {
LZC_SEND_FLAG_EMBED_DATA = 1 << 0,


@ -34,7 +34,7 @@
/* BEGIN CSTYLED */
SDT_PROBE_DECLARE(sdt, , , set__error);
#define SET_ERROR(err) ({ \
#define SET_ERROR(err) ({ \
SDT_PROBE1(sdt, , , set__error, (uintptr_t)err); \
err; \
})
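The ({ ... }) construct above is a GNU C statement expression: the block executes and the value of its last expression becomes the value of the macro, which is what lets SET_ERROR() fire the static probe and still be used as, e.g., return (SET_ERROR(EINVAL)). A standalone illustration of the idiom (printf stands in for the SDT probe; this is not ZFS code):

	#include <stdio.h>

	#define	SET_ERROR_DEMO(err) ({					\
		int _e = (err);		/* evaluate argument once */	\
		printf("set_error: %d\n", _e);	/* probe stand-in */	\
		_e;			/* value of the whole macro */	\
	})

	int
	main(void)
	{
		int rc = SET_ERROR_DEMO(22);
		printf("rc = %d\n", rc);
		return (0);
	}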


@ -29,9 +29,15 @@
#include <linux/highmem.h>
#include <linux/uaccess.h>
#ifdef HAVE_KMAP_LOCAL_PAGE
/* 5.11 API change */
#define zfs_kmap_local(page) kmap_local_page(page)
#define zfs_kunmap_local(addr) kunmap_local(addr)
#else
/* 2.6.37 API change */
#define zfs_kmap_atomic(page) kmap_atomic(page)
#define zfs_kunmap_atomic(addr) kunmap_atomic(addr)
#define zfs_kmap_local(page) kmap_atomic(page)
#define zfs_kunmap_local(addr) kunmap_atomic(addr)
#endif
/* 5.0 API change - no more 'type' argument for access_ok() */
#ifdef HAVE_ACCESS_OK_TYPE
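Usage sketch for the wrappers above (kernel-side, so not standalone; page, buf, off, and len are placeholders): callers treat the pair as a short, non-sleeping mapping window and need not care whether kmap_local_page() or the older kmap_atomic() backs it:

	void *va = zfs_kmap_local(page);	/* map page for brief access */
	memcpy(buf, va + off, len);		/* must not sleep while mapped */
	zfs_kunmap_local(va);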


@ -192,22 +192,25 @@ extern void spl_kmem_reap(void);
extern uint64_t spl_kmem_cache_inuse(kmem_cache_t *cache);
extern uint64_t spl_kmem_cache_entry_size(kmem_cache_t *cache);
#ifndef SPL_KMEM_CACHE_IMPLEMENTING
/*
* Macros for the kmem_cache_* API expected by ZFS and SPL clients. We don't
* define them inside spl-kmem-cache.c, as that uses the kernel's incompatible
* kmem_cache_* facilities to implement ours.
*/
/* Avoid conflicts with kernel names that might be implemented as macros. */
#undef kmem_cache_alloc
#define kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl) \
spl_kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl)
#define kmem_cache_set_move(skc, move) spl_kmem_cache_set_move(skc, move)
#define kmem_cache_destroy(skc) spl_kmem_cache_destroy(skc)
/*
* This is necessary to be compatible with other kernel modules
* or in-tree filesystem that may define kmem_cache_alloc,
* like bcachefs does it now.
*/
#ifdef kmem_cache_alloc
#undef kmem_cache_alloc
#endif
#define kmem_cache_alloc(skc, flags) spl_kmem_cache_alloc(skc, flags)
#define kmem_cache_free(skc, obj) spl_kmem_cache_free(skc, obj)
#define kmem_cache_reap_now(skc) spl_kmem_cache_reap_now(skc)
#define kmem_reap() spl_kmem_reap()
#endif
/*
* The following functions are only available for internal use.


@ -80,7 +80,7 @@
snprintf(__get_str(msg), TRACE_DBUF_MSG_MAX, \
DBUF_TP_PRINTK_FMT, DBUF_TP_PRINTK_ARGS); \
} else { \
__assign_str(os_spa, "NULL") \
__assign_str(os_spa, "NULL"); \
__entry->ds_object = 0; \
__entry->db_object = 0; \
__entry->db_level = 0; \


@ -34,6 +34,7 @@
*/
#include <sys/types.h>
#include <sys/sysmacros.h>
#ifdef __cplusplus
extern "C" {


@ -51,12 +51,6 @@ typedef struct crypto_mechanism {
size_t cm_param_len; /* mech. parameter len */
} crypto_mechanism_t;
/* CK_AES_CTR_PARAMS provides parameters to the CKM_AES_CTR mechanism */
typedef struct CK_AES_CTR_PARAMS {
ulong_t ulCounterBits;
uint8_t cb[16];
} CK_AES_CTR_PARAMS;
/* CK_AES_CCM_PARAMS provides parameters to the CKM_AES_CCM mechanism */
typedef struct CK_AES_CCM_PARAMS {
ulong_t ulMACSize;
@ -77,13 +71,6 @@ typedef struct CK_AES_GCM_PARAMS {
ulong_t ulTagBits;
} CK_AES_GCM_PARAMS;
/* CK_AES_GMAC_PARAMS provides parameters to the CKM_AES_GMAC mechanism */
typedef struct CK_AES_GMAC_PARAMS {
uchar_t *pIv;
uchar_t *pAAD;
ulong_t ulAADLen;
} CK_AES_GMAC_PARAMS;
/*
* The measurement unit bit flag for a mechanism's minimum or maximum key size.
* The unit are mechanism dependent. It can be in bits or in bytes.
@ -92,23 +79,9 @@ typedef uint32_t crypto_keysize_unit_t;
/* Mechanisms supported out-of-the-box */
#define SUN_CKM_SHA256 "CKM_SHA256"
#define SUN_CKM_SHA256_HMAC "CKM_SHA256_HMAC"
#define SUN_CKM_SHA256_HMAC_GENERAL "CKM_SHA256_HMAC_GENERAL"
#define SUN_CKM_SHA384 "CKM_SHA384"
#define SUN_CKM_SHA384_HMAC "CKM_SHA384_HMAC"
#define SUN_CKM_SHA384_HMAC_GENERAL "CKM_SHA384_HMAC_GENERAL"
#define SUN_CKM_SHA512 "CKM_SHA512"
#define SUN_CKM_SHA512_HMAC "CKM_SHA512_HMAC"
#define SUN_CKM_SHA512_HMAC_GENERAL "CKM_SHA512_HMAC_GENERAL"
#define SUN_CKM_SHA512_224 "CKM_SHA512_224"
#define SUN_CKM_SHA512_256 "CKM_SHA512_256"
#define SUN_CKM_AES_CBC "CKM_AES_CBC"
#define SUN_CKM_AES_ECB "CKM_AES_ECB"
#define SUN_CKM_AES_CTR "CKM_AES_CTR"
#define SUN_CKM_AES_CCM "CKM_AES_CCM"
#define SUN_CKM_AES_GCM "CKM_AES_GCM"
#define SUN_CKM_AES_GMAC "CKM_AES_GMAC"
/* Data arguments of cryptographic operations */


@ -32,9 +32,6 @@ int aes_mod_fini(void);
int sha2_mod_init(void);
int sha2_mod_fini(void);
int skein_mod_init(void);
int skein_mod_fini(void);
int icp_init(void);
void icp_fini(void);


@ -86,30 +86,13 @@ typedef struct {
/* SHA2 algorithm types */
typedef enum sha2_mech_type {
SHA256_MECH_INFO_TYPE, /* SUN_CKM_SHA256 */
SHA256_HMAC_MECH_INFO_TYPE, /* SUN_CKM_SHA256_HMAC */
SHA256_HMAC_GEN_MECH_INFO_TYPE, /* SUN_CKM_SHA256_HMAC_GENERAL */
SHA384_MECH_INFO_TYPE, /* SUN_CKM_SHA384 */
SHA384_HMAC_MECH_INFO_TYPE, /* SUN_CKM_SHA384_HMAC */
SHA384_HMAC_GEN_MECH_INFO_TYPE, /* SUN_CKM_SHA384_HMAC_GENERAL */
SHA512_MECH_INFO_TYPE, /* SUN_CKM_SHA512 */
SHA512_HMAC_MECH_INFO_TYPE, /* SUN_CKM_SHA512_HMAC */
SHA512_HMAC_GEN_MECH_INFO_TYPE, /* SUN_CKM_SHA512_HMAC_GENERAL */
SHA512_224_MECH_INFO_TYPE, /* SUN_CKM_SHA512_224 */
SHA512_256_MECH_INFO_TYPE /* SUN_CKM_SHA512_256 */
} sha2_mech_type_t;
#define SHA256 0
#define SHA256_HMAC 1
#define SHA256_HMAC_GEN 2
#define SHA384 3
#define SHA384_HMAC 4
#define SHA384_HMAC_GEN 5
#define SHA512 6
#define SHA512_HMAC 7
#define SHA512_HMAC_GEN 8
#define SHA512_224 9
#define SHA512_256 10
/* Not true KCF mech types; used by direct callers to SHA2Init */
SHA256,
SHA512,
SHA512_256,
} sha2_mech_type_t;
/* SHA2 Init function */
extern void SHA2Init(int algotype, SHA2_CTX *ctx);
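With the reduced enum above, direct (non-KCF) callers hash by passing the bare constants straight to SHA2Init(). A minimal sketch (data and size are placeholders; 32-byte digest assumed for SHA-256):

	SHA2_CTX ctx;
	uint8_t digest[32];

	SHA2Init(SHA256, &ctx);
	SHA2Update(&ctx, data, size);
	SHA2Final(digest, &ctx);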


@ -152,25 +152,16 @@ typedef struct skein_param {
/* Module definitions */
#ifdef SKEIN_MODULE_IMPL
#define CKM_SKEIN_256 "CKM_SKEIN_256"
#define CKM_SKEIN_512 "CKM_SKEIN_512"
#define CKM_SKEIN1024 "CKM_SKEIN1024"
#define CKM_SKEIN_256_MAC "CKM_SKEIN_256_MAC"
#define CKM_SKEIN_512_MAC "CKM_SKEIN_512_MAC"
#define CKM_SKEIN1024_MAC "CKM_SKEIN1024_MAC"
typedef enum skein_mech_type {
SKEIN_256_MECH_INFO_TYPE,
SKEIN_512_MECH_INFO_TYPE,
SKEIN1024_MECH_INFO_TYPE,
SKEIN_256_MAC_MECH_INFO_TYPE,
SKEIN_512_MAC_MECH_INFO_TYPE,
SKEIN1024_MAC_MECH_INFO_TYPE
} skein_mech_type_t;
#define VALID_SKEIN_DIGEST_MECH(__mech) \
((int)(__mech) >= SKEIN_256_MECH_INFO_TYPE && \
(__mech) <= SKEIN1024_MECH_INFO_TYPE)
#define VALID_SKEIN_MAC_MECH(__mech) \
((int)(__mech) >= SKEIN_256_MAC_MECH_INFO_TYPE && \
(__mech) <= SKEIN1024_MAC_MECH_INFO_TYPE)


@ -35,11 +35,10 @@
#ifndef _SYS_SPA_H
#define _SYS_SPA_H
#include <sys/avl.h>
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/kstat.h>
#include <sys/nvpair.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/fs/zfs.h>
#include <sys/spa_checksum.h>


@ -18,13 +18,10 @@ nodist_libicp_la_SOURCES = \
module/icp/algs/blake3/blake3_impl.c \
module/icp/algs/edonr/edonr.c \
module/icp/algs/modes/modes.c \
module/icp/algs/modes/cbc.c \
module/icp/algs/modes/gcm_generic.c \
module/icp/algs/modes/gcm_pclmulqdq.c \
module/icp/algs/modes/gcm.c \
module/icp/algs/modes/ctr.c \
module/icp/algs/modes/ccm.c \
module/icp/algs/modes/ecb.c \
module/icp/algs/sha2/sha2_generic.c \
module/icp/algs/sha2/sha256_impl.c \
module/icp/algs/sha2/sha512_impl.c \
@ -34,7 +31,6 @@ nodist_libicp_la_SOURCES = \
module/icp/illumos-crypto.c \
module/icp/io/aes.c \
module/icp/io/sha2_mod.c \
module/icp/io/skein_mod.c \
module/icp/core/kcf_sched.c \
module/icp/core/kcf_prov_lib.c \
module/icp/core/kcf_callprov.c \

View file

@ -4952,7 +4952,7 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
if (flags->verbose) {
(void) printf("%s %s%s stream of %s into %s\n",
flags->dryrun ? "would receive" : "receiving",
flags->heal ? " corrective" : "",
flags->heal ? "corrective " : "",
drrb->drr_fromguid ? "incremental" : "full",
drrb->drr_toname, destsnap);
(void) fflush(stdout);


@ -168,6 +168,7 @@
<elf-symbol name='lzc_get_bookmarks' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_get_bootenv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_get_holds' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_get_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_get_vdev_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_hold' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_initialize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@ -2694,6 +2695,11 @@
<parameter type-id='857bb57e' name='holdsp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_get_props' mangled-name='lzc_get_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_props'>
<parameter type-id='80f4b756' name='poolname'/>
<parameter type-id='857bb57e' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_send_wrapper' mangled-name='lzc_send_wrapper' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_wrapper'>
<parameter type-id='2e711a2a' name='func'/>
<parameter type-id='95e97e5e' name='orig_fd'/>


@ -596,6 +596,12 @@ lzc_get_holds(const char *snapname, nvlist_t **holdsp)
return (lzc_ioctl(ZFS_IOC_GET_HOLDS, snapname, NULL, holdsp));
}
int
lzc_get_props(const char *poolname, nvlist_t **props)
{
return (lzc_ioctl(ZFS_IOC_POOL_GET_PROPS, poolname, NULL, props));
}
static unsigned int
max_pipe_buffer(int infd)
{
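A minimal consumer of the new lzc_get_props() entry point (hedged sketch: error handling is trimmed, "tank" is a placeholder pool name, and dump_nvlist() is the libnvpair debug printer):

	#include <libzfs_core.h>
	#include <libnvpair.h>

	int
	main(void)
	{
		nvlist_t *props = NULL;

		if (libzfs_core_init() != 0)
			return (1);
		if (lzc_get_props("tank", &props) == 0) {
			dump_nvlist(props, 8);
			nvlist_free(props);
		}
		libzfs_core_fini();
		return (0);
	}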


@ -16,7 +16,7 @@
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.\"
.Dd February 14, 2024
.Dd June 27, 2024
.Dt ZFS 4
.Os
.
@ -2113,7 +2113,7 @@ The default of
.Sy 32
was determined to be a reasonable compromise.
.
.It Sy zfs_txg_history Ns = Ns Sy 0 Pq uint
.It Sy zfs_txg_history Ns = Ns Sy 100 Pq uint
Historical statistics for this many latest TXGs will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /TXGs .
.


@ -154,7 +154,7 @@ defaults to the current kernel release.
.
.It Sy bootfs.rollback Ns Op Sy = Ns Ar snapshot-name
Execute
.Nm zfs Cm snapshot Fl Rf Ar boot-dataset Ns Sy @ Ns Ar snapshot-name
.Nm zfs Cm rollback Fl Rf Ar boot-dataset Ns Sy @ Ns Ar snapshot-name
before pivoting to the real root.
.Ar snapshot-name
defaults to the current kernel release.


@ -1639,7 +1639,7 @@ then only metadata are prefetched.
The default value is
.Sy all .
.Pp
Please note that the module parameter zfs_disable_prefetch=1 can
Please note that the module parameter zfs_prefetch_disable=1 can
be used to totally disable speculative prefetch, bypassing anything
this property does.
.It Sy setuid Ns = Ns Sy on Ns | Ns Sy off


@ -314,7 +314,6 @@ fragmentation,
and free space histogram, as well as overall pool fragmentation and histogram.
.It Fl MM
"Special" vdevs are added to -M's normal output.
.It Fl O , -object-lookups Ns = Ns Ar dataset path
Also display information about the maximum contiguous free space and the
percentage of free space in each space map.
.It Fl MMM
@ -327,7 +326,7 @@ but force zdb to interpret the
in
.Op Ar poolname Ns Op / Ns Ar dataset Ns | Ns Ar objset-ID
as a numeric objset ID.
.It Fl O Ar dataset path
.It Fl O , -object-lookups Ns = Ns Ar dataset path
Look up the specified
.Ar path
inside of the


@ -101,10 +101,7 @@ ICP_OBJS := \
algs/blake3/blake3_generic.o \
algs/blake3/blake3_impl.o \
algs/edonr/edonr.o \
algs/modes/cbc.o \
algs/modes/ccm.o \
algs/modes/ctr.o \
algs/modes/ecb.o \
algs/modes/gcm.o \
algs/modes/gcm_generic.o \
algs/modes/modes.o \
@ -125,7 +122,6 @@ ICP_OBJS := \
illumos-crypto.o \
io/aes.o \
io/sha2_mod.o \
io/skein_mod.o \
spi/kcf_spi.o
ICP_OBJS_X86_64 := \
@ -494,6 +490,7 @@ zfs-$(CONFIG_PPC64) += $(addprefix zfs/,$(ZFS_OBJS_PPC_PPC64))
UBSAN_SANITIZE_zap_leaf.o := n
UBSAN_SANITIZE_zap_micro.o := n
UBSAN_SANITIZE_sa.o := n
UBSAN_SANITIZE_zfs/zap_micro.o := n
# Suppress incorrect warnings from versions of objtool which are not
# aware of x86 EVEX prefix instructions used for AVX512.


@ -75,25 +75,17 @@ aes_encrypt_contiguous_blocks(void *ctx, char *data, size_t length,
aes_ctx_t *aes_ctx = ctx;
int rv;
if (aes_ctx->ac_flags & CTR_MODE) {
rv = ctr_mode_contiguous_blocks(ctx, data, length, out,
AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
} else if (aes_ctx->ac_flags & CCM_MODE) {
if (aes_ctx->ac_flags & CCM_MODE) {
rv = ccm_mode_encrypt_contiguous_blocks(ctx, data, length,
out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
} else if (aes_ctx->ac_flags & GCM_MODE) {
rv = gcm_mode_encrypt_contiguous_blocks(ctx, data, length,
out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
} else if (aes_ctx->ac_flags & CBC_MODE) {
rv = cbc_encrypt_contiguous_blocks(ctx,
data, length, out, AES_BLOCK_LEN, aes_encrypt_block,
aes_copy_block, aes_xor_block);
} else {
rv = ecb_cipher_contiguous_blocks(ctx, data, length, out,
AES_BLOCK_LEN, aes_encrypt_block);
}
else
__builtin_unreachable();
return (rv);
}
@ -108,28 +100,15 @@ aes_decrypt_contiguous_blocks(void *ctx, char *data, size_t length,
aes_ctx_t *aes_ctx = ctx;
int rv;
if (aes_ctx->ac_flags & CTR_MODE) {
rv = ctr_mode_contiguous_blocks(ctx, data, length, out,
AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
if (rv == CRYPTO_DATA_LEN_RANGE)
rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
} else if (aes_ctx->ac_flags & CCM_MODE) {
if (aes_ctx->ac_flags & CCM_MODE) {
rv = ccm_mode_decrypt_contiguous_blocks(ctx, data, length,
out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
} else if (aes_ctx->ac_flags & GCM_MODE) {
rv = gcm_mode_decrypt_contiguous_blocks(ctx, data, length,
out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
} else if (aes_ctx->ac_flags & CBC_MODE) {
rv = cbc_decrypt_contiguous_blocks(ctx, data, length, out,
AES_BLOCK_LEN, aes_decrypt_block, aes_copy_block,
aes_xor_block);
} else {
rv = ecb_cipher_contiguous_blocks(ctx, data, length, out,
AES_BLOCK_LEN, aes_decrypt_block);
if (rv == CRYPTO_DATA_LEN_RANGE)
rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
}
} else
__builtin_unreachable();
return (rv);
}
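The new else branch relies on __builtin_unreachable(), a GCC/Clang builtin telling the optimizer that no other mode flag can reach this point, so rv is still assigned on every path that actually returns. A standalone illustration of the idiom (not ZFS code):

	static int
	dispatch(unsigned flags)
	{
		int rv;

		if (flags & 0x1)
			rv = 100;
		else if (flags & 0x2)
			rv = 200;
		else
			__builtin_unreachable();  /* caller passes 0x1 or 0x2 */
		return (rv);
	}

	int
	main(void)
	{
		return (dispatch(0x2) == 200 ? 0 : 1);
	}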


@ -1,264 +0,0 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
/*
* Algorithm independent CBC functions.
*/
int
cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
crypto_data_t *out, size_t block_size,
int (*encrypt)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *))
{
size_t remainder = length;
size_t need = 0;
uint8_t *datap = (uint8_t *)data;
uint8_t *blockp;
uint8_t *lastp;
void *iov_or_mp;
offset_t offset;
uint8_t *out_data_1;
uint8_t *out_data_2;
size_t out_data_1_len;
if (length + ctx->cbc_remainder_len < block_size) {
/* accumulate bytes here and return */
memcpy((uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
datap,
length);
ctx->cbc_remainder_len += length;
ctx->cbc_copy_to = datap;
return (CRYPTO_SUCCESS);
}
lastp = (uint8_t *)ctx->cbc_iv;
crypto_init_ptrs(out, &iov_or_mp, &offset);
do {
/* Unprocessed data from last call. */
if (ctx->cbc_remainder_len > 0) {
need = block_size - ctx->cbc_remainder_len;
if (need > remainder)
return (CRYPTO_DATA_LEN_RANGE);
memcpy(&((uint8_t *)ctx->cbc_remainder)
[ctx->cbc_remainder_len], datap, need);
blockp = (uint8_t *)ctx->cbc_remainder;
} else {
blockp = datap;
}
/*
* XOR the previous cipher block or IV with the
* current clear block.
*/
xor_block(blockp, lastp);
encrypt(ctx->cbc_keysched, lastp, lastp);
crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
&out_data_1_len, &out_data_2, block_size);
/* copy block to where it belongs */
if (out_data_1_len == block_size) {
copy_block(lastp, out_data_1);
} else {
memcpy(out_data_1, lastp, out_data_1_len);
if (out_data_2 != NULL) {
memcpy(out_data_2,
lastp + out_data_1_len,
block_size - out_data_1_len);
}
}
/* update offset */
out->cd_offset += block_size;
/* Update pointer to next block of data to be processed. */
if (ctx->cbc_remainder_len != 0) {
datap += need;
ctx->cbc_remainder_len = 0;
} else {
datap += block_size;
}
remainder = (size_t)&data[length] - (size_t)datap;
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
memcpy(ctx->cbc_remainder, datap, remainder);
ctx->cbc_remainder_len = remainder;
ctx->cbc_copy_to = datap;
goto out;
}
ctx->cbc_copy_to = NULL;
} while (remainder > 0);
out:
/*
* Save the last encrypted block in the context.
*/
if (ctx->cbc_lastp != NULL) {
copy_block((uint8_t *)ctx->cbc_lastp, (uint8_t *)ctx->cbc_iv);
ctx->cbc_lastp = (uint8_t *)ctx->cbc_iv;
}
return (CRYPTO_SUCCESS);
}
#define OTHER(a, ctx) \
(((a) == (ctx)->cbc_lastblock) ? (ctx)->cbc_iv : (ctx)->cbc_lastblock)
int
cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
crypto_data_t *out, size_t block_size,
int (*decrypt)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *))
{
size_t remainder = length;
size_t need = 0;
uint8_t *datap = (uint8_t *)data;
uint8_t *blockp;
uint8_t *lastp;
void *iov_or_mp;
offset_t offset;
uint8_t *out_data_1;
uint8_t *out_data_2;
size_t out_data_1_len;
if (length + ctx->cbc_remainder_len < block_size) {
/* accumulate bytes here and return */
memcpy((uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
datap,
length);
ctx->cbc_remainder_len += length;
ctx->cbc_copy_to = datap;
return (CRYPTO_SUCCESS);
}
lastp = ctx->cbc_lastp;
crypto_init_ptrs(out, &iov_or_mp, &offset);
do {
/* Unprocessed data from last call. */
if (ctx->cbc_remainder_len > 0) {
need = block_size - ctx->cbc_remainder_len;
if (need > remainder)
return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
memcpy(&((uint8_t *)ctx->cbc_remainder)
[ctx->cbc_remainder_len], datap, need);
blockp = (uint8_t *)ctx->cbc_remainder;
} else {
blockp = datap;
}
/* LINTED: pointer alignment */
copy_block(blockp, (uint8_t *)OTHER((uint64_t *)lastp, ctx));
decrypt(ctx->cbc_keysched, blockp,
(uint8_t *)ctx->cbc_remainder);
blockp = (uint8_t *)ctx->cbc_remainder;
/*
* XOR the previous cipher block or IV with the
* currently decrypted block.
*/
xor_block(lastp, blockp);
/* LINTED: pointer alignment */
lastp = (uint8_t *)OTHER((uint64_t *)lastp, ctx);
crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
&out_data_1_len, &out_data_2, block_size);
memcpy(out_data_1, blockp, out_data_1_len);
if (out_data_2 != NULL) {
memcpy(out_data_2, blockp + out_data_1_len,
block_size - out_data_1_len);
}
/* update offset */
out->cd_offset += block_size;
/* Update pointer to next block of data to be processed. */
if (ctx->cbc_remainder_len != 0) {
datap += need;
ctx->cbc_remainder_len = 0;
} else {
datap += block_size;
}
remainder = (size_t)&data[length] - (size_t)datap;
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
memcpy(ctx->cbc_remainder, datap, remainder);
ctx->cbc_remainder_len = remainder;
ctx->cbc_lastp = lastp;
ctx->cbc_copy_to = datap;
return (CRYPTO_SUCCESS);
}
ctx->cbc_copy_to = NULL;
} while (remainder > 0);
ctx->cbc_lastp = lastp;
return (CRYPTO_SUCCESS);
}
int
cbc_init_ctx(cbc_ctx_t *cbc_ctx, char *param, size_t param_len,
size_t block_size, void (*copy_block)(uint8_t *, uint64_t *))
{
/* Copy IV into context. */
ASSERT3P(param, !=, NULL);
ASSERT3U(param_len, ==, block_size);
copy_block((uchar_t *)param, cbc_ctx->cbc_iv);
return (CRYPTO_SUCCESS);
}
void *
cbc_alloc_ctx(int kmflag)
{
cbc_ctx_t *cbc_ctx;
if ((cbc_ctx = kmem_zalloc(sizeof (cbc_ctx_t), kmflag)) == NULL)
return (NULL);
cbc_ctx->cbc_flags = CBC_MODE;
return (cbc_ctx);
}


@ -1,227 +0,0 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/byteorder.h>
/*
* Encrypt and decrypt multiple blocks of data in counter mode.
*/
int
ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
crypto_data_t *out, size_t block_size,
int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct),
void (*xor_block)(uint8_t *, uint8_t *))
{
size_t remainder = length;
size_t need = 0;
uint8_t *datap = (uint8_t *)data;
uint8_t *blockp;
uint8_t *lastp;
void *iov_or_mp;
offset_t offset;
uint8_t *out_data_1;
uint8_t *out_data_2;
size_t out_data_1_len;
uint64_t lower_counter, upper_counter;
if (length + ctx->ctr_remainder_len < block_size) {
/* accumulate bytes here and return */
memcpy((uint8_t *)ctx->ctr_remainder + ctx->ctr_remainder_len,
datap,
length);
ctx->ctr_remainder_len += length;
ctx->ctr_copy_to = datap;
return (CRYPTO_SUCCESS);
}
crypto_init_ptrs(out, &iov_or_mp, &offset);
do {
/* Unprocessed data from last call. */
if (ctx->ctr_remainder_len > 0) {
need = block_size - ctx->ctr_remainder_len;
if (need > remainder)
return (CRYPTO_DATA_LEN_RANGE);
memcpy(&((uint8_t *)ctx->ctr_remainder)
[ctx->ctr_remainder_len], datap, need);
blockp = (uint8_t *)ctx->ctr_remainder;
} else {
blockp = datap;
}
/* ctr_cb is the counter block */
cipher(ctx->ctr_keysched, (uint8_t *)ctx->ctr_cb,
(uint8_t *)ctx->ctr_tmp);
lastp = (uint8_t *)ctx->ctr_tmp;
/*
* Increment Counter.
*/
lower_counter = ntohll(ctx->ctr_cb[1] & ctx->ctr_lower_mask);
lower_counter = htonll(lower_counter + 1);
lower_counter &= ctx->ctr_lower_mask;
ctx->ctr_cb[1] = (ctx->ctr_cb[1] & ~(ctx->ctr_lower_mask)) |
lower_counter;
/* wrap around */
if (lower_counter == 0) {
upper_counter =
ntohll(ctx->ctr_cb[0] & ctx->ctr_upper_mask);
upper_counter = htonll(upper_counter + 1);
upper_counter &= ctx->ctr_upper_mask;
ctx->ctr_cb[0] =
(ctx->ctr_cb[0] & ~(ctx->ctr_upper_mask)) |
upper_counter;
}
/*
* XOR encrypted counter block with the current clear block.
*/
xor_block(blockp, lastp);
crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
&out_data_1_len, &out_data_2, block_size);
/* copy block to where it belongs */
memcpy(out_data_1, lastp, out_data_1_len);
if (out_data_2 != NULL) {
memcpy(out_data_2, lastp + out_data_1_len,
block_size - out_data_1_len);
}
/* update offset */
out->cd_offset += block_size;
/* Update pointer to next block of data to be processed. */
if (ctx->ctr_remainder_len != 0) {
datap += need;
ctx->ctr_remainder_len = 0;
} else {
datap += block_size;
}
remainder = (size_t)&data[length] - (size_t)datap;
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
memcpy(ctx->ctr_remainder, datap, remainder);
ctx->ctr_remainder_len = remainder;
ctx->ctr_copy_to = datap;
goto out;
}
ctx->ctr_copy_to = NULL;
} while (remainder > 0);
out:
return (CRYPTO_SUCCESS);
}
int
ctr_mode_final(ctr_ctx_t *ctx, crypto_data_t *out,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
{
uint8_t *lastp;
void *iov_or_mp;
offset_t offset;
uint8_t *out_data_1;
uint8_t *out_data_2;
size_t out_data_1_len;
uint8_t *p;
int i;
if (out->cd_length < ctx->ctr_remainder_len)
return (CRYPTO_DATA_LEN_RANGE);
encrypt_block(ctx->ctr_keysched, (uint8_t *)ctx->ctr_cb,
(uint8_t *)ctx->ctr_tmp);
lastp = (uint8_t *)ctx->ctr_tmp;
p = (uint8_t *)ctx->ctr_remainder;
for (i = 0; i < ctx->ctr_remainder_len; i++) {
p[i] ^= lastp[i];
}
crypto_init_ptrs(out, &iov_or_mp, &offset);
crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
&out_data_1_len, &out_data_2, ctx->ctr_remainder_len);
memcpy(out_data_1, p, out_data_1_len);
if (out_data_2 != NULL) {
memcpy(out_data_2,
(uint8_t *)p + out_data_1_len,
ctx->ctr_remainder_len - out_data_1_len);
}
out->cd_offset += ctx->ctr_remainder_len;
ctx->ctr_remainder_len = 0;
return (CRYPTO_SUCCESS);
}
int
ctr_init_ctx(ctr_ctx_t *ctr_ctx, ulong_t count, uint8_t *cb,
void (*copy_block)(uint8_t *, uint8_t *))
{
uint64_t upper_mask = 0;
uint64_t lower_mask = 0;
if (count == 0 || count > 128) {
return (CRYPTO_MECHANISM_PARAM_INVALID);
}
/* upper 64 bits of the mask */
if (count >= 64) {
count -= 64;
upper_mask = (count == 64) ? UINT64_MAX : (1ULL << count) - 1;
lower_mask = UINT64_MAX;
} else {
/* now the lower 63 bits */
lower_mask = (1ULL << count) - 1;
}
ctr_ctx->ctr_lower_mask = htonll(lower_mask);
ctr_ctx->ctr_upper_mask = htonll(upper_mask);
copy_block(cb, (uchar_t *)ctr_ctx->ctr_cb);
ctr_ctx->ctr_lastp = (uint8_t *)&ctr_ctx->ctr_cb[0];
ctr_ctx->ctr_flags |= CTR_MODE;
return (CRYPTO_SUCCESS);
}
void *
ctr_alloc_ctx(int kmflag)
{
ctr_ctx_t *ctr_ctx;
if ((ctr_ctx = kmem_zalloc(sizeof (ctr_ctx_t), kmflag)) == NULL)
return (NULL);
ctr_ctx->ctr_flags = CTR_MODE;
return (ctr_ctx);
}


@ -1,127 +0,0 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
/*
* Algorithm independent ECB functions.
*/
int
ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
crypto_data_t *out, size_t block_size,
int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct))
{
size_t remainder = length;
size_t need = 0;
uint8_t *datap = (uint8_t *)data;
uint8_t *blockp;
uint8_t *lastp;
void *iov_or_mp;
offset_t offset;
uint8_t *out_data_1;
uint8_t *out_data_2;
size_t out_data_1_len;
if (length + ctx->ecb_remainder_len < block_size) {
/* accumulate bytes here and return */
memcpy((uint8_t *)ctx->ecb_remainder + ctx->ecb_remainder_len,
datap,
length);
ctx->ecb_remainder_len += length;
ctx->ecb_copy_to = datap;
return (CRYPTO_SUCCESS);
}
lastp = (uint8_t *)ctx->ecb_iv;
crypto_init_ptrs(out, &iov_or_mp, &offset);
do {
/* Unprocessed data from last call. */
if (ctx->ecb_remainder_len > 0) {
need = block_size - ctx->ecb_remainder_len;
if (need > remainder)
return (CRYPTO_DATA_LEN_RANGE);
memcpy(&((uint8_t *)ctx->ecb_remainder)
[ctx->ecb_remainder_len], datap, need);
blockp = (uint8_t *)ctx->ecb_remainder;
} else {
blockp = datap;
}
cipher(ctx->ecb_keysched, blockp, lastp);
crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
&out_data_1_len, &out_data_2, block_size);
/* copy block to where it belongs */
memcpy(out_data_1, lastp, out_data_1_len);
if (out_data_2 != NULL) {
memcpy(out_data_2, lastp + out_data_1_len,
block_size - out_data_1_len);
}
/* update offset */
out->cd_offset += block_size;
/* Update pointer to next block of data to be processed. */
if (ctx->ecb_remainder_len != 0) {
datap += need;
ctx->ecb_remainder_len = 0;
} else {
datap += block_size;
}
remainder = (size_t)&data[length] - (size_t)datap;
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
memcpy(ctx->ecb_remainder, datap, remainder);
ctx->ecb_remainder_len = remainder;
ctx->ecb_copy_to = datap;
goto out;
}
ctx->ecb_copy_to = NULL;
} while (remainder > 0);
out:
return (CRYPTO_SUCCESS);
}
void *
ecb_alloc_ctx(int kmflag)
{
ecb_ctx_t *ecb_ctx;
if ((ecb_ctx = kmem_zalloc(sizeof (ecb_ctx_t), kmflag)) == NULL)
return (NULL);
ecb_ctx->ecb_flags = ECB_MODE;
return (ecb_ctx);
}


@ -50,11 +50,6 @@
static uint32_t icp_gcm_impl = IMPL_FASTEST;
static uint32_t user_sel_impl = IMPL_FASTEST;
static inline int gcm_init_ctx_impl(boolean_t, gcm_ctx_t *, char *, size_t,
int (*)(const void *, const uint8_t *, uint8_t *),
void (*)(uint8_t *, uint8_t *),
void (*)(uint8_t *, uint8_t *));
#ifdef CAN_USE_GCM_ASM
/* Does the architecture we run on support the MOVBE instruction? */
boolean_t gcm_avx_can_use_movbe = B_FALSE;
@ -590,40 +585,11 @@ gcm_init(gcm_ctx_t *ctx, const uint8_t *iv, size_t iv_len,
return (CRYPTO_SUCCESS);
}
/*
* The following function is called at encrypt or decrypt init time
* for AES GCM mode.
*/
int
gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *))
{
return (gcm_init_ctx_impl(B_FALSE, gcm_ctx, param, block_size,
encrypt_block, copy_block, xor_block));
}
/*
* The following function is called at encrypt or decrypt init time
* for AES GMAC mode.
*/
int
gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *))
{
return (gcm_init_ctx_impl(B_TRUE, gcm_ctx, param, block_size,
encrypt_block, copy_block, xor_block));
}
/*
* Init the GCM context struct. Handle the cycle and avx implementations here.
* Initialization of a GMAC context differs slightly from a GCM context.
*/
static inline int
gcm_init_ctx_impl(boolean_t gmac_mode, gcm_ctx_t *gcm_ctx, char *param,
int
gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param,
size_t block_size, int (*encrypt_block)(const void *, const uint8_t *,
uint8_t *), void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *))
@ -635,22 +601,16 @@ gcm_init_ctx_impl(boolean_t gmac_mode, gcm_ctx_t *gcm_ctx, char *param,
if (param != NULL) {
gcm_param = (CK_AES_GCM_PARAMS *)(void *)param;
if (gmac_mode == B_FALSE) {
/* GCM mode. */
if ((rv = gcm_validate_args(gcm_param)) != 0) {
return (rv);
}
gcm_ctx->gcm_flags |= GCM_MODE;
size_t tbits = gcm_param->ulTagBits;
tag_len = CRYPTO_BITS2BYTES(tbits);
iv_len = gcm_param->ulIvLen;
} else {
/* GMAC mode. */
gcm_ctx->gcm_flags |= GMAC_MODE;
tag_len = CRYPTO_BITS2BYTES(AES_GMAC_TAG_BITS);
iv_len = AES_GMAC_IV_LEN;
/* GCM mode. */
if ((rv = gcm_validate_args(gcm_param)) != 0) {
return (rv);
}
gcm_ctx->gcm_flags |= GCM_MODE;
size_t tbits = gcm_param->ulTagBits;
tag_len = CRYPTO_BITS2BYTES(tbits);
iv_len = gcm_param->ulIvLen;
gcm_ctx->gcm_tag_len = tag_len;
gcm_ctx->gcm_processed_data_len = 0;
@ -684,10 +644,9 @@ gcm_init_ctx_impl(boolean_t gmac_mode, gcm_ctx_t *gcm_ctx, char *param,
}
/*
* If this is a GCM context, use the MOVBE and the BSWAP
* variants alternately. GMAC contexts code paths do not
* use the MOVBE instruction.
* variants alternately.
*/
if (gcm_ctx->gcm_use_avx == B_TRUE && gmac_mode == B_FALSE &&
if (gcm_ctx->gcm_use_avx == B_TRUE &&
zfs_movbe_available() == B_TRUE) {
(void) atomic_toggle_boolean_nv(
(volatile boolean_t *)&gcm_avx_can_use_movbe);
@ -758,18 +717,6 @@ gcm_alloc_ctx(int kmflag)
return (gcm_ctx);
}
void *
gmac_alloc_ctx(int kmflag)
{
gcm_ctx_t *gcm_ctx;
if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
return (NULL);
gcm_ctx->gcm_flags = GMAC_MODE;
return (gcm_ctx);
}
/* GCM implementation that contains the fastest methods */
static gcm_impl_ops_t gcm_fastest_impl = {
.name = "fastest"


@ -126,20 +126,7 @@ crypto_free_mode_ctx(void *ctx)
{
common_ctx_t *common_ctx = (common_ctx_t *)ctx;
switch (common_ctx->cc_flags &
(ECB_MODE|CBC_MODE|CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE)) {
case ECB_MODE:
kmem_free(common_ctx, sizeof (ecb_ctx_t));
break;
case CBC_MODE:
kmem_free(common_ctx, sizeof (cbc_ctx_t));
break;
case CTR_MODE:
kmem_free(common_ctx, sizeof (ctr_ctx_t));
break;
switch (common_ctx->cc_flags & (CCM_MODE|GCM_MODE)) {
case CCM_MODE:
if (((ccm_ctx_t *)ctx)->ccm_pt_buf != NULL)
vmem_free(((ccm_ctx_t *)ctx)->ccm_pt_buf,
@ -149,9 +136,12 @@ crypto_free_mode_ctx(void *ctx)
break;
case GCM_MODE:
case GMAC_MODE:
gcm_clear_ctx((gcm_ctx_t *)ctx);
kmem_free(ctx, sizeof (gcm_ctx_t));
break;
default:
__builtin_unreachable();
}
}


@ -400,15 +400,13 @@ SHA2Init(int algotype, SHA2_CTX *ctx)
sha256_ctx *ctx256 = &ctx->sha256;
sha512_ctx *ctx512 = &ctx->sha512;
ASSERT3S(algotype, >=, SHA256_MECH_INFO_TYPE);
ASSERT3S(algotype, <=, SHA512_256_MECH_INFO_TYPE);
ASSERT3S(algotype, >=, SHA512_HMAC_MECH_INFO_TYPE);
ASSERT3S(algotype, <=, SHA512_256);
memset(ctx, 0, sizeof (*ctx));
ctx->algotype = algotype;
switch (ctx->algotype) {
case SHA256_MECH_INFO_TYPE:
case SHA256_HMAC_MECH_INFO_TYPE:
case SHA256_HMAC_GEN_MECH_INFO_TYPE:
case SHA256:
ctx256->state[0] = 0x6a09e667;
ctx256->state[1] = 0xbb67ae85;
ctx256->state[2] = 0x3c6ef372;
@ -420,24 +418,8 @@ SHA2Init(int algotype, SHA2_CTX *ctx)
ctx256->count[0] = 0;
ctx256->ops = sha256_get_ops();
break;
case SHA384_MECH_INFO_TYPE:
case SHA384_HMAC_MECH_INFO_TYPE:
case SHA384_HMAC_GEN_MECH_INFO_TYPE:
ctx512->state[0] = 0xcbbb9d5dc1059ed8ULL;
ctx512->state[1] = 0x629a292a367cd507ULL;
ctx512->state[2] = 0x9159015a3070dd17ULL;
ctx512->state[3] = 0x152fecd8f70e5939ULL;
ctx512->state[4] = 0x67332667ffc00b31ULL;
ctx512->state[5] = 0x8eb44a8768581511ULL;
ctx512->state[6] = 0xdb0c2e0d64f98fa7ULL;
ctx512->state[7] = 0x47b5481dbefa4fa4ULL;
ctx512->count[0] = 0;
ctx512->count[1] = 0;
ctx512->ops = sha512_get_ops();
break;
case SHA512_MECH_INFO_TYPE:
case SHA512:
case SHA512_HMAC_MECH_INFO_TYPE:
case SHA512_HMAC_GEN_MECH_INFO_TYPE:
ctx512->state[0] = 0x6a09e667f3bcc908ULL;
ctx512->state[1] = 0xbb67ae8584caa73bULL;
ctx512->state[2] = 0x3c6ef372fe94f82bULL;
@ -450,20 +432,7 @@ SHA2Init(int algotype, SHA2_CTX *ctx)
ctx512->count[1] = 0;
ctx512->ops = sha512_get_ops();
break;
case SHA512_224_MECH_INFO_TYPE:
ctx512->state[0] = 0x8c3d37c819544da2ULL;
ctx512->state[1] = 0x73e1996689dcd4d6ULL;
ctx512->state[2] = 0x1dfab7ae32ff9c82ULL;
ctx512->state[3] = 0x679dd514582f9fcfULL;
ctx512->state[4] = 0x0f6d2b697bd44da8ULL;
ctx512->state[5] = 0x77e36f7304c48942ULL;
ctx512->state[6] = 0x3f9d85a86a1d36c8ULL;
ctx512->state[7] = 0x1112e6ad91d692a1ULL;
ctx512->count[0] = 0;
ctx512->count[1] = 0;
ctx512->ops = sha512_get_ops();
break;
case SHA512_256_MECH_INFO_TYPE:
case SHA512_256:
ctx512->state[0] = 0x22312194fc2bf72cULL;
ctx512->state[1] = 0x9f555fa3c84c64c2ULL;
ctx512->state[2] = 0x2393b86b6f53b151ULL;
@ -490,25 +459,14 @@ SHA2Update(SHA2_CTX *ctx, const void *data, size_t len)
ASSERT3P(data, !=, NULL);
switch (ctx->algotype) {
case SHA256_MECH_INFO_TYPE:
case SHA256_HMAC_MECH_INFO_TYPE:
case SHA256_HMAC_GEN_MECH_INFO_TYPE:
case SHA256:
sha256_update(&ctx->sha256, data, len);
break;
case SHA384_MECH_INFO_TYPE:
case SHA384_HMAC_MECH_INFO_TYPE:
case SHA384_HMAC_GEN_MECH_INFO_TYPE:
sha512_update(&ctx->sha512, data, len);
break;
case SHA512_MECH_INFO_TYPE:
case SHA512:
case SHA512_HMAC_MECH_INFO_TYPE:
case SHA512_HMAC_GEN_MECH_INFO_TYPE:
sha512_update(&ctx->sha512, data, len);
break;
case SHA512_224_MECH_INFO_TYPE:
sha512_update(&ctx->sha512, data, len);
break;
case SHA512_256_MECH_INFO_TYPE:
case SHA512_256:
sha512_update(&ctx->sha512, data, len);
break;
}
@ -519,25 +477,14 @@ void
SHA2Final(void *digest, SHA2_CTX *ctx)
{
switch (ctx->algotype) {
case SHA256_MECH_INFO_TYPE:
case SHA256_HMAC_MECH_INFO_TYPE:
case SHA256_HMAC_GEN_MECH_INFO_TYPE:
case SHA256:
sha256_final(&ctx->sha256, digest, 256);
break;
case SHA384_MECH_INFO_TYPE:
case SHA384_HMAC_MECH_INFO_TYPE:
case SHA384_HMAC_GEN_MECH_INFO_TYPE:
sha512_final(&ctx->sha512, digest, 384);
break;
case SHA512_MECH_INFO_TYPE:
case SHA512:
case SHA512_HMAC_MECH_INFO_TYPE:
case SHA512_HMAC_GEN_MECH_INFO_TYPE:
sha512_final(&ctx->sha512, digest, 512);
break;
case SHA512_224_MECH_INFO_TYPE:
sha512_final(&ctx->sha512, digest, 224);
break;
case SHA512_256_MECH_INFO_TYPE:
case SHA512_256:
sha512_final(&ctx->sha512, digest, 256);
break;
}


@ -41,7 +41,6 @@
* mech_index is the index for that mechanism in the table.
* A mechanism belongs to exactly 1 table.
* The tables are:
* . digest_mechs_tab[] for the msg digest mechs.
* . cipher_mechs_tab[] for encrypt/decrypt and wrap/unwrap mechs.
* . mac_mechs_tab[] for MAC mechs.
* . sign_mechs_tab[] for sign & verify mechs.
@ -75,13 +74,11 @@
/* RFE 4687834 Will deal with the extensibility of these tables later */
static kcf_mech_entry_t kcf_digest_mechs_tab[KCF_MAXDIGEST];
static kcf_mech_entry_t kcf_cipher_mechs_tab[KCF_MAXCIPHER];
static kcf_mech_entry_t kcf_mac_mechs_tab[KCF_MAXMAC];
const kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = {
{0, NULL}, /* No class zero */
{KCF_MAXDIGEST, kcf_digest_mechs_tab},
{KCF_MAXCIPHER, kcf_cipher_mechs_tab},
{KCF_MAXMAC, kcf_mac_mechs_tab},
};
@ -220,10 +217,7 @@ kcf_add_mech_provider(short mech_indx,
crypto_func_group_t fg = mech_info->cm_func_group_mask;
kcf_ops_class_t class;
if (fg & CRYPTO_FG_DIGEST || fg & CRYPTO_FG_DIGEST_ATOMIC)
class = KCF_DIGEST_CLASS;
else if (fg & CRYPTO_FG_ENCRYPT || fg & CRYPTO_FG_DECRYPT ||
fg & CRYPTO_FG_ENCRYPT_ATOMIC ||
if (fg & CRYPTO_FG_ENCRYPT_ATOMIC ||
fg & CRYPTO_FG_DECRYPT_ATOMIC)
class = KCF_CIPHER_CLASS;
else if (fg & CRYPTO_FG_MAC || fg & CRYPTO_FG_MAC_ATOMIC)


@ -107,7 +107,6 @@
void
icp_fini(void)
{
skein_mod_fini();
sha2_mod_fini();
aes_mod_fini();
kcf_sched_destroy();
@ -134,12 +133,6 @@ icp_init(void)
/* initialize algorithms */
aes_mod_init();
sha2_mod_init();
skein_mod_init();
return (0);
}
#if defined(_KERNEL) && defined(__FreeBSD__)
module_exit(icp_fini);
module_init(icp_init);
#endif


@ -149,13 +149,8 @@ extern int aes_decrypt_contiguous_blocks(void *ctx, char *data, size_t length,
#ifdef _AES_IMPL
typedef enum aes_mech_type {
AES_ECB_MECH_INFO_TYPE, /* SUN_CKM_AES_ECB */
AES_CBC_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC */
AES_CBC_PAD_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC_PAD */
AES_CTR_MECH_INFO_TYPE, /* SUN_CKM_AES_CTR */
AES_CCM_MECH_INFO_TYPE, /* SUN_CKM_AES_CCM */
AES_GCM_MECH_INFO_TYPE, /* SUN_CKM_AES_GCM */
AES_GMAC_MECH_INFO_TYPE /* SUN_CKM_AES_GMAC */
} aes_mech_type_t;
#endif /* _AES_IMPL */


@ -45,12 +45,8 @@ extern "C" {
extern boolean_t gcm_avx_can_use_movbe;
#endif
#define ECB_MODE 0x00000002
#define CBC_MODE 0x00000004
#define CTR_MODE 0x00000008
#define CCM_MODE 0x00000010
#define GCM_MODE 0x00000020
#define GMAC_MODE 0x00000040
/*
* cc_keysched: Pointer to key schedule.
@ -76,7 +72,7 @@ extern boolean_t gcm_avx_can_use_movbe;
* by the caller, or internally, e.g. an init routine.
* If allocated by the latter, then it needs to be freed.
*
* ECB_MODE, CBC_MODE, CTR_MODE, or CCM_MODE
* CCM_MODE
*/
struct common_ctx {
void *cc_keysched;
@ -91,57 +87,6 @@ struct common_ctx {
typedef struct common_ctx common_ctx_t;
typedef struct ecb_ctx {
struct common_ctx ecb_common;
uint64_t ecb_lastblock[2];
} ecb_ctx_t;
#define ecb_keysched ecb_common.cc_keysched
#define ecb_keysched_len ecb_common.cc_keysched_len
#define ecb_iv ecb_common.cc_iv
#define ecb_remainder ecb_common.cc_remainder
#define ecb_remainder_len ecb_common.cc_remainder_len
#define ecb_lastp ecb_common.cc_lastp
#define ecb_copy_to ecb_common.cc_copy_to
#define ecb_flags ecb_common.cc_flags
typedef struct cbc_ctx {
struct common_ctx cbc_common;
uint64_t cbc_lastblock[2];
} cbc_ctx_t;
#define cbc_keysched cbc_common.cc_keysched
#define cbc_keysched_len cbc_common.cc_keysched_len
#define cbc_iv cbc_common.cc_iv
#define cbc_remainder cbc_common.cc_remainder
#define cbc_remainder_len cbc_common.cc_remainder_len
#define cbc_lastp cbc_common.cc_lastp
#define cbc_copy_to cbc_common.cc_copy_to
#define cbc_flags cbc_common.cc_flags
/*
* ctr_lower_mask Bit-mask for lower 8 bytes of counter block.
* ctr_upper_mask Bit-mask for upper 8 bytes of counter block.
*/
typedef struct ctr_ctx {
struct common_ctx ctr_common;
uint64_t ctr_lower_mask;
uint64_t ctr_upper_mask;
uint32_t ctr_tmp[4];
} ctr_ctx_t;
/*
* ctr_cb Counter block.
*/
#define ctr_keysched ctr_common.cc_keysched
#define ctr_keysched_len ctr_common.cc_keysched_len
#define ctr_cb ctr_common.cc_iv
#define ctr_remainder ctr_common.cc_remainder
#define ctr_remainder_len ctr_common.cc_remainder_len
#define ctr_lastp ctr_common.cc_lastp
#define ctr_copy_to ctr_common.cc_copy_to
#define ctr_flags ctr_common.cc_flags
/*
*
* ccm_mac_len: Stores length of the MAC in CCM mode.
@ -241,27 +186,21 @@ typedef struct gcm_ctx {
#define gcm_copy_to gcm_common.cc_copy_to
#define gcm_flags gcm_common.cc_flags
#define AES_GMAC_IV_LEN 12
#define AES_GMAC_TAG_BITS 128
void gcm_clear_ctx(gcm_ctx_t *ctx);
typedef struct aes_ctx {
union {
ecb_ctx_t acu_ecb;
cbc_ctx_t acu_cbc;
ctr_ctx_t acu_ctr;
ccm_ctx_t acu_ccm;
gcm_ctx_t acu_gcm;
} acu;
} aes_ctx_t;
#define ac_flags acu.acu_ecb.ecb_common.cc_flags
#define ac_remainder_len acu.acu_ecb.ecb_common.cc_remainder_len
#define ac_keysched acu.acu_ecb.ecb_common.cc_keysched
#define ac_keysched_len acu.acu_ecb.ecb_common.cc_keysched_len
#define ac_iv acu.acu_ecb.ecb_common.cc_iv
#define ac_lastp acu.acu_ecb.ecb_common.cc_lastp
#define ac_flags acu.acu_ccm.ccm_common.cc_flags
#define ac_remainder_len acu.acu_ccm.ccm_common.cc_remainder_len
#define ac_keysched acu.acu_ccm.ccm_common.cc_keysched
#define ac_keysched_len acu.acu_ccm.ccm_common.cc_keysched_len
#define ac_iv acu.acu_ccm.ccm_common.cc_iv
#define ac_lastp acu.acu_ccm.ccm_common.cc_lastp
#define ac_pt_buf acu.acu_ccm.ccm_pt_buf
#define ac_mac_len acu.acu_ccm.ccm_mac_len
#define ac_data_len acu.acu_ccm.ccm_data_len
@ -269,55 +208,6 @@ typedef struct aes_ctx {
#define ac_processed_data_len acu.acu_ccm.ccm_processed_data_len
#define ac_tag_len acu.acu_gcm.gcm_tag_len
typedef struct blowfish_ctx {
union {
ecb_ctx_t bcu_ecb;
cbc_ctx_t bcu_cbc;
} bcu;
} blowfish_ctx_t;
#define bc_flags bcu.bcu_ecb.ecb_common.cc_flags
#define bc_remainder_len bcu.bcu_ecb.ecb_common.cc_remainder_len
#define bc_keysched bcu.bcu_ecb.ecb_common.cc_keysched
#define bc_keysched_len bcu.bcu_ecb.ecb_common.cc_keysched_len
#define bc_iv bcu.bcu_ecb.ecb_common.cc_iv
#define bc_lastp bcu.bcu_ecb.ecb_common.cc_lastp
typedef struct des_ctx {
union {
ecb_ctx_t dcu_ecb;
cbc_ctx_t dcu_cbc;
} dcu;
} des_ctx_t;
#define dc_flags dcu.dcu_ecb.ecb_common.cc_flags
#define dc_remainder_len dcu.dcu_ecb.ecb_common.cc_remainder_len
#define dc_keysched dcu.dcu_ecb.ecb_common.cc_keysched
#define dc_keysched_len dcu.dcu_ecb.ecb_common.cc_keysched_len
#define dc_iv dcu.dcu_ecb.ecb_common.cc_iv
#define dc_lastp dcu.dcu_ecb.ecb_common.cc_lastp
extern int ecb_cipher_contiguous_blocks(ecb_ctx_t *, char *, size_t,
crypto_data_t *, size_t, int (*cipher)(const void *, const uint8_t *,
uint8_t *));
extern int cbc_encrypt_contiguous_blocks(cbc_ctx_t *, char *, size_t,
crypto_data_t *, size_t,
int (*encrypt)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int cbc_decrypt_contiguous_blocks(cbc_ctx_t *, char *, size_t,
crypto_data_t *, size_t,
int (*decrypt)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int ctr_mode_contiguous_blocks(ctr_ctx_t *, char *, size_t,
crypto_data_t *, size_t,
int (*cipher)(const void *, const uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *, char *, size_t,
crypto_data_t *, size_t,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
@ -360,15 +250,6 @@ extern int gcm_decrypt_final(gcm_ctx_t *, crypto_data_t *, size_t,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int ctr_mode_final(ctr_ctx_t *, crypto_data_t *,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *));
extern int cbc_init_ctx(cbc_ctx_t *, char *, size_t, size_t,
void (*copy_block)(uint8_t *, uint64_t *));
extern int ctr_init_ctx(ctr_ctx_t *, ulong_t, uint8_t *,
void (*copy_block)(uint8_t *, uint8_t *));
extern int ccm_init_ctx(ccm_ctx_t *, char *, int, boolean_t, size_t,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
@ -378,11 +259,6 @@ extern int gcm_init_ctx(gcm_ctx_t *, char *, size_t,
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int gmac_init_ctx(gcm_ctx_t *, char *, size_t,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern void calculate_ccm_mac(ccm_ctx_t *, uint8_t *,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *));
@ -392,12 +268,8 @@ extern void crypto_init_ptrs(crypto_data_t *, void **, offset_t *);
extern void crypto_get_ptrs(crypto_data_t *, void **, offset_t *,
uint8_t **, size_t *, uint8_t **, size_t);
extern void *ecb_alloc_ctx(int);
extern void *cbc_alloc_ctx(int);
extern void *ctr_alloc_ctx(int);
extern void *ccm_alloc_ctx(int);
extern void *gcm_alloc_ctx(int);
extern void *gmac_alloc_ctx(int);
extern void crypto_free_mode_ctx(void *);
#ifdef __cplusplus

View file

@ -55,7 +55,7 @@ extern "C" {
* When impl.h is broken up (bug# 4703218), this will be done. For now,
* we hardcode these values.
*/
#define KCF_OPS_CLASSSIZE 4
#define KCF_OPS_CLASSSIZE 3
#define KCF_MAXMECHTAB 32
/*
@ -187,28 +187,6 @@ typedef struct kcf_mech_entry {
avl_node_t me_node;
} kcf_mech_entry_t;
/*
* If a component has a reference to a kcf_policy_desc_t,
* it REFHOLD()s. A new policy descriptor which is referenced only
* by the policy table has a reference count of one.
*/
#define KCF_POLICY_REFHOLD(desc) { \
int newval = atomic_add_32_nv(&(desc)->pd_refcnt, 1); \
ASSERT(newval != 0); \
}
/*
* Releases a reference to a policy descriptor. When the last
* reference is released, the descriptor is freed.
*/
#define KCF_POLICY_REFRELE(desc) { \
membar_producer(); \
int newval = atomic_add_32_nv(&(desc)->pd_refcnt, -1); \
ASSERT(newval != -1); \
if (newval == 0) \
kcf_policy_free_desc(desc); \
}
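(Editorial note, not part of the diff: the removed hold/release pair is the standard atomic-refcount idiom; the membar_producer() before the decrement orders the holder's prior writes ahead of the release, so the descriptor cannot be freed before those writes are visible.)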
/*
* Global tables. The sizes are from the predefined PKCS#11 v2.20 mechanisms,
* with a margin of few extra empty entry points
@ -222,12 +200,11 @@ _Static_assert(KCF_MAXCIPHER == KCF_MAXMECHTAB,
"KCF_MAXCIPHER != KCF_MAXMECHTAB"); /* See KCF_MAXMECHTAB comment */
typedef enum {
KCF_DIGEST_CLASS = 1,
KCF_CIPHER_CLASS,
KCF_CIPHER_CLASS = 1,
KCF_MAC_CLASS,
} kcf_ops_class_t;
#define KCF_FIRST_OPSCLASS KCF_DIGEST_CLASS
#define KCF_FIRST_OPSCLASS KCF_CIPHER_CLASS
#define KCF_LAST_OPSCLASS KCF_MAC_CLASS
_Static_assert(
KCF_OPS_CLASSSIZE == (KCF_LAST_OPSCLASS - KCF_FIRST_OPSCLASS + 2),
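(Editorial note, not part of the diff: the assertion still balances after the renumbering: KCF_LAST_OPSCLASS - KCF_FIRST_OPSCLASS + 2 = 2 - 1 + 2 = 3, matching the new KCF_OPS_CLASSSIZE. The extra slot exists because class numbers start at 1, leaving table index 0 unused.)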
@ -275,29 +252,14 @@ extern const kcf_mech_entry_tab_t kcf_mech_tabs_tab[];
* of type kcf_prov_desc_t.
*/
#define KCF_PROV_DIGEST_OPS(pd) ((pd)->pd_ops_vector->co_digest_ops)
#define KCF_PROV_CIPHER_OPS(pd) ((pd)->pd_ops_vector->co_cipher_ops)
#define KCF_PROV_MAC_OPS(pd) ((pd)->pd_ops_vector->co_mac_ops)
#define KCF_PROV_CTX_OPS(pd) ((pd)->pd_ops_vector->co_ctx_ops)
/*
* Wrappers for crypto_digest_ops(9S) entry points.
*/
#define KCF_PROV_DIGEST_INIT(pd, ctx, mech) ( \
(KCF_PROV_DIGEST_OPS(pd) && KCF_PROV_DIGEST_OPS(pd)->digest_init) ? \
KCF_PROV_DIGEST_OPS(pd)->digest_init(ctx, mech) : \
CRYPTO_NOT_SUPPORTED)
/*
* Wrappers for crypto_cipher_ops(9S) entry points.
*/
#define KCF_PROV_ENCRYPT_INIT(pd, ctx, mech, key, template) ( \
(KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt_init) ? \
KCF_PROV_CIPHER_OPS(pd)->encrypt_init(ctx, mech, key, template) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_ENCRYPT_ATOMIC(pd, mech, key, plaintext, ciphertext, \
template) ( \
(KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt_atomic) ? \

View file

@ -66,22 +66,6 @@ typedef struct crypto_ctx {
void *cc_framework_private; /* owned by framework */
} crypto_ctx_t;
/*
* The crypto_digest_ops structure contains pointers to digest
* operations for cryptographic providers. It is passed through
* the crypto_ops(9S) structure when providers register with the
* kernel using crypto_register_provider(9F).
*/
typedef struct crypto_digest_ops {
int (*digest_init)(crypto_ctx_t *, crypto_mechanism_t *);
int (*digest)(crypto_ctx_t *, crypto_data_t *, crypto_data_t *);
int (*digest_update)(crypto_ctx_t *, crypto_data_t *);
int (*digest_key)(crypto_ctx_t *, crypto_key_t *);
int (*digest_final)(crypto_ctx_t *, crypto_data_t *);
int (*digest_atomic)(crypto_mechanism_t *, crypto_data_t *,
crypto_data_t *);
} __no_const crypto_digest_ops_t;
/*
* The crypto_cipher_ops structure contains pointers to encryption
* and decryption operations for cryptographic providers. It is
@ -89,27 +73,8 @@ typedef struct crypto_digest_ops {
* with the kernel using crypto_register_provider(9F).
*/
typedef struct crypto_cipher_ops {
int (*encrypt_init)(crypto_ctx_t *,
crypto_mechanism_t *, crypto_key_t *,
crypto_spi_ctx_template_t);
int (*encrypt)(crypto_ctx_t *,
crypto_data_t *, crypto_data_t *);
int (*encrypt_update)(crypto_ctx_t *,
crypto_data_t *, crypto_data_t *);
int (*encrypt_final)(crypto_ctx_t *,
crypto_data_t *);
int (*encrypt_atomic)(crypto_mechanism_t *, crypto_key_t *,
crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t);
int (*decrypt_init)(crypto_ctx_t *,
crypto_mechanism_t *, crypto_key_t *,
crypto_spi_ctx_template_t);
int (*decrypt)(crypto_ctx_t *,
crypto_data_t *, crypto_data_t *);
int (*decrypt_update)(crypto_ctx_t *,
crypto_data_t *, crypto_data_t *);
int (*decrypt_final)(crypto_ctx_t *,
crypto_data_t *);
int (*decrypt_atomic)(crypto_mechanism_t *, crypto_key_t *,
crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t);
} __no_const crypto_cipher_ops_t;
@ -156,7 +121,6 @@ typedef struct crypto_ctx_ops {
* by calling crypto_register_provider(9F).
*/
typedef struct crypto_ops {
const crypto_digest_ops_t *co_digest_ops;
const crypto_cipher_ops_t *co_cipher_ops;
const crypto_mac_ops_t *co_mac_ops;
const crypto_ctx_ops_t *co_ctx_ops;
@ -172,14 +136,10 @@ typedef struct crypto_ops {
typedef uint32_t crypto_func_group_t;
#define CRYPTO_FG_ENCRYPT 0x00000001 /* encrypt_init() */
#define CRYPTO_FG_DECRYPT 0x00000002 /* decrypt_init() */
#define CRYPTO_FG_DIGEST 0x00000004 /* digest_init() */
#define CRYPTO_FG_MAC 0x00001000 /* mac_init() */
#define CRYPTO_FG_ENCRYPT_ATOMIC 0x00008000 /* encrypt_atomic() */
#define CRYPTO_FG_DECRYPT_ATOMIC 0x00010000 /* decrypt_atomic() */
#define CRYPTO_FG_MAC_ATOMIC 0x00020000 /* mac_atomic() */
#define CRYPTO_FG_DIGEST_ATOMIC 0x00040000 /* digest_atomic() */
/*
* Maximum length of the pi_provider_description field of the

View file

@ -1,36 +0,0 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_STACK_H
#define _SYS_STACK_H
#if defined(__i386) || defined(__amd64)
#include <sys/ia32/stack.h> /* XX64 x86/sys/stack.h */
#endif
#endif /* _SYS_STACK_H */

View file

@ -1,36 +0,0 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_TRAP_H
#define _SYS_TRAP_H
#if defined(__i386) || defined(__amd64)
#include <sys/ia32/trap.h> /* XX64 x86/sys/trap.h */
#endif
#endif /* _SYS_TRAP_H */

File diff suppressed because it is too large

View file

@ -60,48 +60,9 @@
* Mechanism info structure passed to KCF during registration.
*/
static const crypto_mech_info_t sha2_mech_info_tab[] = {
/* SHA256 */
{SUN_CKM_SHA256, SHA256_MECH_INFO_TYPE,
CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC},
/* SHA256-HMAC */
{SUN_CKM_SHA256_HMAC, SHA256_HMAC_MECH_INFO_TYPE,
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC},
/* SHA256-HMAC GENERAL */
{SUN_CKM_SHA256_HMAC_GENERAL, SHA256_HMAC_GEN_MECH_INFO_TYPE,
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC},
/* SHA384 */
{SUN_CKM_SHA384, SHA384_MECH_INFO_TYPE,
CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC},
/* SHA384-HMAC */
{SUN_CKM_SHA384_HMAC, SHA384_HMAC_MECH_INFO_TYPE,
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC},
/* SHA384-HMAC GENERAL */
{SUN_CKM_SHA384_HMAC_GENERAL, SHA384_HMAC_GEN_MECH_INFO_TYPE,
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC},
/* SHA512 */
{SUN_CKM_SHA512, SHA512_MECH_INFO_TYPE,
CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC},
/* SHA512-HMAC */
{SUN_CKM_SHA512_HMAC, SHA512_HMAC_MECH_INFO_TYPE,
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC},
/* SHA512-HMAC GENERAL */
{SUN_CKM_SHA512_HMAC_GENERAL, SHA512_HMAC_GEN_MECH_INFO_TYPE,
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC},
};
static int sha2_digest_init(crypto_ctx_t *, crypto_mechanism_t *);
static int sha2_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *);
static int sha2_digest_update(crypto_ctx_t *, crypto_data_t *);
static int sha2_digest_final(crypto_ctx_t *, crypto_data_t *);
static int sha2_digest_atomic(crypto_mechanism_t *, crypto_data_t *,
crypto_data_t *);
static const crypto_digest_ops_t sha2_digest_ops = {
.digest_init = sha2_digest_init,
.digest = sha2_digest,
.digest_update = sha2_digest_update,
.digest_final = sha2_digest_final,
.digest_atomic = sha2_digest_atomic
};
static int sha2_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
@ -132,7 +93,6 @@ static const crypto_ctx_ops_t sha2_ctx_ops = {
};
static const crypto_ops_t sha2_crypto_ops = {
&sha2_digest_ops,
NULL,
&sha2_mac_ops,
&sha2_ctx_ops,
@ -184,27 +144,6 @@ sha2_mod_fini(void)
return (ret);
}
/*
* KCF software provider digest entry points.
*/
static int
sha2_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism)
{
/*
* Allocate and initialize SHA2 context.
*/
ctx->cc_provider_private = kmem_alloc(sizeof (sha2_ctx_t), KM_SLEEP);
if (ctx->cc_provider_private == NULL)
return (CRYPTO_HOST_MEMORY);
PROV_SHA2_CTX(ctx)->sc_mech_type = mechanism->cm_type;
SHA2Init(mechanism->cm_type, &PROV_SHA2_CTX(ctx)->sc_sha2_ctx);
return (CRYPTO_SUCCESS);
}
/*
* Helper SHA2 digest update function for uio data.
*/
@ -297,10 +236,8 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
* The computed SHA2 digest will fit in the current
* iovec.
*/
if (((sha2_ctx->algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) &&
(digest_len != SHA256_DIGEST_LENGTH)) ||
((sha2_ctx->algotype > SHA256_HMAC_GEN_MECH_INFO_TYPE) &&
(digest_len != SHA512_DIGEST_LENGTH))) {
ASSERT3U(sha2_ctx->algotype, ==, SHA512_HMAC_MECH_INFO_TYPE);
if (digest_len != SHA512_DIGEST_LENGTH) {
/*
* The caller requested a short digest. Digest
* into a scratch buffer and return to
@ -360,246 +297,6 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
return (CRYPTO_SUCCESS);
}
static int
sha2_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest)
{
int ret = CRYPTO_SUCCESS;
uint_t sha_digest_len;
ASSERT(ctx->cc_provider_private != NULL);
switch (PROV_SHA2_CTX(ctx)->sc_mech_type) {
case SHA256_MECH_INFO_TYPE:
sha_digest_len = SHA256_DIGEST_LENGTH;
break;
case SHA384_MECH_INFO_TYPE:
sha_digest_len = SHA384_DIGEST_LENGTH;
break;
case SHA512_MECH_INFO_TYPE:
sha_digest_len = SHA512_DIGEST_LENGTH;
break;
default:
return (CRYPTO_MECHANISM_INVALID);
}
/*
* We need to just return the length needed to store the output.
* We should not destroy the context for the following cases.
*/
if ((digest->cd_length == 0) ||
(digest->cd_length < sha_digest_len)) {
digest->cd_length = sha_digest_len;
return (CRYPTO_BUFFER_TOO_SMALL);
}
/*
* Do the SHA2 update on the specified input data.
*/
switch (data->cd_format) {
case CRYPTO_DATA_RAW:
SHA2Update(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
(uint8_t *)data->cd_raw.iov_base + data->cd_offset,
data->cd_length);
break;
case CRYPTO_DATA_UIO:
ret = sha2_digest_update_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
data);
break;
default:
ret = CRYPTO_ARGUMENTS_BAD;
}
if (ret != CRYPTO_SUCCESS) {
/* the update failed, free context and bail */
kmem_free(ctx->cc_provider_private, sizeof (sha2_ctx_t));
ctx->cc_provider_private = NULL;
digest->cd_length = 0;
return (ret);
}
/*
* Do a SHA2 final, must be done separately since the digest
* type can be different than the input data type.
*/
switch (digest->cd_format) {
case CRYPTO_DATA_RAW:
SHA2Final((unsigned char *)digest->cd_raw.iov_base +
digest->cd_offset, &PROV_SHA2_CTX(ctx)->sc_sha2_ctx);
break;
case CRYPTO_DATA_UIO:
ret = sha2_digest_final_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
digest, sha_digest_len, NULL);
break;
default:
ret = CRYPTO_ARGUMENTS_BAD;
}
/* all done, free context and return */
if (ret == CRYPTO_SUCCESS)
digest->cd_length = sha_digest_len;
else
digest->cd_length = 0;
kmem_free(ctx->cc_provider_private, sizeof (sha2_ctx_t));
ctx->cc_provider_private = NULL;
return (ret);
}
static int
sha2_digest_update(crypto_ctx_t *ctx, crypto_data_t *data)
{
int ret = CRYPTO_SUCCESS;
ASSERT(ctx->cc_provider_private != NULL);
/*
* Do the SHA2 update on the specified input data.
*/
switch (data->cd_format) {
case CRYPTO_DATA_RAW:
SHA2Update(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
(uint8_t *)data->cd_raw.iov_base + data->cd_offset,
data->cd_length);
break;
case CRYPTO_DATA_UIO:
ret = sha2_digest_update_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
data);
break;
default:
ret = CRYPTO_ARGUMENTS_BAD;
}
return (ret);
}
static int
sha2_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest)
{
int ret = CRYPTO_SUCCESS;
uint_t sha_digest_len;
ASSERT(ctx->cc_provider_private != NULL);
switch (PROV_SHA2_CTX(ctx)->sc_mech_type) {
case SHA256_MECH_INFO_TYPE:
sha_digest_len = SHA256_DIGEST_LENGTH;
break;
case SHA384_MECH_INFO_TYPE:
sha_digest_len = SHA384_DIGEST_LENGTH;
break;
case SHA512_MECH_INFO_TYPE:
sha_digest_len = SHA512_DIGEST_LENGTH;
break;
default:
return (CRYPTO_MECHANISM_INVALID);
}
/*
* We need to just return the length needed to store the output.
* We should not destroy the context for the following cases.
*/
if ((digest->cd_length == 0) ||
(digest->cd_length < sha_digest_len)) {
digest->cd_length = sha_digest_len;
return (CRYPTO_BUFFER_TOO_SMALL);
}
/*
* Do a SHA2 final.
*/
switch (digest->cd_format) {
case CRYPTO_DATA_RAW:
SHA2Final((unsigned char *)digest->cd_raw.iov_base +
digest->cd_offset, &PROV_SHA2_CTX(ctx)->sc_sha2_ctx);
break;
case CRYPTO_DATA_UIO:
ret = sha2_digest_final_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
digest, sha_digest_len, NULL);
break;
default:
ret = CRYPTO_ARGUMENTS_BAD;
}
/* all done, free context and return */
if (ret == CRYPTO_SUCCESS)
digest->cd_length = sha_digest_len;
else
digest->cd_length = 0;
kmem_free(ctx->cc_provider_private, sizeof (sha2_ctx_t));
ctx->cc_provider_private = NULL;
return (ret);
}
static int
sha2_digest_atomic(crypto_mechanism_t *mechanism, crypto_data_t *data,
crypto_data_t *digest)
{
int ret = CRYPTO_SUCCESS;
SHA2_CTX sha2_ctx;
uint32_t sha_digest_len;
/*
* Do the SHA inits.
*/
SHA2Init(mechanism->cm_type, &sha2_ctx);
switch (data->cd_format) {
case CRYPTO_DATA_RAW:
SHA2Update(&sha2_ctx, (uint8_t *)data->
cd_raw.iov_base + data->cd_offset, data->cd_length);
break;
case CRYPTO_DATA_UIO:
ret = sha2_digest_update_uio(&sha2_ctx, data);
break;
default:
ret = CRYPTO_ARGUMENTS_BAD;
}
/*
* Do the SHA updates on the specified input data.
*/
if (ret != CRYPTO_SUCCESS) {
/* the update failed, bail */
digest->cd_length = 0;
return (ret);
}
if (mechanism->cm_type <= SHA256_HMAC_GEN_MECH_INFO_TYPE)
sha_digest_len = SHA256_DIGEST_LENGTH;
else
sha_digest_len = SHA512_DIGEST_LENGTH;
/*
* Do a SHA2 final, must be done separately since the digest
* type can be different than the input data type.
*/
switch (digest->cd_format) {
case CRYPTO_DATA_RAW:
SHA2Final((unsigned char *)digest->cd_raw.iov_base +
digest->cd_offset, &sha2_ctx);
break;
case CRYPTO_DATA_UIO:
ret = sha2_digest_final_uio(&sha2_ctx, digest,
sha_digest_len, NULL);
break;
default:
ret = CRYPTO_ARGUMENTS_BAD;
}
if (ret == CRYPTO_SUCCESS)
digest->cd_length = sha_digest_len;
else
digest->cd_length = 0;
return (ret);
}
/*
* KCF software provider mac entry points.
*
@ -635,13 +332,9 @@ sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
int i, block_size, blocks_per_int64;
/* Determine the block size */
if (ctx->hc_mech_type <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
block_size = SHA256_HMAC_BLOCK_SIZE;
blocks_per_int64 = SHA256_HMAC_BLOCK_SIZE / sizeof (uint64_t);
} else {
block_size = SHA512_HMAC_BLOCK_SIZE;
blocks_per_int64 = SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t);
}
ASSERT3U(ctx->hc_mech_type, ==, SHA512_HMAC_MECH_INFO_TYPE);
block_size = SHA512_HMAC_BLOCK_SIZE;
blocks_per_int64 = SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t);
(void) memset(ipad, 0, block_size);
(void) memset(opad, 0, block_size);
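For context (editorial, not part of the diff): after the two memsets above, sha2_mac_init_ctx() copies the key into ipad/opad and XOR-folds both buffers with the standard RFC 2104 constants before seeding the inner and outer SHA-512 contexts. A minimal sketch of that pad derivation; the function name hmac_pad_xor is hypothetical:

#include <stdint.h>

/* RFC 2104 HMAC pad derivation over 64-bit words. */
static void
hmac_pad_xor(uint64_t *ipad, uint64_t *opad, int blocks_per_int64)
{
	for (int i = 0; i < blocks_per_int64; i++) {
		ipad[i] ^= 0x3636363636363636ULL;	/* inner pad */
		opad[i] ^= 0x5c5c5c5c5c5c5c5cULL;	/* outer pad */
	}
}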
@ -683,15 +376,7 @@ sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
* mechanism
*/
switch (mechanism->cm_type) {
case SHA256_HMAC_MECH_INFO_TYPE:
case SHA256_HMAC_GEN_MECH_INFO_TYPE:
sha_digest_len = SHA256_DIGEST_LENGTH;
sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE;
break;
case SHA384_HMAC_MECH_INFO_TYPE:
case SHA384_HMAC_GEN_MECH_INFO_TYPE:
case SHA512_HMAC_MECH_INFO_TYPE:
case SHA512_HMAC_GEN_MECH_INFO_TYPE:
sha_digest_len = SHA512_DIGEST_LENGTH;
sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE;
break;
@ -731,22 +416,6 @@ sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
}
}
/*
* Get the mechanism parameters, if applicable. (The mech_info table
* lists each algorithm as a {plain, HMAC, HMAC_GENERAL} triple, so
* cm_type % 3 == 2 selects the *_HMAC_GENERAL variants, which carry
* a digest-length parameter.)
*/
if (mechanism->cm_type % 3 == 2) {
if (mechanism->cm_param == NULL ||
mechanism->cm_param_len != sizeof (ulong_t)) {
ret = CRYPTO_MECHANISM_PARAM_INVALID;
} else {
PROV_SHA2_GET_DIGEST_LEN(mechanism,
PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len);
if (PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len >
sha_digest_len)
ret = CRYPTO_MECHANISM_PARAM_INVALID;
}
}
if (ret != CRYPTO_SUCCESS) {
memset(ctx->cc_provider_private, 0, sizeof (sha2_hmac_ctx_t));
kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
@ -795,24 +464,9 @@ sha2_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac)
/* Set the digest lengths to values appropriate to the mechanism */
switch (PROV_SHA2_HMAC_CTX(ctx)->hc_mech_type) {
case SHA256_HMAC_MECH_INFO_TYPE:
sha_digest_len = digest_len = SHA256_DIGEST_LENGTH;
break;
case SHA384_HMAC_MECH_INFO_TYPE:
sha_digest_len = digest_len = SHA384_DIGEST_LENGTH;
break;
case SHA512_HMAC_MECH_INFO_TYPE:
sha_digest_len = digest_len = SHA512_DIGEST_LENGTH;
break;
case SHA256_HMAC_GEN_MECH_INFO_TYPE:
sha_digest_len = SHA256_DIGEST_LENGTH;
digest_len = PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len;
break;
case SHA384_HMAC_GEN_MECH_INFO_TYPE:
case SHA512_HMAC_GEN_MECH_INFO_TYPE:
sha_digest_len = SHA512_DIGEST_LENGTH;
digest_len = PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len;
break;
default:
return (CRYPTO_ARGUMENTS_BAD);
}
@ -912,15 +566,7 @@ sha2_mac_atomic(crypto_mechanism_t *mechanism,
* mechanism
*/
switch (mechanism->cm_type) {
case SHA256_HMAC_MECH_INFO_TYPE:
case SHA256_HMAC_GEN_MECH_INFO_TYPE:
sha_digest_len = digest_len = SHA256_DIGEST_LENGTH;
sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE;
break;
case SHA384_HMAC_MECH_INFO_TYPE:
case SHA384_HMAC_GEN_MECH_INFO_TYPE:
case SHA512_HMAC_MECH_INFO_TYPE:
case SHA512_HMAC_GEN_MECH_INFO_TYPE:
sha_digest_len = digest_len = SHA512_DIGEST_LENGTH;
sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE;
break;
@ -951,20 +597,6 @@ sha2_mac_atomic(crypto_mechanism_t *mechanism,
}
}
/* get the mechanism parameters, if applicable */
if ((mechanism->cm_type % 3) == 2) {
if (mechanism->cm_param == NULL ||
mechanism->cm_param_len != sizeof (ulong_t)) {
ret = CRYPTO_MECHANISM_PARAM_INVALID;
goto bail;
}
PROV_SHA2_GET_DIGEST_LEN(mechanism, digest_len);
if (digest_len > sha_digest_len) {
ret = CRYPTO_MECHANISM_PARAM_INVALID;
goto bail;
}
}
/* do a SHA2 update of the inner context using the specified data */
SHA2_MAC_UPDATE(data, sha2_hmac_ctx, ret);
if (ret != CRYPTO_SUCCESS)
@ -979,16 +611,9 @@ sha2_mac_atomic(crypto_mechanism_t *mechanism,
/*
* Do an SHA2 update on the outer context, feeding the inner
* digest as data.
*
* HMAC-SHA384 needs special handling as the outer hash needs only 48
* bytes of the inner hash value.
*/
if (mechanism->cm_type == SHA384_HMAC_MECH_INFO_TYPE ||
mechanism->cm_type == SHA384_HMAC_GEN_MECH_INFO_TYPE)
SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest,
SHA384_DIGEST_LENGTH);
else
SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, sha_digest_len);
ASSERT3U(mechanism->cm_type, ==, SHA512_HMAC_MECH_INFO_TYPE);
SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, sha_digest_len);
/*
* Do a SHA2 final on the outer context, storing the computed
@ -1044,15 +669,7 @@ sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
* mechanism
*/
switch (mechanism->cm_type) {
case SHA256_HMAC_MECH_INFO_TYPE:
case SHA256_HMAC_GEN_MECH_INFO_TYPE:
sha_digest_len = digest_len = SHA256_DIGEST_LENGTH;
sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE;
break;
case SHA384_HMAC_MECH_INFO_TYPE:
case SHA384_HMAC_GEN_MECH_INFO_TYPE:
case SHA512_HMAC_MECH_INFO_TYPE:
case SHA512_HMAC_GEN_MECH_INFO_TYPE:
sha_digest_len = digest_len = SHA512_DIGEST_LENGTH;
sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE;
break;
@ -1083,20 +700,6 @@ sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
}
}
/* get the mechanism parameters, if applicable */
if (mechanism->cm_type % 3 == 2) {
if (mechanism->cm_param == NULL ||
mechanism->cm_param_len != sizeof (ulong_t)) {
ret = CRYPTO_MECHANISM_PARAM_INVALID;
goto bail;
}
PROV_SHA2_GET_DIGEST_LEN(mechanism, digest_len);
if (digest_len > sha_digest_len) {
ret = CRYPTO_MECHANISM_PARAM_INVALID;
goto bail;
}
}
if (mac->cd_length != digest_len) {
ret = CRYPTO_INVALID_MAC;
goto bail;
@ -1114,16 +717,9 @@ sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
/*
* Do an SHA2 update on the outer context, feeding the inner
* digest as data.
*
* HMAC-SHA384 needs special handling as the outer hash needs only 48
* bytes of the inner hash value.
*/
if (mechanism->cm_type == SHA384_HMAC_MECH_INFO_TYPE ||
mechanism->cm_type == SHA384_HMAC_GEN_MECH_INFO_TYPE)
SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest,
SHA384_DIGEST_LENGTH);
else
SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, sha_digest_len);
ASSERT3U(mechanism->cm_type, ==, SHA512_HMAC_MECH_INFO_TYPE);
SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, sha_digest_len);
/*
* Do a SHA2 final on the outer context, storing the computed
@ -1215,15 +811,7 @@ sha2_create_ctx_template(crypto_mechanism_t *mechanism, crypto_key_t *key,
* mechanism
*/
switch (mechanism->cm_type) {
case SHA256_HMAC_MECH_INFO_TYPE:
case SHA256_HMAC_GEN_MECH_INFO_TYPE:
sha_digest_len = SHA256_DIGEST_LENGTH;
sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE;
break;
case SHA384_HMAC_MECH_INFO_TYPE:
case SHA384_HMAC_GEN_MECH_INFO_TYPE:
case SHA512_HMAC_MECH_INFO_TYPE:
case SHA512_HMAC_GEN_MECH_INFO_TYPE:
sha_digest_len = SHA512_DIGEST_LENGTH;
sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE;
break;
@ -1272,17 +860,9 @@ sha2_free_context(crypto_ctx_t *ctx)
if (ctx->cc_provider_private == NULL)
return (CRYPTO_SUCCESS);
/*
* We have to free either SHA2 or SHA2-HMAC contexts, which
* have different lengths.
*
* Note: Below is dependent on the mechanism ordering.
*/
if (PROV_SHA2_CTX(ctx)->sc_mech_type % 3 == 0)
ctx_len = sizeof (sha2_ctx_t);
else
ctx_len = sizeof (sha2_hmac_ctx_t);
ASSERT3U(PROV_SHA2_CTX(ctx)->sc_mech_type, ==,
SHA512_HMAC_MECH_INFO_TYPE);
ctx_len = sizeof (sha2_hmac_ctx_t);
memset(ctx->cc_provider_private, 0, ctx_len);
kmem_free(ctx->cc_provider_private, ctx_len);

View file

@ -1,656 +0,0 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2013 Saso Kiselkov. All rights reserved.
*/
#include <sys/crypto/common.h>
#include <sys/crypto/icp.h>
#include <sys/crypto/spi.h>
#include <sys/sysmacros.h>
#define SKEIN_MODULE_IMPL
#include <sys/skein.h>
static const crypto_mech_info_t skein_mech_info_tab[] = {
{CKM_SKEIN_256, SKEIN_256_MECH_INFO_TYPE,
CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC},
{CKM_SKEIN_256_MAC, SKEIN_256_MAC_MECH_INFO_TYPE,
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC},
{CKM_SKEIN_512, SKEIN_512_MECH_INFO_TYPE,
CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC},
{CKM_SKEIN_512_MAC, SKEIN_512_MAC_MECH_INFO_TYPE,
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC},
{CKM_SKEIN1024, SKEIN1024_MECH_INFO_TYPE,
CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC},
{CKM_SKEIN1024_MAC, SKEIN1024_MAC_MECH_INFO_TYPE,
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC},
};
static int skein_digest_init(crypto_ctx_t *, crypto_mechanism_t *);
static int skein_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *);
static int skein_update(crypto_ctx_t *, crypto_data_t *);
static int skein_final(crypto_ctx_t *, crypto_data_t *);
static int skein_digest_atomic(crypto_mechanism_t *, crypto_data_t *,
crypto_data_t *);
static const crypto_digest_ops_t skein_digest_ops = {
.digest_init = skein_digest_init,
.digest = skein_digest,
.digest_update = skein_update,
.digest_final = skein_final,
.digest_atomic = skein_digest_atomic
};
static int skein_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
crypto_spi_ctx_template_t);
static int skein_mac_atomic(crypto_mechanism_t *, crypto_key_t *,
crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t);
static const crypto_mac_ops_t skein_mac_ops = {
.mac_init = skein_mac_init,
.mac = NULL,
.mac_update = skein_update, /* using regular digest update is OK here */
.mac_final = skein_final, /* using regular digest final is OK here */
.mac_atomic = skein_mac_atomic,
.mac_verify_atomic = NULL
};
static int skein_create_ctx_template(crypto_mechanism_t *, crypto_key_t *,
crypto_spi_ctx_template_t *, size_t *);
static int skein_free_context(crypto_ctx_t *);
static const crypto_ctx_ops_t skein_ctx_ops = {
.create_ctx_template = skein_create_ctx_template,
.free_context = skein_free_context
};
static const crypto_ops_t skein_crypto_ops = {
&skein_digest_ops,
NULL,
&skein_mac_ops,
&skein_ctx_ops,
};
static const crypto_provider_info_t skein_prov_info = {
"Skein Software Provider",
&skein_crypto_ops,
sizeof (skein_mech_info_tab) / sizeof (crypto_mech_info_t),
skein_mech_info_tab
};
static crypto_kcf_provider_handle_t skein_prov_handle = 0;
typedef struct skein_ctx {
skein_mech_type_t sc_mech_type;
size_t sc_digest_bitlen;
/*LINTED(E_ANONYMOUS_UNION_DECL)*/
union {
Skein_256_Ctxt_t sc_256;
Skein_512_Ctxt_t sc_512;
Skein1024_Ctxt_t sc_1024;
};
} skein_ctx_t;
#define SKEIN_CTX(_ctx_) ((skein_ctx_t *)((_ctx_)->cc_provider_private))
#define SKEIN_CTX_LVALUE(_ctx_) (_ctx_)->cc_provider_private
#define SKEIN_OP(_skein_ctx, _op, ...) \
do { \
skein_ctx_t *sc = (_skein_ctx); \
switch (sc->sc_mech_type) { \
case SKEIN_256_MECH_INFO_TYPE: \
case SKEIN_256_MAC_MECH_INFO_TYPE: \
(void) Skein_256_ ## _op(&sc->sc_256, __VA_ARGS__);\
break; \
case SKEIN_512_MECH_INFO_TYPE: \
case SKEIN_512_MAC_MECH_INFO_TYPE: \
(void) Skein_512_ ## _op(&sc->sc_512, __VA_ARGS__);\
break; \
case SKEIN1024_MECH_INFO_TYPE: \
case SKEIN1024_MAC_MECH_INFO_TYPE: \
(void) Skein1024_ ## _op(&sc->sc_1024, __VA_ARGS__);\
break; \
} \
} while (0)
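A quick editorial illustration, not part of the diff, of how this dispatch macro is used; the wrapper name feed_bytes is hypothetical, but the call shape matches skein_update() below:

/* Feed data into whichever Skein variant the context was set up for. */
static void
feed_bytes(skein_ctx_t *sc, const uint8_t *buf, size_t len)
{
	/*
	 * Expands to Skein_256_Update(&sc->sc_256, buf, len),
	 * Skein_512_Update(&sc->sc_512, ...) or Skein1024_Update(
	 * &sc->sc_1024, ...) depending on sc->sc_mech_type.
	 */
	SKEIN_OP(sc, Update, buf, len);
}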
static int
skein_get_digest_bitlen(const crypto_mechanism_t *mechanism, size_t *result)
{
if (mechanism->cm_param != NULL) {
/*LINTED(E_BAD_PTR_CAST_ALIGN)*/
skein_param_t *param = (skein_param_t *)mechanism->cm_param;
if (mechanism->cm_param_len != sizeof (*param) ||
param->sp_digest_bitlen == 0) {
return (CRYPTO_MECHANISM_PARAM_INVALID);
}
*result = param->sp_digest_bitlen;
} else {
switch (mechanism->cm_type) {
case SKEIN_256_MECH_INFO_TYPE:
*result = 256;
break;
case SKEIN_512_MECH_INFO_TYPE:
*result = 512;
break;
case SKEIN1024_MECH_INFO_TYPE:
*result = 1024;
break;
default:
return (CRYPTO_MECHANISM_INVALID);
}
}
return (CRYPTO_SUCCESS);
}
int
skein_mod_init(void)
{
/*
* Try to register with KCF - failure shouldn't unload us, since we
* still may want to continue providing misc/skein functionality.
*/
(void) crypto_register_provider(&skein_prov_info, &skein_prov_handle);
return (0);
}
int
skein_mod_fini(void)
{
int ret = 0;
if (skein_prov_handle != 0) {
if ((ret = crypto_unregister_provider(skein_prov_handle)) !=
CRYPTO_SUCCESS) {
cmn_err(CE_WARN,
"skein _fini: crypto_unregister_provider() "
"failed (0x%x)", ret);
return (EBUSY);
}
skein_prov_handle = 0;
}
return (0);
}
/*
* General Skein hashing helper functions.
*/
/*
* Performs an Update on a context with uio input data.
*/
static int
skein_digest_update_uio(skein_ctx_t *ctx, const crypto_data_t *data)
{
off_t offset = data->cd_offset;
size_t length = data->cd_length;
uint_t vec_idx = 0;
size_t cur_len;
zfs_uio_t *uio = data->cd_uio;
/* we support only kernel buffers */
if (zfs_uio_segflg(uio) != UIO_SYSSPACE)
return (CRYPTO_ARGUMENTS_BAD);
/*
* Jump to the first iovec containing data to be
* digested.
*/
offset = zfs_uio_index_at_offset(uio, offset, &vec_idx);
if (vec_idx == zfs_uio_iovcnt(uio)) {
/*
* The caller specified an offset that is larger than the
* total size of the buffers it provided.
*/
return (CRYPTO_DATA_LEN_RANGE);
}
/*
* Now do the digesting on the iovecs.
*/
while (vec_idx < zfs_uio_iovcnt(uio) && length > 0) {
cur_len = MIN(zfs_uio_iovlen(uio, vec_idx) - offset, length);
SKEIN_OP(ctx, Update, (uint8_t *)zfs_uio_iovbase(uio, vec_idx)
+ offset, cur_len);
length -= cur_len;
vec_idx++;
offset = 0;
}
if (vec_idx == zfs_uio_iovcnt(uio) && length > 0) {
/*
* The end of the specified iovecs was reached, but the
* requested length could not be processed, i.e. the
* caller requested to digest more data than it provided.
*/
return (CRYPTO_DATA_LEN_RANGE);
}
return (CRYPTO_SUCCESS);
}
/*
* Performs a Final on a context and writes to a uio digest output.
*/
static int
skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest)
{
off_t offset = digest->cd_offset;
uint_t vec_idx = 0;
zfs_uio_t *uio = digest->cd_uio;
/* we support only kernel buffers */
if (zfs_uio_segflg(uio) != UIO_SYSSPACE)
return (CRYPTO_ARGUMENTS_BAD);
/*
* Jump to the first iovec containing ptr to the digest to be returned.
*/
offset = zfs_uio_index_at_offset(uio, offset, &vec_idx);
if (vec_idx == zfs_uio_iovcnt(uio)) {
/*
* The caller specified an offset that is larger than the
* total size of the buffers it provided.
*/
return (CRYPTO_DATA_LEN_RANGE);
}
if (offset + CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen) <=
zfs_uio_iovlen(uio, vec_idx)) {
/* The computed digest will fit in the current iovec. */
SKEIN_OP(ctx, Final,
(uchar_t *)zfs_uio_iovbase(uio, vec_idx) + offset);
} else {
uint8_t *digest_tmp;
off_t scratch_offset = 0;
size_t length = CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen);
size_t cur_len;
digest_tmp = kmem_alloc(CRYPTO_BITS2BYTES(
ctx->sc_digest_bitlen), KM_SLEEP);
if (digest_tmp == NULL)
return (CRYPTO_HOST_MEMORY);
SKEIN_OP(ctx, Final, digest_tmp);
while (vec_idx < zfs_uio_iovcnt(uio) && length > 0) {
cur_len = MIN(zfs_uio_iovlen(uio, vec_idx) - offset,
length);
memcpy(zfs_uio_iovbase(uio, vec_idx) + offset,
digest_tmp + scratch_offset, cur_len);
length -= cur_len;
vec_idx++;
scratch_offset += cur_len;
offset = 0;
}
kmem_free(digest_tmp, CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen));
if (vec_idx == zfs_uio_iovcnt(uio) && length > 0) {
/*
* The end of the specified iovecs was reached, but the
* requested length could not be processed, i.e. the
* caller requested to digest more data than it provided.
*/
return (CRYPTO_DATA_LEN_RANGE);
}
}
return (CRYPTO_SUCCESS);
}
/*
* KCF software provider digest entry points.
*/
/*
* Initializes a skein digest context to the configuration in `mechanism'.
* The mechanism cm_type must be one of SKEIN_*_MECH_INFO_TYPE. The cm_param
* field may contain a skein_param_t structure indicating the length of the
* digest the algorithm should produce. Otherwise the default output lengths
* are applied (32 bytes for Skein-256, 64 bytes for Skein-512 and 128 bytes
* for Skein-1024).
*/
static int
skein_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism)
{
int error = CRYPTO_SUCCESS;
if (!VALID_SKEIN_DIGEST_MECH(mechanism->cm_type))
return (CRYPTO_MECHANISM_INVALID);
SKEIN_CTX_LVALUE(ctx) = kmem_alloc(sizeof (*SKEIN_CTX(ctx)), KM_SLEEP);
if (SKEIN_CTX(ctx) == NULL)
return (CRYPTO_HOST_MEMORY);
SKEIN_CTX(ctx)->sc_mech_type = mechanism->cm_type;
error = skein_get_digest_bitlen(mechanism,
&SKEIN_CTX(ctx)->sc_digest_bitlen);
if (error != CRYPTO_SUCCESS)
goto errout;
SKEIN_OP(SKEIN_CTX(ctx), Init, SKEIN_CTX(ctx)->sc_digest_bitlen);
return (CRYPTO_SUCCESS);
errout:
memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
SKEIN_CTX_LVALUE(ctx) = NULL;
return (error);
}
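As an editorial aside, requesting a non-default output length per the comment above would look roughly like this; mechanism setup only, with illustrative values:

/* Ask Skein-512 for a 256-bit digest via the optional parameter. */
skein_param_t param = { .sp_digest_bitlen = 256 };
crypto_mechanism_t mech = {
	.cm_type = SKEIN_512_MECH_INFO_TYPE,
	.cm_param = (caddr_t)&param,
	.cm_param_len = sizeof (param),
};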
/*
* Executes a skein_update and skein_digest on a pre-initialized crypto
* context in a single step. See the documentation to these functions to
* see what to pass here.
*/
static int
skein_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest)
{
int error = CRYPTO_SUCCESS;
ASSERT(SKEIN_CTX(ctx) != NULL);
if (digest->cd_length <
CRYPTO_BITS2BYTES(SKEIN_CTX(ctx)->sc_digest_bitlen)) {
digest->cd_length =
CRYPTO_BITS2BYTES(SKEIN_CTX(ctx)->sc_digest_bitlen);
return (CRYPTO_BUFFER_TOO_SMALL);
}
error = skein_update(ctx, data);
if (error != CRYPTO_SUCCESS) {
memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
SKEIN_CTX_LVALUE(ctx) = NULL;
digest->cd_length = 0;
return (error);
}
error = skein_final(ctx, digest);
return (error);
}
/*
* Performs a skein Update with the input message in `data' (successive calls
* can push more data). This is used both for digest and MAC operation.
* Supported input data formats are raw and uio.
*/
static int
skein_update(crypto_ctx_t *ctx, crypto_data_t *data)
{
int error = CRYPTO_SUCCESS;
ASSERT(SKEIN_CTX(ctx) != NULL);
switch (data->cd_format) {
case CRYPTO_DATA_RAW:
SKEIN_OP(SKEIN_CTX(ctx), Update,
(uint8_t *)data->cd_raw.iov_base + data->cd_offset,
data->cd_length);
break;
case CRYPTO_DATA_UIO:
error = skein_digest_update_uio(SKEIN_CTX(ctx), data);
break;
default:
error = CRYPTO_ARGUMENTS_BAD;
}
return (error);
}
/*
* Performs a skein Final, writing the output to `digest'. This is used both
* for digest and MAC operation.
* Supported output digest formats are raw and uio.
*/
static int
skein_final_nofree(crypto_ctx_t *ctx, crypto_data_t *digest)
{
int error = CRYPTO_SUCCESS;
ASSERT(SKEIN_CTX(ctx) != NULL);
if (digest->cd_length <
CRYPTO_BITS2BYTES(SKEIN_CTX(ctx)->sc_digest_bitlen)) {
digest->cd_length =
CRYPTO_BITS2BYTES(SKEIN_CTX(ctx)->sc_digest_bitlen);
return (CRYPTO_BUFFER_TOO_SMALL);
}
switch (digest->cd_format) {
case CRYPTO_DATA_RAW:
SKEIN_OP(SKEIN_CTX(ctx), Final,
(uint8_t *)digest->cd_raw.iov_base + digest->cd_offset);
break;
case CRYPTO_DATA_UIO:
error = skein_digest_final_uio(SKEIN_CTX(ctx), digest);
break;
default:
error = CRYPTO_ARGUMENTS_BAD;
}
if (error == CRYPTO_SUCCESS)
digest->cd_length =
CRYPTO_BITS2BYTES(SKEIN_CTX(ctx)->sc_digest_bitlen);
else
digest->cd_length = 0;
return (error);
}
static int
skein_final(crypto_ctx_t *ctx, crypto_data_t *digest)
{
int error = skein_final_nofree(ctx, digest);
if (error == CRYPTO_BUFFER_TOO_SMALL)
return (error);
memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*(SKEIN_CTX(ctx))));
SKEIN_CTX_LVALUE(ctx) = NULL;
return (error);
}
/*
* Performs a full skein digest computation in a single call, configuring the
* algorithm according to `mechanism', reading the input to be digested from
* `data' and writing the output to `digest'.
* Supported input/output formats are raw and uio.
*/
static int
skein_digest_atomic(crypto_mechanism_t *mechanism, crypto_data_t *data,
crypto_data_t *digest)
{
int error;
skein_ctx_t skein_ctx;
crypto_ctx_t ctx;
SKEIN_CTX_LVALUE(&ctx) = &skein_ctx;
/* Init */
if (!VALID_SKEIN_DIGEST_MECH(mechanism->cm_type))
return (CRYPTO_MECHANISM_INVALID);
skein_ctx.sc_mech_type = mechanism->cm_type;
error = skein_get_digest_bitlen(mechanism, &skein_ctx.sc_digest_bitlen);
if (error != CRYPTO_SUCCESS)
goto out;
SKEIN_OP(&skein_ctx, Init, skein_ctx.sc_digest_bitlen);
if ((error = skein_update(&ctx, data)) != CRYPTO_SUCCESS)
goto out;
if ((error = skein_final_nofree(&ctx, digest)) != CRYPTO_SUCCESS)
goto out;
out:
if (error == CRYPTO_SUCCESS)
digest->cd_length =
CRYPTO_BITS2BYTES(skein_ctx.sc_digest_bitlen);
else
digest->cd_length = 0;
memset(&skein_ctx, 0, sizeof (skein_ctx));
return (error);
}
/*
* Helper function that builds a Skein MAC context from the provided
* mechanism and key.
*/
static int
skein_mac_ctx_build(skein_ctx_t *ctx, crypto_mechanism_t *mechanism,
crypto_key_t *key)
{
int error;
if (!VALID_SKEIN_MAC_MECH(mechanism->cm_type))
return (CRYPTO_MECHANISM_INVALID);
ctx->sc_mech_type = mechanism->cm_type;
error = skein_get_digest_bitlen(mechanism, &ctx->sc_digest_bitlen);
if (error != CRYPTO_SUCCESS)
return (error);
SKEIN_OP(ctx, InitExt, ctx->sc_digest_bitlen, 0, key->ck_data,
CRYPTO_BITS2BYTES(key->ck_length));
return (CRYPTO_SUCCESS);
}
/*
* KCF software provider mac entry points.
*/
/*
* Initializes a skein MAC context. You may pass a ctx_template, in which
* case the template will be reused to make initialization more efficient.
* Otherwise a new context will be constructed. The mechanism cm_type must
* be one of SKEIN_*_MAC_MECH_INFO_TYPE. Same as in skein_digest_init, you
* may pass a skein_param_t in cm_param to configure the length of the
* digest. The key must be in raw format.
*/
static int
skein_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_spi_ctx_template_t ctx_template)
{
int error;
SKEIN_CTX_LVALUE(ctx) = kmem_alloc(sizeof (*SKEIN_CTX(ctx)), KM_SLEEP);
if (SKEIN_CTX(ctx) == NULL)
return (CRYPTO_HOST_MEMORY);
if (ctx_template != NULL) {
memcpy(SKEIN_CTX(ctx), ctx_template,
sizeof (*SKEIN_CTX(ctx)));
} else {
error = skein_mac_ctx_build(SKEIN_CTX(ctx), mechanism, key);
if (error != CRYPTO_SUCCESS)
goto errout;
}
return (CRYPTO_SUCCESS);
errout:
memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
return (error);
}
/*
* The MAC update and final calls are reused from the regular digest code.
*/
/*
* Same as skein_digest_atomic, performs an atomic Skein MAC operation in
* one step. All the same properties apply to the arguments of this
* function as to those of the partial operations above.
*/
static int
skein_mac_atomic(crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
crypto_spi_ctx_template_t ctx_template)
{
/* faux crypto context just for skein_digest_{update,final} */
int error;
crypto_ctx_t ctx;
skein_ctx_t skein_ctx;
SKEIN_CTX_LVALUE(&ctx) = &skein_ctx;
if (ctx_template != NULL) {
memcpy(&skein_ctx, ctx_template, sizeof (skein_ctx));
} else {
error = skein_mac_ctx_build(&skein_ctx, mechanism, key);
if (error != CRYPTO_SUCCESS)
goto errout;
}
if ((error = skein_update(&ctx, data)) != CRYPTO_SUCCESS)
goto errout;
if ((error = skein_final_nofree(&ctx, mac)) != CRYPTO_SUCCESS)
goto errout;
return (CRYPTO_SUCCESS);
errout:
memset(&skein_ctx, 0, sizeof (skein_ctx));
return (error);
}
/*
* KCF software provider context management entry points.
*/
/*
* Constructs a context template for the Skein MAC algorithm. The same
* properties apply to the arguments of this function as to those of
* skein_mac_init.
*/
static int
skein_create_ctx_template(crypto_mechanism_t *mechanism, crypto_key_t *key,
crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size)
{
int error;
skein_ctx_t *ctx_tmpl;
ctx_tmpl = kmem_alloc(sizeof (*ctx_tmpl), KM_SLEEP);
if (ctx_tmpl == NULL)
return (CRYPTO_HOST_MEMORY);
error = skein_mac_ctx_build(ctx_tmpl, mechanism, key);
if (error != CRYPTO_SUCCESS)
goto errout;
*ctx_template = ctx_tmpl;
*ctx_template_size = sizeof (*ctx_tmpl);
return (CRYPTO_SUCCESS);
errout:
memset(ctx_tmpl, 0, sizeof (*ctx_tmpl));
kmem_free(ctx_tmpl, sizeof (*ctx_tmpl));
return (error);
}
/*
* Frees a skein context in a parent crypto context.
*/
static int
skein_free_context(crypto_ctx_t *ctx)
{
if (SKEIN_CTX(ctx) != NULL) {
memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
SKEIN_CTX_LVALUE(ctx) = NULL;
}
return (CRYPTO_SUCCESS);
}

View file

@ -45,6 +45,16 @@
#include <sys/vnode.h>
#include <sys/zfs_znode.h>
static void
zfs_freeuio(struct uio *uio)
{
#if __FreeBSD_version > 1500013
freeuio(uio);
#else
free(uio, M_IOV);
#endif
}
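(Editorial note: on FreeBSD revisions after 1500013, dynamically allocated UIOs are released through the new freeuio() helper rather than freed directly to the M_IOV malloc type; the wrapper lets zfs_uiocopy() below stay version-agnostic.)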
int
zfs_uiomove(void *cp, size_t n, zfs_uio_rw_t dir, zfs_uio_t *uio)
{
@ -77,7 +87,7 @@ zfs_uiocopy(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio, size_t *cbytes)
error = vn_io_fault_uiomove(p, n, uio_clone);
*cbytes = zfs_uio_resid(uio) - uio_clone->uio_resid;
if (uio_clone != &small_uio_clone)
free(uio_clone, M_IOV);
zfs_freeuio(uio_clone);
return (error);
}

View file

@ -91,15 +91,15 @@ arc_available_memory(void)
}
#if !defined(UMA_MD_SMALL_ALLOC) && !defined(UMA_USE_DMAP)
/*
* If we're on an i386 platform, it's possible that we'll exhaust the
* kernel heap space before we ever run out of available physical
* memory. Most checks of the size of the heap_area compare against
* tune.t_minarmem, which is the minimum available real memory that we
* can have in the system. However, this is generally fixed at 25 pages
* which is so low that it's useless. In this comparison, we seek to
* calculate the total heap-size, and reclaim if more than 3/4ths of the
* heap is allocated. (Or, in the calculation, if less than 1/4th is
* free)
* If we're on a platform without a direct map, it's possible that we'll
* exhaust the kernel heap space before we ever run out of available
* physical memory. Most checks of the size of the heap_area compare
* against tune.t_minarmem, which is the minimum available real memory
* that we can have in the system. However, this is generally fixed at
* 25 pages which is so low that it's useless. In this comparison, we
* seek to calculate the total heap-size, and reclaim if more than
* 3/4ths of the heap is allocated. (Or, in the calculation, if less
* than 1/4th is free)
*/
n = uma_avail() - (long)(uma_limit() / 4);
if (n < lowest) {

View file

@ -111,6 +111,7 @@ static int zfs__fini(void);
static void zfs_shutdown(void *, int);
static eventhandler_tag zfs_shutdown_event_tag;
static eventhandler_tag zfs_mountroot_event_tag;
#define ZFS_MIN_KSTACK_PAGES 4
@ -305,16 +306,25 @@ zfs_modevent(module_t mod, int type, void *unused __unused)
switch (type) {
case MOD_LOAD:
err = zfs__init();
if (err == 0)
if (err == 0) {
zfs_shutdown_event_tag = EVENTHANDLER_REGISTER(
shutdown_post_sync, zfs_shutdown, NULL,
SHUTDOWN_PRI_FIRST);
zfs_mountroot_event_tag = EVENTHANDLER_REGISTER(
mountroot, spa_boot_init, NULL,
SI_ORDER_ANY);
}
return (err);
case MOD_UNLOAD:
err = zfs__fini();
if (err == 0 && zfs_shutdown_event_tag != NULL)
EVENTHANDLER_DEREGISTER(shutdown_post_sync,
zfs_shutdown_event_tag);
if (err == 0) {
if (zfs_shutdown_event_tag != NULL)
EVENTHANDLER_DEREGISTER(shutdown_post_sync,
zfs_shutdown_event_tag);
if (zfs_mountroot_event_tag != NULL)
EVENTHANDLER_DEREGISTER(mountroot,
zfs_mountroot_event_tag);
}
return (err);
case MOD_SHUTDOWN:
return (0);
@ -330,9 +340,6 @@ static moduledata_t zfs_mod = {
0
};
#ifdef _KERNEL
EVENTHANDLER_DEFINE(mountroot, spa_boot_init, NULL, 0);
#endif
FEATURE(zfs, "OpenZFS support");

View file

@ -292,6 +292,7 @@ zvol_geom_open(struct g_provider *pp, int flag, int count)
if (!mutex_tryenter(&spa_namespace_lock)) {
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
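/*
* (Editorial comment, not in the diff: zv_suspend_lock was dropped
* just above, so drop_suspend must be cleared to keep the flag in
* sync with lock ownership; otherwise a later exit path could
* rw_exit() a suspend lock this thread no longer holds.)
*/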
kern_yield(PRI_USER);
goto retry;
} else {
@ -983,6 +984,7 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
if (!mutex_tryenter(&spa_namespace_lock)) {
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
kern_yield(PRI_USER);
goto retry;
} else {

View file

@ -21,6 +21,8 @@
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#define SPL_KMEM_CACHE_IMPLEMENTING
#include <linux/percpu_compat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
@ -33,16 +35,6 @@
#include <linux/swap.h>
#include <linux/prefetch.h>
/*
* Within the scope of spl-kmem.c file the kmem_cache_* definitions
* are removed to allow access to the real Linux slab allocator.
*/
#undef kmem_cache_destroy
#undef kmem_cache_create
#undef kmem_cache_alloc
#undef kmem_cache_free
/*
* Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
* with smp_mb__{before,after}_atomic() because they were redundant. This is

View file

@ -526,8 +526,8 @@ abd_alloc_zero_scatter(void)
#define PAGE_SHIFT (highbit64(PAGESIZE)-1)
#endif
#define zfs_kmap_atomic(chunk) ((void *)chunk)
#define zfs_kunmap_atomic(addr) do { (void)(addr); } while (0)
#define zfs_kmap_local(chunk) ((void *)chunk)
#define zfs_kunmap_local(addr) do { (void)(addr); } while (0)
#define local_irq_save(flags) do { (void)(flags); } while (0)
#define local_irq_restore(flags) do { (void)(flags); } while (0)
#define nth_page(pg, i) \
@ -980,7 +980,7 @@ abd_iter_map(struct abd_iter *aiter)
aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
aiter->iter_abd->abd_size - aiter->iter_pos);
paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg));
paddr = zfs_kmap_local(sg_page(aiter->iter_sg));
}
aiter->iter_mapaddr = (char *)paddr + offset;
@ -999,7 +999,7 @@ abd_iter_unmap(struct abd_iter *aiter)
if (!abd_is_linear(aiter->iter_abd)) {
/* LINTED E_FUNC_SET_NOT_USED */
zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset);
zfs_kunmap_local(aiter->iter_mapaddr - aiter->iter_offset);
}
ASSERT3P(aiter->iter_mapaddr, !=, NULL);

View file

@ -150,7 +150,11 @@ vdev_bdev_mode(spa_mode_t smode)
static uint64_t
bdev_capacity(struct block_device *bdev)
{
#ifdef HAVE_BDEV_NR_BYTES
return (bdev_nr_bytes(bdev));
#else
return (i_size_read(bdev->bd_inode));
#endif
}
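(Editorial note: the #ifdef exists because bdev_nr_bytes() is the modern accessor for a block device's size, while i_size_read(bdev->bd_inode) only works on older kernels that still expose bd_inode from struct block_device.)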
#if !defined(HAVE_BDEV_WHOLE)
@ -209,7 +213,7 @@ bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
* "reserved" EFI partition: in such cases return the device
* usable capacity.
*/
available = i_size_read(bdev_whole(bdev)->bd_inode) -
available = bdev_capacity(bdev_whole(bdev)) -
((EFI_MIN_RESV_SIZE + NEW_START_BLOCK +
PARTITION_END_ALIGNMENT) << SECTOR_BITS);
psize = MAX(available, bdev_capacity(bdev));
@ -925,12 +929,12 @@ vdev_disk_io_rw(zio_t *zio)
/*
* Accessing outside the block device is never allowed.
*/
if (zio->io_offset + zio->io_size > bdev->bd_inode->i_size) {
if (zio->io_offset + zio->io_size > bdev_capacity(bdev)) {
vdev_dbgmsg(zio->io_vd,
"Illegal access %llu size %llu, device size %llu",
(u_longlong_t)zio->io_offset,
(u_longlong_t)zio->io_size,
(u_longlong_t)i_size_read(bdev->bd_inode));
(u_longlong_t)bdev_capacity(bdev));
return (SET_ERROR(EIO));
}
@ -1123,12 +1127,12 @@ vdev_classic_physio(zio_t *zio)
/*
* Accessing outside the block device is never allowed.
*/
if (io_offset + io_size > bdev->bd_inode->i_size) {
if (io_offset + io_size > bdev_capacity(bdev)) {
vdev_dbgmsg(zio->io_vd,
"Illegal access %llu size %llu, device size %llu",
(u_longlong_t)io_offset,
(u_longlong_t)io_size,
(u_longlong_t)i_size_read(bdev->bd_inode));
(u_longlong_t)bdev_capacity(bdev));
return (SET_ERROR(EIO));
}

View file

@ -136,7 +136,7 @@ zfs_uiomove_bvec_impl(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
void *paddr;
cnt = MIN(bv->bv_len - skip, n);
paddr = zfs_kmap_atomic(bv->bv_page);
paddr = zfs_kmap_local(bv->bv_page);
if (rw == UIO_READ) {
/* Copy from buffer 'p' to the bvec data */
memcpy(paddr + bv->bv_offset + skip, p, cnt);
@ -144,7 +144,7 @@ zfs_uiomove_bvec_impl(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
/* Copy from bvec data to buffer 'p' */
memcpy(p, paddr + bv->bv_offset + skip, cnt);
}
zfs_kunmap_atomic(paddr);
zfs_kunmap_local(paddr);
skip += cnt;
if (skip == bv->bv_len) {
@ -168,7 +168,7 @@ zfs_copy_bvec(void *p, size_t skip, size_t cnt, zfs_uio_rw_t rw,
{
void *paddr;
paddr = zfs_kmap_atomic(bv->bv_page);
paddr = zfs_kmap_local(bv->bv_page);
if (rw == UIO_READ) {
/* Copy from buffer 'p' to the bvec data */
memcpy(paddr + bv->bv_offset + skip, p, cnt);
@ -176,7 +176,7 @@ zfs_copy_bvec(void *p, size_t skip, size_t cnt, zfs_uio_rw_t rw,
/* Copy from bvec data to buffer 'p' */
memcpy(p, paddr + bv->bv_offset + skip, cnt);
}
zfs_kunmap_atomic(paddr);
zfs_kunmap_local(paddr);
}
/*

View file

@ -41,6 +41,7 @@
#include <linux/blkdev_compat.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/workqueue.h>
#ifdef HAVE_BLK_MQ
#include <linux/blk-mq.h>
@ -790,6 +791,7 @@ zvol_open(struct block_device *bdev, fmode_t flag)
if (!mutex_tryenter(&spa_namespace_lock)) {
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
#ifdef HAVE_BLKDEV_GET_ERESTARTSYS
schedule();
@ -1074,8 +1076,106 @@ static const struct block_device_operations zvol_ops = {
#endif
};
typedef struct zvol_queue_limits {
unsigned int zql_max_hw_sectors;
unsigned short zql_max_segments;
unsigned int zql_max_segment_size;
unsigned int zql_io_opt;
} zvol_queue_limits_t;
static void
zvol_queue_limits_init(zvol_queue_limits_t *limits, zvol_state_t *zv,
boolean_t use_blk_mq)
{
limits->zql_max_hw_sectors = (DMU_MAX_ACCESS / 4) >> 9;
if (use_blk_mq) {
/*
* IO requests can be really big (1MB). When an IO request
* comes in, it is passed off to zvol_read() or zvol_write()
* in a new thread, where it is chunked up into 'volblocksize'
* sized pieces and processed. So for example, if the request
* is a 1MB write and your volblocksize is 128k, one zvol_write
* thread will take that request and sequentially do eight 128k
* IOs. This is due to the fact that the thread needs to lock
* each volblocksize sized block. So you might be wondering:
* "instead of passing the whole 1MB request to one thread,
* why not pass ten individual 128k chunks to ten threads and
* process the whole write in parallel?" The short answer is
* that there's a sweet spot number of chunks that balances
* the greater parallelism with the added overhead of more
* threads. The sweet spot can differ depending on whether you
* have a read- or write-heavy workload. Writes typically want
* high chunk counts while reads typically want lower ones. On
* a test pool with 6 NVMe drives in a 3x 2-disk mirror
* configuration, with volblocksize=8k, the sweet spot for good
* sequential reads and writes was at 8 chunks.
*/
/*
* Below we tell the kernel how big we want our requests
* to be. You would think that blk_queue_io_opt() would be
* used to do this since it is used to "set optimal request
* size for the queue", but that doesn't seem to do
* anything - the kernel still gives you huge requests
* with tons of little PAGE_SIZE segments contained within it.
*
* Knowing that the kernel will just give you PAGE_SIZE segments
* no matter what, you can say "ok, I want PAGE_SIZE byte
* segments, and I want 'N' of them per request", where N is
* the correct number of segments for the volblocksize and
* number of chunks you want.
*/
#ifdef HAVE_BLK_MQ
if (zvol_blk_mq_blocks_per_thread != 0) {
unsigned int chunks;
chunks = MIN(zvol_blk_mq_blocks_per_thread, UINT16_MAX);
limits->zql_max_segment_size = PAGE_SIZE;
limits->zql_max_segments =
(zv->zv_volblocksize * chunks) / PAGE_SIZE;
} else {
/*
* Special case: zvol_blk_mq_blocks_per_thread = 0
* Max everything out.
*/
limits->zql_max_segments = UINT16_MAX;
limits->zql_max_segment_size = UINT_MAX;
}
} else {
#endif
limits->zql_max_segments = UINT16_MAX;
limits->zql_max_segment_size = UINT_MAX;
}
limits->zql_io_opt = zv->zv_volblocksize;
}
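(Worked example, editorial: with volblocksize=8k, 4k pages and zvol_blk_mq_blocks_per_thread=8, the blk-mq branch sets zql_max_segment_size = 4096 and zql_max_segments = (8192 * 8) / 4096 = 16, capping each request at 16 * 4k = 64k, i.e. the eight-chunk sweet spot described in the comment above.)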
#ifdef HAVE_BLK_ALLOC_DISK_2ARG
static void
zvol_queue_limits_convert(zvol_queue_limits_t *limits,
struct queue_limits *qlimits)
{
memset(qlimits, 0, sizeof (struct queue_limits));
qlimits->max_hw_sectors = limits->zql_max_hw_sectors;
qlimits->max_segments = limits->zql_max_segments;
qlimits->max_segment_size = limits->zql_max_segment_size;
qlimits->io_opt = limits->zql_io_opt;
}
#else
static void
zvol_queue_limits_apply(zvol_queue_limits_t *limits,
struct request_queue *queue)
{
blk_queue_max_hw_sectors(queue, limits->zql_max_hw_sectors);
blk_queue_max_segments(queue, limits->zql_max_segments);
blk_queue_max_segment_size(queue, limits->zql_max_segment_size);
blk_queue_io_opt(queue, limits->zql_io_opt);
}
#endif
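(Editorial note: two helpers are needed because newer kernels take a struct queue_limits at disk-allocation time, hence zvol_queue_limits_convert(), while older kernels set each limit on the live request queue through the blk_queue_* setters in zvol_queue_limits_apply(); both paths are fed from the same zvol_queue_limits_t.)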
static int
zvol_alloc_non_blk_mq(struct zvol_state_os *zso)
zvol_alloc_non_blk_mq(struct zvol_state_os *zso, zvol_queue_limits_t *limits)
{
#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS)
#if defined(HAVE_BLK_ALLOC_DISK)
@ -1085,8 +1185,11 @@ zvol_alloc_non_blk_mq(struct zvol_state_os *zso)
zso->zvo_disk->minors = ZVOL_MINORS;
zso->zvo_queue = zso->zvo_disk->queue;
zvol_queue_limits_apply(limits, zso->zvo_queue);
#elif defined(HAVE_BLK_ALLOC_DISK_2ARG)
struct gendisk *disk = blk_alloc_disk(NULL, NUMA_NO_NODE);
struct queue_limits qlimits;
zvol_queue_limits_convert(limits, &qlimits);
struct gendisk *disk = blk_alloc_disk(&qlimits, NUMA_NO_NODE);
if (IS_ERR(disk)) {
zso->zvo_disk = NULL;
return (1);
@ -1107,6 +1210,7 @@ zvol_alloc_non_blk_mq(struct zvol_state_os *zso)
}
zso->zvo_disk->queue = zso->zvo_queue;
zvol_queue_limits_apply(limits, zso->zvo_queue);
#endif /* HAVE_BLK_ALLOC_DISK */
#else
zso->zvo_queue = blk_generic_alloc_queue(zvol_request, NUMA_NO_NODE);
@ -1120,13 +1224,14 @@ zvol_alloc_non_blk_mq(struct zvol_state_os *zso)
}
zso->zvo_disk->queue = zso->zvo_queue;
zvol_queue_limits_apply(limits, zso->zvo_queue);
#endif /* HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
return (0);
}
static int
zvol_alloc_blk_mq(zvol_state_t *zv)
zvol_alloc_blk_mq(zvol_state_t *zv, zvol_queue_limits_t *limits)
{
#ifdef HAVE_BLK_MQ
struct zvol_state_os *zso = zv->zv_zso;
@ -1142,9 +1247,12 @@ zvol_alloc_blk_mq(zvol_state_t *zv)
return (1);
}
zso->zvo_queue = zso->zvo_disk->queue;
zvol_queue_limits_apply(limits, zso->zvo_queue);
zso->zvo_disk->minors = ZVOL_MINORS;
#elif defined(HAVE_BLK_ALLOC_DISK_2ARG)
struct gendisk *disk = blk_mq_alloc_disk(&zso->tag_set, NULL, zv);
struct queue_limits qlimits;
zvol_queue_limits_convert(limits, &qlimits);
struct gendisk *disk = blk_mq_alloc_disk(&zso->tag_set, &qlimits, zv);
if (IS_ERR(disk)) {
zso->zvo_disk = NULL;
blk_mq_free_tag_set(&zso->tag_set);
@ -1170,6 +1278,7 @@ zvol_alloc_blk_mq(zvol_state_t *zv)
/* Our queue is now created, assign it to our disk */
zso->zvo_disk->queue = zso->zvo_queue;
zvol_queue_limits_apply(limits, zso->zvo_queue);
#endif
#endif
@ -1209,6 +1318,9 @@ zvol_alloc(dev_t dev, const char *name)
zv->zv_zso->use_blk_mq = zvol_use_blk_mq;
#endif
zvol_queue_limits_t limits;
zvol_queue_limits_init(&limits, zv, zv->zv_zso->use_blk_mq);
/*
* The block layer has 3 interfaces for getting BIOs:
*
@ -1225,10 +1337,10 @@ zvol_alloc(dev_t dev, const char *name)
* disk and the queue separately. (5.13 kernel or older)
*/
if (zv->zv_zso->use_blk_mq) {
ret = zvol_alloc_blk_mq(zv);
ret = zvol_alloc_blk_mq(zv, &limits);
zso->zvo_disk->fops = &zvol_ops_blk_mq;
} else {
ret = zvol_alloc_non_blk_mq(zso);
ret = zvol_alloc_non_blk_mq(zso, &limits);
zso->zvo_disk->fops = &zvol_ops;
}
if (ret != 0)
@ -1338,6 +1450,101 @@ zvol_wait_close(zvol_state_t *zv)
{
}
struct add_disk_work {
struct delayed_work work;
struct gendisk *disk;
int error;
};
static int
__zvol_os_add_disk(struct gendisk *disk)
{
int error = 0;
#ifdef HAVE_ADD_DISK_RET
error = add_disk(disk);
#else
add_disk(disk);
#endif
return (error);
}
#if defined(HAVE_BDEV_FILE_OPEN_BY_PATH)
static void
zvol_os_add_disk_work(struct work_struct *work)
{
struct add_disk_work *add_disk_work;
add_disk_work = container_of(work, struct add_disk_work, work.work);
add_disk_work->error = __zvol_os_add_disk(add_disk_work->disk);
}
#endif
/*
* SPECIAL CASE:
*
* This function basically calls add_disk() from a workqueue. You may be
* thinking: why not just call add_disk() directly?
*
* When you call add_disk(), the zvol appears to the world. When this happens,
* the kernel calls disk_scan_partitions() on the zvol, which behaves
* differently on the 6.9+ kernels:
*
* - 6.8 and older kernels -
* disk_scan_partitions()
* handle = bdev_open_by_dev(
* zvol_open()
* bdev_release(handle);
* zvol_release()
*
*
* - 6.9+ kernels -
* disk_scan_partitions()
* file = bdev_file_open_by_dev()
* zvol_open()
* fput(file)
* < wait for return to userspace >
* zvol_release()
*
* The difference is that the bdev_release() from the 6.8 kernel is synchronous
while the fput() from the 6.9 kernel is async. Or more specifically, it's
async that has to wait until we return to userspace (since it adds the fput
into the caller's work queue with the TWA_RESUME flag set). This is not the
behavior we want, since we want to do things like create+destroy a zvol within
* a single ZFS_IOC_CREATE ioctl, and the "create" part needs to release the
* reference to the zvol while we're in the IOCTL, which can't wait until we
* return to userspace.
*
* We can get around this since fput() has a special codepath for when it's
* running in a kernel thread or interrupt. In those cases, it just puts the
* fput into the system workqueue, which we can force to run with
* __flush_workqueue(). That is why we call add_disk() from a workqueue - so it
runs from a kernel thread and "tricks" the fput() codepaths.
*
Note that __flush_workqueue() is slowly being deprecated. This may be ok
though, since our IOCTL will spin on EBUSY waiting for the zvol release (via
fput) to happen, which it eventually will from the system_wq even without
us explicitly calling __flush_workqueue().
*/
static int
zvol_os_add_disk(struct gendisk *disk)
{
#if defined(HAVE_BDEV_FILE_OPEN_BY_PATH) /* 6.9+ kernel */
struct add_disk_work add_disk_work;
INIT_DELAYED_WORK(&add_disk_work.work, zvol_os_add_disk_work);
add_disk_work.disk = disk;
add_disk_work.error = 0;
/* Use *_delayed_work functions since they're not GPL'd */
schedule_delayed_work(&add_disk_work.work, 0);
flush_delayed_work(&add_disk_work.work);
__flush_workqueue(system_wq);
return (add_disk_work.error);
#else /* <= 6.8 kernel */
return (__zvol_os_add_disk(disk));
#endif
}
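
To make the trick above concrete, here is a minimal sketch of the same
run-on-a-kworker pattern, built from only the primitives already used in the
code (INIT_DELAYED_WORK(), schedule_delayed_work(), flush_delayed_work(),
container_of()); the struct and function names below are illustrative and not
part of the OpenZFS sources:

	#include <linux/workqueue.h>

	/* Illustrative names; not part of OpenZFS. */
	struct run_in_kworker {
		struct delayed_work work;
		int error;
	};

	static void
	run_in_kworker_fn(struct work_struct *w)
	{
		struct run_in_kworker *r =
		    container_of(w, struct run_in_kworker, work.work);
		/* Runs on a kworker thread, so fput() here takes the
		 * kernel-thread codepath described above. */
		r->error = 0;
	}

	static int
	run_in_kworker_sync(void)
	{
		struct run_in_kworker r = { .error = 0 };

		INIT_DELAYED_WORK(&r.work, run_in_kworker_fn);
		schedule_delayed_work(&r.work, 0);	/* queue on system_wq */
		flush_delayed_work(&r.work);		/* wait for callback */
		return (r.error);
	}

The caller in process context still collects the result synchronously, but the
callback itself executed on a kworker, which is the whole point of the
workaround.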
/*
* Create a block device minor node and setup the linkage between it
* and the specified volume. Once this function returns the block
@ -1417,74 +1624,10 @@ zvol_os_create_minor(const char *name)
set_capacity(zv->zv_zso->zvo_disk, zv->zv_volsize >> 9);
blk_queue_max_hw_sectors(zv->zv_zso->zvo_queue,
(DMU_MAX_ACCESS / 4) >> 9);
if (zv->zv_zso->use_blk_mq) {
/*
* IO requests can be really big (1MB). When an IO request
* comes in, it is passed off to zvol_read() or zvol_write()
* in a new thread, where it is chunked up into 'volblocksize'
* sized pieces and processed. So for example, if the request
* is a 1MB write and your volblocksize is 128k, one zvol_write
* thread will take that request and sequentially do ten 128k
* IOs. This is due to the fact that the thread needs to lock
* each volblocksize sized block. So you might be wondering:
* "instead of passing the whole 1MB request to one thread,
* why not pass ten individual 128k chunks to ten threads and
* process the whole write in parallel?" The short answer is
* that there's a sweet spot number of chunks that balances
* the greater parallelism with the added overhead of more
* threads. The sweet spot can be different depending on if you
* have a read or write heavy workload. Writes typically want
* high chunk counts while reads typically want lower ones. On
* a test pool with 6 NVMe drives in a 3x 2-disk mirror
* configuration, with volblocksize=8k, the sweet spot for good
* sequential reads and writes was at 8 chunks.
*/
/*
* Below we tell the kernel how big we want our requests
* to be. You would think that blk_queue_io_opt() would be
* used to do this since it is used to "set optimal request
* size for the queue", but that doesn't seem to do
* anything - the kernel still gives you huge requests
* with tons of little PAGE_SIZE segments contained within it.
*
* Knowing that the kernel will just give you PAGE_SIZE segments
* no matter what, you can say "ok, I want PAGE_SIZE byte
* segments, and I want 'N' of them per request", where N is
* the correct number of segments for the volblocksize and
* number of chunks you want.
*/
#ifdef HAVE_BLK_MQ
if (zvol_blk_mq_blocks_per_thread != 0) {
unsigned int chunks;
chunks = MIN(zvol_blk_mq_blocks_per_thread, UINT16_MAX);
blk_queue_max_segment_size(zv->zv_zso->zvo_queue,
PAGE_SIZE);
blk_queue_max_segments(zv->zv_zso->zvo_queue,
(zv->zv_volblocksize * chunks) / PAGE_SIZE);
} else {
/*
* Special case: zvol_blk_mq_blocks_per_thread = 0
* Max everything out.
*/
blk_queue_max_segments(zv->zv_zso->zvo_queue,
UINT16_MAX);
blk_queue_max_segment_size(zv->zv_zso->zvo_queue,
UINT_MAX);
}
#endif
} else {
blk_queue_max_segments(zv->zv_zso->zvo_queue, UINT16_MAX);
blk_queue_max_segment_size(zv->zv_zso->zvo_queue, UINT_MAX);
}
blk_queue_physical_block_size(zv->zv_zso->zvo_queue,
zv->zv_volblocksize);
blk_queue_io_opt(zv->zv_zso->zvo_queue, zv->zv_volblocksize);
blk_queue_max_discard_sectors(zv->zv_zso->zvo_queue,
(zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
blk_queue_discard_granularity(zv->zv_zso->zvo_queue,
@ -1549,11 +1692,7 @@ zvol_os_create_minor(const char *name)
rw_enter(&zvol_state_lock, RW_WRITER);
zvol_insert(zv);
rw_exit(&zvol_state_lock);
#ifdef HAVE_ADD_DISK_RET
error = add_disk(zv->zv_zso->zvo_disk);
#else
add_disk(zv->zv_zso->zvo_disk);
#endif
error = zvol_os_add_disk(zv->zv_zso->zvo_disk);
} else {
ida_simple_remove(&zvol_ida, idx);
}

View file

@ -3712,16 +3712,19 @@ dsl_dataset_promote_sync(void *arg, dmu_tx_t *tx)
spa_history_log_internal_ds(hds, "promote", tx, " ");
dsl_dir_rele(odd, FTAG);
promote_rele(ddpa, FTAG);
/*
* Transfer common error blocks from old head to new head.
* Transfer common error blocks from old head to new head, before
* calling promote_rele() on ddpa since we need to dereference
* origin_head and hds.
*/
if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_HEAD_ERRLOG)) {
uint64_t old_head = origin_head->ds_object;
uint64_t new_head = hds->ds_object;
spa_swap_errlog(dp->dp_spa, new_head, old_head, tx);
}
promote_rele(ddpa, FTAG);
}
/*

View file

@ -2021,6 +2021,7 @@ vdev_open(vdev_t *vd)
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_fault_wanted = B_FALSE;
vd->vdev_min_asize = vdev_get_min_asize(vd);
/*

View file

@ -26,6 +26,7 @@ PACKAGE_VERSION="${pkgver}"
PACKAGE_CONFIG="${pkgcfg}"
NO_WEAK_MODULES="yes"
PRE_BUILD="configure
--disable-dependency-tracking
--prefix=/usr
--with-config=kernel
--with-linux=\$(

View file

@ -121,7 +121,7 @@ tests = ['auto_offline_001_pos', 'auto_online_001_pos', 'auto_online_002_pos',
'auto_replace_001_pos', 'auto_replace_002_pos', 'auto_spare_001_pos',
'auto_spare_002_pos', 'auto_spare_multiple', 'auto_spare_ashift',
'auto_spare_shared', 'decrypt_fault', 'decompress_fault',
'scrub_after_resilver', 'zpool_status_-s']
'scrub_after_resilver', 'suspend_resume_single', 'zpool_status_-s']
tags = ['functional', 'fault']
[tests/functional/features/large_dnode:Linux]

View file

@ -379,6 +379,7 @@ if os.environ.get('CI') == 'true':
'fault/auto_replace_002_pos': ['SKIP', ci_reason],
'fault/auto_spare_ashift': ['SKIP', ci_reason],
'fault/auto_spare_shared': ['SKIP', ci_reason],
'fault/suspend_resume_single': ['SKIP', ci_reason],
'procfs/pool_state': ['SKIP', ci_reason],
})

View file

@ -72,31 +72,6 @@ static const uint8_t sha256_test_digests[][32] = {
/* no test vector for test_msg2 */
};
static const uint8_t sha384_test_digests[][48] = {
{
/* for test_msg0 */
0xCB, 0x00, 0x75, 0x3F, 0x45, 0xA3, 0x5E, 0x8B,
0xB5, 0xA0, 0x3D, 0x69, 0x9A, 0xC6, 0x50, 0x07,
0x27, 0x2C, 0x32, 0xAB, 0x0E, 0xDE, 0xD1, 0x63,
0x1A, 0x8B, 0x60, 0x5A, 0x43, 0xFF, 0x5B, 0xED,
0x80, 0x86, 0x07, 0x2B, 0xA1, 0xE7, 0xCC, 0x23,
0x58, 0xBA, 0xEC, 0xA1, 0x34, 0xC8, 0x25, 0xA7
},
{
/* no test vector for test_msg1 */
0
},
{
/* for test_msg2 */
0x09, 0x33, 0x0C, 0x33, 0xF7, 0x11, 0x47, 0xE8,
0x3D, 0x19, 0x2F, 0xC7, 0x82, 0xCD, 0x1B, 0x47,
0x53, 0x11, 0x1B, 0x17, 0x3B, 0x3B, 0x05, 0xD2,
0x2F, 0xA0, 0x80, 0x86, 0xE3, 0xB0, 0xF7, 0x12,
0xFC, 0xC7, 0xC7, 0x1A, 0x55, 0x7E, 0x2D, 0xB9,
0x66, 0xC3, 0xE9, 0xFA, 0x91, 0x74, 0x60, 0x39
}
};
static const uint8_t sha512_test_digests[][64] = {
{
/* for test_msg0 */
@ -126,27 +101,6 @@ static const uint8_t sha512_test_digests[][64] = {
}
};
static const uint8_t sha512_224_test_digests[][28] = {
{
/* for test_msg0 */
0x46, 0x34, 0x27, 0x0F, 0x70, 0x7B, 0x6A, 0x54,
0xDA, 0xAE, 0x75, 0x30, 0x46, 0x08, 0x42, 0xE2,
0x0E, 0x37, 0xED, 0x26, 0x5C, 0xEE, 0xE9, 0xA4,
0x3E, 0x89, 0x24, 0xAA
},
{
/* no test vector for test_msg1 */
0
},
{
/* for test_msg2 */
0x23, 0xFE, 0xC5, 0xBB, 0x94, 0xD6, 0x0B, 0x23,
0x30, 0x81, 0x92, 0x64, 0x0B, 0x0C, 0x45, 0x33,
0x35, 0xD6, 0x64, 0x73, 0x4F, 0xE4, 0x0E, 0x72,
0x68, 0x67, 0x4A, 0xF9
}
};
static const uint8_t sha512_256_test_digests[][32] = {
{
/* for test_msg0 */
@ -191,7 +145,7 @@ main(int argc, char *argv[])
do { \
SHA2_CTX ctx; \
uint8_t digest[diglen / 8]; \
SHA2Init(SHA ## mode ## _MECH_INFO_TYPE, &ctx); \
SHA2Init(mode, &ctx); \
SHA2Update(&ctx, _m, strlen(_m)); \
SHA2Final(digest, &ctx); \
(void) printf("SHA%-9sMessage: " #_m \
@ -215,7 +169,7 @@ main(int argc, char *argv[])
struct timeval start, end; \
memset(block, 0, sizeof (block)); \
(void) gettimeofday(&start, NULL); \
SHA2Init(SHA ## mode ## _MECH_INFO_TYPE, &ctx); \
SHA2Init(mode, &ctx); \
for (i = 0; i < 8192; i++) \
SHA2Update(&ctx, block, sizeof (block)); \
SHA2Final(digest, &ctx); \
@ -231,16 +185,12 @@ main(int argc, char *argv[])
} while (0)
(void) printf("Running algorithm correctness tests:\n");
SHA2_ALGO_TEST(test_msg0, 256, 256, sha256_test_digests[0]);
SHA2_ALGO_TEST(test_msg1, 256, 256, sha256_test_digests[1]);
SHA2_ALGO_TEST(test_msg0, 384, 384, sha384_test_digests[0]);
SHA2_ALGO_TEST(test_msg2, 384, 384, sha384_test_digests[2]);
SHA2_ALGO_TEST(test_msg0, 512, 512, sha512_test_digests[0]);
SHA2_ALGO_TEST(test_msg2, 512, 512, sha512_test_digests[2]);
SHA2_ALGO_TEST(test_msg0, 512_224, 224, sha512_224_test_digests[0]);
SHA2_ALGO_TEST(test_msg2, 512_224, 224, sha512_224_test_digests[2]);
SHA2_ALGO_TEST(test_msg0, 512_256, 256, sha512_256_test_digests[0]);
SHA2_ALGO_TEST(test_msg2, 512_256, 256, sha512_256_test_digests[2]);
SHA2_ALGO_TEST(test_msg0, SHA256, 256, sha256_test_digests[0]);
SHA2_ALGO_TEST(test_msg1, SHA256, 256, sha256_test_digests[1]);
SHA2_ALGO_TEST(test_msg0, SHA512, 512, sha512_test_digests[0]);
SHA2_ALGO_TEST(test_msg2, SHA512, 512, sha512_test_digests[2]);
SHA2_ALGO_TEST(test_msg0, SHA512_256, 256, sha512_256_test_digests[0]);
SHA2_ALGO_TEST(test_msg2, SHA512_256, 256, sha512_256_test_digests[2]);
if (failed)
return (1);
@ -251,13 +201,13 @@ main(int argc, char *argv[])
for (id = 0; id < sha256->getcnt(); id++) {
sha256->setid(id);
const char *name = sha256->getname();
SHA2_PERF_TEST(256, 256, name);
SHA2_PERF_TEST(SHA256, 256, name);
}
for (id = 0; id < sha512->getcnt(); id++) {
sha512->setid(id);
const char *name = sha512->getname();
SHA2_PERF_TEST(512, 512, name);
SHA2_PERF_TEST(SHA512, 512, name);
}
return (0);
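
For clarity, a minimal sketch of the new calling convention, assuming the
<sys/sha2.h> SHA2_CTX API this test exercises; the mechanism constant (e.g.
SHA256) is now passed to SHA2Init() directly rather than being token-pasted
into SHA<mode>_MECH_INFO_TYPE, and the wrapper function here is illustrative
only:

	#include <sys/sha2.h>
	#include <string.h>

	static void
	sha256_digest_example(uint8_t digest[32])
	{
		SHA2_CTX ctx;
		const char *msg = "abc";

		SHA2Init(SHA256, &ctx);	/* mechanism constant, not a number */
		SHA2Update(&ctx, msg, strlen(msg));
		SHA2Final(digest, &ctx);	/* 256-bit digest out */
	}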

View file

@ -62,11 +62,39 @@ function compare_version_gte
}
# Helper function used by linux_version() and freebsd_version()
# $1, if provided, should be a MAJOR, MAJOR.MINOR or MAJOR.MINOR.PATCH
# version number
function kernel_version
{
typeset ver="$1"
[ -z "$ver" ] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
[ -z "$ver" ] && case "$UNAME" in
Linux)
# Linux version numbers are X.Y.Z followed by optional
# vendor/distro specific stuff
# RHEL7: 3.10.0-1160.108.1.el7.x86_64
# Fedora 37: 6.5.12-100.fc37.x86_64
# Debian 12.6: 6.1.0-22-amd64
ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
;;
FreeBSD)
# FreeBSD version numbers are X.Y-BRANCH-pZ. Depending on
# branch, -pZ may not be present, but this is typically only
# on pre-release or true .0 releases, so can be assumed 0
# if not present.
# eg:
# 13.2-RELEASE-p4
# 14.1-RELEASE
# 15.0-CURRENT
ver=$(uname -r | \
grep -Eo "[0-9]+\.[0-9]+(-[A-Z0-9]+-p[0-9]+)?" | \
sed -E "s/-[^-]+-p/./")
;;
*)
# Unknown system
log_fail "Don't know how to get kernel version for '$UNAME'"
;;
esac
typeset version major minor _
IFS='.' read -r version major minor _ <<<"$ver"
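
As an aside, the FreeBSD normalization above ("13.2-RELEASE-p4" becomes
13.2.4, with the patch level defaulting to 0 when -pZ is absent) can be
sketched in C with a single sscanf(); everything below is illustrative and not
part of the test library:

	#include <stdio.h>

	int
	main(void)
	{
		const char *rel = "13.2-RELEASE-p4";	/* e.g. from uname -r */
		int major = 0, minor = 0, patch = 0;
		char branch[32] = "";

		/* branch matches RELEASE/CURRENT/etc.; -pZ is optional,
		 * so patch stays 0 on pre-release or true .0 releases. */
		if (sscanf(rel, "%d.%d-%31[A-Z0-9]-p%d",
		    &major, &minor, branch, &patch) < 2)
			return (1);
		printf("%d.%d.%d\n", major, minor, patch);
		return (0);
	}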

View file

@ -1483,6 +1483,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/fault/decompress_fault.ksh \
functional/fault/decrypt_fault.ksh \
functional/fault/scrub_after_resilver.ksh \
functional/fault/suspend_resume_single.ksh \
functional/fault/setup.ksh \
functional/fault/zpool_status_-s.ksh \
functional/features/async_destroy/async_destroy_001_pos.ksh \

View file

@ -0,0 +1,102 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2024, Klara Inc.
#
. $STF_SUITE/include/libtest.shlib
set -x
DATAFILE="$TMPDIR/datafile"
function cleanup
{
destroy_pool $TESTPOOL
unload_scsi_debug
rm -f $DATAFILE
}
log_onexit cleanup
log_assert "ensure single-disk pool resumes properly after suspend and clear"
# create a file, and take a checksum, so we can compare later
log_must dd if=/dev/random of=$DATAFILE bs=128K count=1
typeset sum1=$(cat $DATAFILE | md5sum)
# make a debug device that we can "unplug"
load_scsi_debug 100 1 1 1 '512b'
sd=$(get_debug_device)
# create a single-device pool
log_must zpool create $TESTPOOL $sd
log_must zpool sync
# "pull" the disk
log_must eval "echo offline > /sys/block/$sd/device/state"
# copy data onto the pool. it'll appear to succeed, but only be in memory
log_must cp $DATAFILE /$TESTPOOL/file
# wait until sync starts, and the pool suspends
log_note "waiting for pool to suspend"
typeset -i tries=10
until [[ $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) == "SUSPENDED" ]] ; do
if ((tries-- == 0)); then
log_fail "pool didn't suspend"
fi
sleep 1
done
# return the disk
log_must eval "echo running > /sys/block/$sd/device/state"
# clear the error states, which should reopen the vdev, get the pool back
# online, and replay the failed IO
log_must zpool clear $TESTPOOL
# wait a while for everything to sync out. if something is going to go wrong,
# this is where it will happen
log_note "giving pool time to settle and complete txg"
sleep 7
# if the pool suspended, then everything is bad
if [[ $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) == "SUSPENDED" ]] ; then
log_fail "pool suspended"
fi
# export the pool, to make sure it exports clean, and also to clear the file
# out of the cache
log_must zpool export $TESTPOOL
# import the pool
log_must zpool import $TESTPOOL
# sum the file we wrote earlier
typeset sum2=$(cat /$TESTPOOL/file | md5sum)
# make sure the checksums match
log_must test "$sum1" = "$sum2"
log_pass "single-disk pool resumes properly after disk suspend and clear"

View file

@ -96,7 +96,7 @@ log_must zfs destroy -R $clone2
log_must eval "zfs send -i $sendfs#book2 --redact book3 $sendfs@snap2 >$stream"
log_must eval "zfs recv $recvfs <$stream"
log_must mount_redacted -f $recvfs
log_must diff <(ls $send_mnt) <(ls $recv_mnt)
log_must [ "$(ls $send_mnt)" == "$(ls $recv_mnt)" ]
log_must zfs destroy -R $recvfs
log_must zfs rollback -R $sendfs@snap

View file

@ -71,8 +71,7 @@ log_must ismounted $recvfs
# deleted.
contents=$(log_must find $recv_mnt)
contents_orig=$(log_must find $send_mnt)
log_must diff <(echo ${contents//$recv_mnt/}) \
<(echo ${contents_orig//$send_mnt/})
log_must [ "${contents//$recv_mnt/}" == "${contents_orig//$send_mnt/}" ]
log_must zfs redact $sendvol@snap book2 $clonevol@snap
log_must eval "zfs send --redact book2 $sendvol@snap >$stream"
log_must eval "zfs receive $recvvol <$stream"
@ -103,7 +102,6 @@ log_must mount_redacted -f $recvfs
log_must ismounted $recvfs
contents=$(log_must find $recv_mnt)
contents_orig=$(log_must find $send_mnt)
log_must diff <(echo ${contents//$recv_mnt/}) \
<(echo ${contents_orig//$send_mnt/})
log_must [ "${contents//$recv_mnt/}" == "${contents_orig//$send_mnt/}" ]
log_pass "Received redacted streams can be mounted."

View file

@ -114,6 +114,9 @@
/* bdev_max_secure_erase_sectors() is available */
/* #undef HAVE_BDEV_MAX_SECURE_ERASE_SECTORS */
/* bdev_nr_bytes() is available */
/* #undef HAVE_BDEV_NR_BYTES */
/* bdev_open_by_path() exists */
/* #undef HAVE_BDEV_OPEN_BY_PATH */
@ -634,6 +637,9 @@
/* kernel_write() take loff_t pointer */
/* #undef HAVE_KERNEL_WRITE_PPOS */
/* kernel has kmap_local_page */
/* #undef HAVE_KMAP_LOCAL_PAGE */
/* kmem_cache_create_usercopy() exists */
/* #undef HAVE_KMEM_CACHE_CREATE_USERCOPY */
@ -1194,7 +1200,7 @@
/* #undef ZFS_IS_GPL_COMPATIBLE */
/* Define the project alias string. */
#define ZFS_META_ALIAS "zfs-2.2.99-517-FreeBSD_ge2357561b"
#define ZFS_META_ALIAS "zfs-2.2.99-559-FreeBSD_g1147a2797"
/* Define the project author. */
#define ZFS_META_AUTHOR "OpenZFS"
@ -1203,7 +1209,7 @@
/* #undef ZFS_META_DATA */
/* Define the maximum compatible kernel version. */
#define ZFS_META_KVER_MAX "6.8"
#define ZFS_META_KVER_MAX "6.9"
/* Define the minimum compatible kernel version. */
#define ZFS_META_KVER_MIN "3.10"
@ -1224,7 +1230,7 @@
#define ZFS_META_NAME "zfs"
/* Define the project release. */
#define ZFS_META_RELEASE "517-FreeBSD_ge2357561b"
#define ZFS_META_RELEASE "559-FreeBSD_g1147a2797"
/* Define the project version. */
#define ZFS_META_VERSION "2.2.99"

View file

@ -1 +1 @@
#define ZFS_META_GITREV "zfs-2.2.99-517-ge2357561b"
#define ZFS_META_GITREV "zfs-2.2.99-559-g1147a2797"