commit a41df0dd1b4c7fbc54d26f3517c814136963a4d7 Author: Drew Gallatin Date: Wed Aug 14 13:22:48 2019 -0400 TLS 1.3 support 1) Add support to ISA-L for TLS 1.3 2) Add support to OpenSSL for offloading TLS 1.3 to ktls 3) Reject TLS 1.3 in Boring JIRA: NCD-5361 diff --git a/FreeBSD/crypto/openssl/ssl/record/rec_layer_s3.c b/FreeBSD/crypto/openssl/ssl/record/rec_layer_s3.c index 8d967689e49b..012823d457b7 100644 --- a/FreeBSD/crypto/openssl/ssl/record/rec_layer_s3.c +++ b/FreeBSD/crypto/openssl/ssl/record/rec_layer_s3.c @@ -919,6 +919,7 @@ int do_ssl3_write(SSL *s, int type, const unsigned char *buf, } if (SSL_TREAT_AS_TLS13(s) + && !BIO_get_ktls_send(s->wbio) && s->enc_write_ctx != NULL && (s->statem.enc_write_state != ENC_WRITE_STATE_WRITE_PLAIN_ALERTS || type != SSL3_RT_ALERT)) { diff --git a/FreeBSD/crypto/openssl/ssl/tls13_enc.c b/FreeBSD/crypto/openssl/ssl/tls13_enc.c index 1f956e61e9c1..c242e2f069af 100644 --- a/FreeBSD/crypto/openssl/ssl/tls13_enc.c +++ b/FreeBSD/crypto/openssl/ssl/tls13_enc.c @@ -9,6 +9,8 @@ #include #include "ssl_locl.h" +#include "record/record_locl.h" +#include "internal/ktls.h" #include "internal/cryptlib.h" #include #include @@ -343,9 +345,9 @@ static int derive_secret_key_and_iv(SSL *s, int sending, const EVP_MD *md, const unsigned char *hash, const unsigned char *label, size_t labellen, unsigned char *secret, + unsigned char *key, unsigned char *iv, EVP_CIPHER_CTX *ciph_ctx) { - unsigned char key[EVP_MAX_KEY_LENGTH]; size_t ivlen, keylen, taglen; int hashleni = EVP_MD_size(md); size_t hashlen; @@ -431,6 +433,14 @@ int tls13_change_cipher_state(SSL *s, int which) int ret = 0; const EVP_MD *md = NULL; const EVP_CIPHER *cipher = NULL; + unsigned char key[EVP_MAX_KEY_LENGTH]; +#ifndef OPENSSL_NO_KTLS +# if defined (__FreeBSD__) + struct tls_enable tls_en; + BIO *wbio; + int try_ktls = 0; +#endif +#endif if (which & SSL3_CC_READ) { if (s->enc_read_ctx != NULL) { @@ -451,6 +461,11 @@ int tls13_change_cipher_state(SSL *s, int which) 
s->statem.enc_write_state = ENC_WRITE_STATE_INVALID; if (s->enc_write_ctx != NULL) { EVP_CIPHER_CTX_reset(s->enc_write_ctx); +#ifndef OPENSSL_NO_KTLS +# if defined (__FreeBSD__) + try_ktls = 1; +#endif +#endif } else { s->enc_write_ctx = EVP_CIPHER_CTX_new(); if (s->enc_write_ctx == NULL) { @@ -634,12 +649,77 @@ int tls13_change_cipher_state(SSL *s, int which) } if (!derive_secret_key_and_iv(s, which & SSL3_CC_WRITE, md, cipher, - insecret, hash, label, labellen, secret, iv, + insecret, hash, label, labellen, secret, key, iv, ciph_ctx)) { /* SSLfatal() already called */ goto err; } +#ifndef OPENSSL_NO_KTLS +#if defined (__FreeBSD__) + if (!try_ktls || + (which & SSL3_CC_READ) || + ((which & SSL3_CC_WRITE) && (s->mode & SSL_MODE_NO_KTLS_TX))) + goto skip_ktls; + + /* ktls supports only the maximum fragment size */ + if (ssl_get_max_send_fragment(s) != SSL3_RT_MAX_PLAIN_LENGTH) + goto skip_ktls; + + memset(&tls_en, 0, sizeof(tls_en)); + if (EVP_CIPHER_mode(cipher) == EVP_CIPH_GCM_MODE) { + tls_en.cipher_algorithm = CRYPTO_AES_NIST_GCM_16; + tls_en.iv_len = EVP_CIPHER_CTX_iv_length(ciph_ctx); + switch (8 * EVP_CIPHER_key_length(cipher)) { + case 128: + tls_en.auth_algorithm = CRYPTO_AES_128_NIST_GMAC; + break; + case 192: + tls_en.auth_algorithm = CRYPTO_AES_192_NIST_GMAC; + break; + case 256: + tls_en.auth_algorithm = CRYPTO_AES_256_NIST_GMAC; + break; + default: + goto skip_ktls; + } + } else { + goto skip_ktls; + } + tls_en.tls_vmajor = (s->version >> 8) & 0x000000ff; + tls_en.tls_vminor = (s->version & 0x000000ff); + tls_en.cipher_key_len = EVP_CIPHER_key_length(cipher); + tls_en.cipher_key = key; + tls_en.iv = iv; + + + wbio = s->wbio; + if (!ossl_assert(wbio != NULL)) { + SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE, + ERR_R_INTERNAL_ERROR); + goto err; + } + /* All future data will get encrypted by ktls. 
Flush the BIO or skip ktls */ + if (BIO_flush(wbio) <= 0) + goto skip_ktls; + + /* ktls doesn't support renegotiation */ + if (BIO_get_ktls_send(s->wbio)) { + SSLfatal(s, SSL_AD_NO_RENEGOTIATION, SSL_F_TLS1_CHANGE_CIPHER_STATE, + ERR_R_INTERNAL_ERROR); + goto err; + } + + if (BIO_set_ktls(wbio, &tls_en, which & SSL3_CC_WRITE)) { + s->wbio = NULL; + ssl3_release_write_buffer(s); + s->wbio = wbio; + SSL_set_options(s, SSL_OP_NO_RENEGOTIATION); + } + +skip_ktls: +#endif +#endif if (label == server_application_traffic) { memcpy(s->server_app_traffic_secret, secret, hashlen); /* Now we create the exporter master secret */ @@ -689,6 +769,7 @@ int tls13_update_key(SSL *s, int sending) size_t hashlen = EVP_MD_size(md); unsigned char *insecret, *iv; unsigned char secret[EVP_MAX_MD_SIZE]; + unsigned char key[EVP_MAX_KEY_LENGTH]; EVP_CIPHER_CTX *ciph_ctx; int ret = 0; @@ -711,7 +792,7 @@ int tls13_update_key(SSL *s, int sending) if (!derive_secret_key_and_iv(s, sending, ssl_handshake_md(s), s->s3->tmp.new_sym_enc, insecret, NULL, application_traffic, - sizeof(application_traffic) - 1, secret, iv, + sizeof(application_traffic) - 1, secret, key, iv, ciph_ctx)) { /* SSLfatal() already called */ goto err; diff --git a/FreeBSD/sys/kern/uipc_ktls.c b/FreeBSD/sys/kern/uipc_ktls.c index 62838a356f55..77c48b774283 100644 --- a/FreeBSD/sys/kern/uipc_ktls.c +++ b/FreeBSD/sys/kern/uipc_ktls.c @@ -389,14 +389,14 @@ ktls_create_session(struct socket *so, struct tls_enable *en, if (en->tls_vmajor != TLS_MAJOR_VER_ONE) return (EINVAL); if (en->tls_vminor < TLS_MINOR_VER_ZERO || - en->tls_vminor > TLS_MINOR_VER_TWO) + en->tls_vminor > TLS_MINOR_VER_THREE) return (EINVAL); if (en->auth_key_len < 0 || en->auth_key_len > TLS_MAX_PARAM_SIZE) return (EINVAL); if (en->cipher_key_len < 0 || en->cipher_key_len > TLS_MAX_PARAM_SIZE) return (EINVAL); - if (en->iv_len < 0 || en->iv_len > TLS_MAX_PARAM_SIZE) + if (en->iv_len < 0 || en->iv_len > sizeof(tls->params.iv)) return (EINVAL); /* All supported 
algorithms require a cipher key. */ @@ -425,7 +425,8 @@ ktls_create_session(struct socket *so, struct tls_enable *en, } if (en->auth_key_len != 0) return (EINVAL); - if (en->iv_len != TLS_AEAD_GCM_LEN) + if (en->tls_vminor == TLS_MINOR_VER_TWO && + en->iv_len != TLS_AEAD_GCM_LEN) return (EINVAL); break; case CRYPTO_AES_CBC: @@ -477,8 +478,22 @@ ktls_create_session(struct socket *so, struct tls_enable *en, tls->params.tls_hlen = sizeof(struct tls_record_layer); switch (en->cipher_algorithm) { case CRYPTO_AES_NIST_GCM_16: - tls->params.tls_hlen += 8; + /* + * TLS 1.2 includes the sequence number in plain + * text, TLS 1.3 XORs it into the IV + */ + if (en->tls_vminor < TLS_MINOR_VER_THREE) + tls->params.tls_hlen += sizeof (uint64_t); tls->params.tls_tlen = AES_GMAC_HASH_LEN; + + /* + * TLS 1.3 includes optional padding which we + * do not support, and also puts the "real" record + * type at the end of the encrypted data + */ + if (en->tls_vminor == TLS_MINOR_VER_THREE) + tls->params.tls_tlen += sizeof (uint8_t); + tls->params.tls_bs = 1; break; case CRYPTO_AES_CBC: @@ -539,7 +554,6 @@ ktls_create_session(struct socket *so, struct tls_enable *en, * of the IV are generated in ktls_frame() and ktls_seq(). */ if (en->iv_len != 0) { - MPASS(en->iv_len <= sizeof(tls->params.iv)); tls->params.iv_len = en->iv_len; error = copyin(en->iv, tls->params.iv, en->iv_len); if (error) @@ -1188,8 +1202,21 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt, /* Populate the TLS header. */ tlshdr = (void *)pgs->hdr; tlshdr->tls_vmajor = tls->params.tls_vmajor; - tlshdr->tls_vminor = tls->params.tls_vminor; - tlshdr->tls_type = record_type; + + /* + * TLS 1.3 masquerades as TLS 1.2 with a record type + * of TLS_RLTYPE_APP. 
+ */ + if (tls->params.tls_vminor == TLS_MINOR_VER_THREE && + tls->params.tls_vmajor == TLS_MAJOR_VER_ONE) { + tlshdr->tls_vminor = TLS_MINOR_VER_TWO; + tlshdr->tls_type = TLS_RLTYPE_APP; + /* save the real record type for later */ + pgs->record_type = record_type; + } else { + tlshdr->tls_vminor = tls->params.tls_vminor; + tlshdr->tls_type = record_type; + } tlshdr->tls_length = htons(m->m_len - sizeof(*tlshdr)); /* @@ -1365,7 +1392,8 @@ ktls_encrypt(struct mbuf_ext_pgs *pgs) error = (*tls->sw_encrypt)(tls, (const struct tls_record_layer *)pgs->hdr, - pgs->trail, src_iov, dst_iov, i, pgs->seqno); + pgs->trail, src_iov, dst_iov, i, pgs->seqno, + pgs->record_type); if (error) { counter_u64_add(ktls_offload_failed_crypto, 1); break; diff --git a/FreeBSD/sys/opencrypto/ktls_ocf.c b/FreeBSD/sys/opencrypto/ktls_ocf.c index 953fc1c9b6e3..d9e76e1d0ffd 100644 --- a/FreeBSD/sys/opencrypto/ktls_ocf.c +++ b/FreeBSD/sys/opencrypto/ktls_ocf.c @@ -86,7 +86,7 @@ ktls_ocf_callback(struct cryptop *crp) static int ktls_ocf_encrypt(struct ktls_session *tls, const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov, struct iovec *outiov, int iovcnt, - uint64_t seqno) + uint64_t seqno, uint8_t record_type __unused) { struct uio uio; struct tls_aead_data ad; diff --git a/FreeBSD/sys/sys/ktls.h b/FreeBSD/sys/sys/ktls.h index 079d4448bd8d..eac9c39784f7 100644 --- a/FreeBSD/sys/sys/ktls.h +++ b/FreeBSD/sys/sys/ktls.h @@ -85,6 +85,7 @@ struct tls_mac_data { #define TLS_MINOR_VER_ZERO 1 /* 3, 1 */ #define TLS_MINOR_VER_ONE 2 /* 3, 2 */ #define TLS_MINOR_VER_TWO 3 /* 3, 3 */ +#define TLS_MINOR_VER_THREE 4 /* 3, 4 */ /* For TCP_TXTLS_ENABLE */ struct tls_enable { @@ -121,7 +122,7 @@ struct tls_session_params { #ifdef _KERNEL -#define KTLS_API_VERSION 5 +#define KTLS_API_VERSION 6 struct iovec; struct ktls_session; @@ -144,7 +145,7 @@ struct ktls_session { int (*sw_encrypt)(struct ktls_session *tls, const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *src, struct 
iovec *dst, int iovcnt, - uint64_t seqno); + uint64_t seqno, uint8_t record_type); union { void *cipher; struct m_snd_tag *snd_tag; diff --git a/FreeBSD/sys/sys/mbuf.h b/FreeBSD/sys/sys/mbuf.h index e7f39cf42042..1dc830d8e171 100644 --- a/FreeBSD/sys/sys/mbuf.h +++ b/FreeBSD/sys/sys/mbuf.h @@ -354,6 +354,7 @@ struct mbuf_ext_pgs { union { char trail[MBUF_PEXT_TRAIL_LEN]; /* TLS trailer */ struct { + uint8_t record_type; struct socket *so; struct mbuf *mbuf; uint64_t seqno; diff --git a/netflix/kmod/boringssl/dist/boring_kern.c b/netflix/kmod/boringssl/dist/boring_kern.c index 5fd9f30ec172..8f8308f08894 100644 --- a/netflix/kmod/boringssl/dist/boring_kern.c +++ b/netflix/kmod/boringssl/dist/boring_kern.c @@ -85,7 +85,7 @@ static void ktls_boring_free(struct ktls_session *tls); static int ktls_boring_aead_encrypt(struct ktls_session *tls, const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov, - struct iovec *outiov, int iovcnt, uint64_t seqno) + struct iovec *outiov, int iovcnt, uint64_t seqno, uint8_t tls_rtype __unused) { size_t taglen; struct evp_aead_ctx_st *bssl; @@ -246,7 +246,7 @@ ktls_boring_cbc_fixup(struct ktls_session *tls, static int ktls_boring_cbc_encrypt(struct ktls_session *tls, const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov, - struct iovec *outiov, int iovcnt, uint64_t seqno) + struct iovec *outiov, int iovcnt, uint64_t seqno, uint8_t tls_rtype __unused) { size_t taglen; struct tls_mac_data mac; @@ -302,6 +302,15 @@ ktls_boring_try(struct socket *so, struct ktls_session *tls) choice = NULL; switch (tls->params.cipher_algorithm) { case CRYPTO_AES_NIST_GCM_16: + /* + * TLS 1.3 adds a byte at the end for the real record + * type, and does not include the seqno in the clear. 
+ * We would need major changes to support TLS 1.3 + */ + if (tls->params.tls_vminor >= TLS_MINOR_VER_THREE) { + return(ENOTSUP); + } + if (tls->params.iv_len != TLS_AEAD_GCM_LEN) { return (EINVAL); } diff --git a/netflix/kmod/intel-isa-aes/dist/aes/intelisa_kern.c b/netflix/kmod/intel-isa-aes/dist/aes/intelisa_kern.c index e3a5feec1cf8..fd846724a00d 100644 --- a/netflix/kmod/intel-isa-aes/dist/aes/intelisa_kern.c +++ b/netflix/kmod/intel-isa-aes/dist/aes/intelisa_kern.c @@ -90,6 +90,16 @@ SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_aead_crypts, CTLFLAG_RD, &ktls_offload_isa_aead, "Total number of Intel ISA TLS AEAD encrypts called"); +static counter_u64_t ktls_offload_isa_tls_13; +SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_tls_13_crypts, + CTLFLAG_RD, &ktls_offload_isa_tls_13, + "Total number of Intel ISA TLS 1.3 encrypts called"); + +static counter_u64_t ktls_offload_isa_tls_12; +SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_tls_12_crypts, + CTLFLAG_RD, &ktls_offload_isa_tls_12, + "Total number of Intel ISA TLS 1.2 encrypts called"); + static counter_u64_t intelisa_unaligned_mem_b; SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_unaligned_bytes, CTLFLAG_RD, &intelisa_unaligned_mem_b, @@ -115,26 +125,46 @@ static MALLOC_DEFINE(M_INTEL_ISA, "isal_tls", "Intel ISA-L TLS"); static int intel_isa_seal(struct isa_gcm_struct *isa, struct iovec *outiov, int numiovs, - uint8_t * nd, int noncelen, + uint8_t *static_iv, int iv_len, uint64_t seq, struct iovec *iniov, uint8_t * ad, int adlen, - uint8_t * tagout, size_t *taglen) + uint8_t * tagout, size_t *taglen, + bool tls_13, uint8_t tls_13_rtype) { int i; bool nt = true; bool misaligned_len, misaligned_start; int fixup = 0; + size_t offset; uint8_t *in; uint8_t *out; uint64_t len; uint8_t iv[32]; - uint8_t const IVend[] = GCM_IV_END_MARK; + uint8_t seq_num[sizeof(seq)]; - if (noncelen > 28) { + if (iv_len > 32 - sizeof(seq)) { return (-1); } - memcpy(iv, nd, noncelen); - 
memcpy(&iv[noncelen], IVend, sizeof(IVend)); + + if (tls_13) { + /* + * RFC 8446 5.3: left pad the 64b seqno + * with 0s, and xor with the IV + * + * gcm_init does not provide a way to specify the + * length of the iv, so we have hard-coded it to 12 in + * openssl + */ + memcpy(seq_num, &seq, sizeof(seq)); + + offset = iv_len - sizeof(seq); + memcpy(iv, static_iv, offset); + for (i = 0; i < sizeof(seq); i++) + iv[i + offset] = static_iv[i + offset] ^ seq_num[i]; + } else { + memcpy(iv, static_iv, iv_len); + memcpy(iv + iv_len, &seq, sizeof(seq)); + } isa->gcm_init(&isa->key_data, &isa->ctx_data, iv, ad, (size_t)adlen); for (i = 0; i < numiovs; i++) { in = iniov[i].iov_base; @@ -176,6 +206,12 @@ fixup_done: goto fixup_done; } } + if (tls_13) { + *tagout = tls_13_rtype; + isa->gcm_upd(&isa->key_data, &isa->ctx_data, tagout, + tagout, 1); + tagout += 1; + } isa->gcm_final(&isa->key_data, &isa->ctx_data, tagout, *taglen); return (0); } @@ -183,14 +219,16 @@ fixup_done: static int ktls_intelisa_aead_encrypt(struct ktls_session *tls, const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov, - struct iovec *outiov, int iovcnt, uint64_t seqno) + struct iovec *outiov, int iovcnt, uint64_t seqno, uint8_t tls_rtype) { struct isa_gcm_struct *isa; struct tls_aead_data ad; struct tls_nonce_data nd; - size_t noncelen, adlen, taglen; + size_t adlen, taglen; + uint8_t *adptr; int ret; uint16_t tls_comp_len; + bool tls_13; isa = (struct isa_gcm_struct *)tls->cipher; @@ -198,22 +236,33 @@ ktls_intelisa_aead_encrypt(struct ktls_session *tls, counter_u64_add(ktls_offload_isa_aead, 1); taglen = KTLS_INTELISA_AEAD_TAGLEN; - /* Setup the nonce */ - memcpy(nd.fixed, tls->params.iv, TLS_AEAD_GCM_LEN); - memcpy(&nd.seq, hdr + 1, sizeof(nd.seq)); - noncelen = sizeof(nd); + if (tls->params.tls_vminor == TLS_MINOR_VER_THREE) { + tls_13 = true; + counter_u64_add(ktls_offload_isa_tls_13, 1); + adlen = sizeof(ad) - sizeof(ad.seq); + adptr = &ad.type; + ad.tls_length = 
hdr->tls_length; + + } else { + tls_13 = false; + counter_u64_add(ktls_offload_isa_tls_12, 1); + tls_comp_len = ntohs(hdr->tls_length) - + (KTLS_INTELISA_AEAD_TAGLEN + sizeof(nd.seq)); + adlen = sizeof(ad); + adptr = (uint8_t *)&ad; + ad.tls_length = htons(tls_comp_len); + } /* Setup the associated data */ - tls_comp_len = ntohs(hdr->tls_length) - - (KTLS_INTELISA_AEAD_TAGLEN + sizeof(nd.seq)); ad.seq = htobe64(seqno); ad.type = hdr->tls_type; ad.tls_vmajor = hdr->tls_vmajor; ad.tls_vminor = hdr->tls_vminor; - ad.tls_length = htons(tls_comp_len); - adlen = sizeof(ad); + ret = intel_isa_seal(isa, outiov, iovcnt, - (uint8_t *) & nd, noncelen, iniov, - (uint8_t *) & ad, adlen, trailer, &taglen); + tls->params.iv, tls->params.iv_len, + htobe64(seqno), iniov, + adptr, adlen, trailer, &taglen, + tls_13, tls_rtype); return(ret); } @@ -305,6 +354,8 @@ static int intelisa_init(void) { ktls_offload_isa_aead = counter_u64_alloc(M_WAITOK); + ktls_offload_isa_tls_12 = counter_u64_alloc(M_WAITOK); + ktls_offload_isa_tls_13 = counter_u64_alloc(M_WAITOK); intelisa_aligned_mem = counter_u64_alloc(M_WAITOK); intelisa_aligned_mem_b = counter_u64_alloc(M_WAITOK); intelisa_unaligned_mem = counter_u64_alloc(M_WAITOK);