diff --git a/sys/conf/files.powerpc b/sys/conf/files.powerpc index 6d584b5..17f1c70 100644 --- a/sys/conf/files.powerpc +++ b/sys/conf/files.powerpc @@ -40,6 +40,7 @@ dev/powermac_nvram/powermac_nvram.c optional powermac_nvram powermac dev/quicc/quicc_bfe_ocp.c optional quicc mpc85xx dev/rtc/ds1553_bus_lbc.c optional ds1553 dev/scc/scc_bfe_macio.c optional scc powermac +dev/sec/sec.c optional sec mpc85xx dev/sound/macio/aoa.c optional snd_davbus | snd_ai2s powermac dev/sound/macio/davbus.c optional snd_davbus powermac dev/sound/macio/i2s.c optional snd_ai2s powermac diff --git a/sys/dev/sec/sec.c b/sys/dev/sec/sec.c new file mode 100644 index 0000000..3f26c70 --- /dev/null +++ b/sys/dev/sec/sec.c @@ -0,0 +1,1872 @@ +/*- + * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and + * 3.0 are supported. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include "cryptodev_if.h" + +#include + +static int sec_probe(device_t dev); +static int sec_attach(device_t dev); +static int sec_detach(device_t dev); +static int sec_suspend(device_t dev); +static int sec_resume(device_t dev); +static void sec_shutdown(device_t dev); +static void sec_primary_intr(void *arg); +static void sec_secondary_intr(void *arg); +static int sec_setup_intr(struct sec_softc *sc, struct resource **ires, + void **ihand, int *irid, driver_intr_t handler, const char *iname); +static void sec_release_intr(struct sec_softc *sc, struct resource *ires, + void *ihand, int irid, const char *iname); +static int sec_controller_reset(struct sec_softc *sc); +static int sec_channel_reset(struct sec_softc *sc, int channel, int full); +static int sec_init(struct sec_softc *sc); +static int sec_alloc_dma_mem(struct sec_softc *sc, + struct sec_dma_mem *dma_mem, bus_size_t size); +static int sec_desc_map_dma(struct sec_softc *sc, + struct sec_dma_mem *dma_mem, void *mem, bus_size_t size, int type, + struct sec_desc_map_info *sdmi); +static void sec_free_dma_mem(struct sec_dma_mem *dma_mem); +static void sec_enqueue(struct sec_softc *sc); +static int sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, + int channel); +static int sec_eu_channel(struct sec_softc *sc, int eu); +static int sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc, + u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype); +static int sec_make_pointer_direct(struct sec_softc *sc, + struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize); +static int sec_alloc_session(struct sec_softc *sc); +static int sec_newsession(device_t dev, u_int32_t *sidp, + struct cryptoini *cri); +static int sec_freesession(device_t dev, uint64_t tid); +static int sec_process(device_t dev, struct cryptop *crp, int hint); +static int sec_split_cri(struct cryptoini *cri, struct cryptoini **enc, + struct cryptoini **mac); +static int sec_split_crp(struct cryptop *crp, struct cryptodesc **enc, + struct cryptodesc **mac); +static int sec_build_common_ns_desc(struct sec_softc *sc, + struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp, + struct cryptodesc *enc, int buftype); +static int sec_build_common_s_desc(struct sec_softc *sc, + struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp, + struct cryptodesc *enc, struct cryptodesc *mac, int buftype); + +static struct sec_session *sec_get_session(struct sec_softc *sc, u_int sid); +static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr); + +/* AESU */ +static int sec_aesu_newsession(struct sec_softc *sc, + struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac); +static int sec_aesu_make_desc(struct sec_softc *sc, + struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, + int buftype); + +/* DEU */ +static int sec_deu_newsession(struct sec_softc *sc, + struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac); +static int sec_deu_make_desc(struct sec_softc *sc, + struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, + int buftype); + +/* MDEU */ +static int sec_mdeu_can_handle(u_int alg); +static int sec_mdeu_config(struct cryptodesc *crd, + u_int *eu, u_int *mode, u_int *hashlen); +static int sec_mdeu_newsession(struct sec_softc *sc, + struct sec_session *ses, struct cryptoini 
*enc, struct cryptoini *mac); +static int sec_mdeu_make_desc(struct sec_softc *sc, + struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, + int buftype); + +static device_method_t sec_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, sec_probe), + DEVMETHOD(device_attach, sec_attach), + DEVMETHOD(device_detach, sec_detach), + + DEVMETHOD(device_suspend, sec_suspend), + DEVMETHOD(device_resume, sec_resume), + DEVMETHOD(device_shutdown, sec_shutdown), + + /* Bus interface */ + DEVMETHOD(bus_print_child, bus_generic_print_child), + DEVMETHOD(bus_driver_added, bus_generic_driver_added), + + /* Crypto methods */ + DEVMETHOD(cryptodev_newsession, sec_newsession), + DEVMETHOD(cryptodev_freesession,sec_freesession), + DEVMETHOD(cryptodev_process, sec_process), + + { 0, 0 } +}; +static driver_t sec_driver = { + "sec", + sec_methods, + sizeof(struct sec_softc), +}; + +static devclass_t sec_devclass; +DRIVER_MODULE(sec, ocpbus, sec_driver, sec_devclass, 0, 0); +MODULE_DEPEND(sec, crypto, 1, 1, 1); + +static struct sec_eu_methods sec_eus[] = { + { + sec_aesu_newsession, + sec_aesu_make_desc, + }, + { + sec_deu_newsession, + sec_deu_make_desc, + }, + { + sec_mdeu_newsession, + sec_mdeu_make_desc, + }, + { NULL, NULL } +}; + +static inline void +sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op) +{ + + /* Sync only if dma memory is valid */ + if (dma_mem->dma_vaddr != NULL) + bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op); +} + +static inline void +sec_free_session(struct sec_softc *sc, struct sec_session *ses) +{ + + SEC_LOCK(sc, sessions); + ses->ss_used = 0; + SEC_UNLOCK(sc, sessions); +} + +static inline void * +sec_get_pointer_data(struct sec_desc *desc, u_int n) +{ + + return (desc->sd_ptr_dmem[n].dma_vaddr); +} + +static int +sec_probe(device_t dev) +{ + struct sec_softc *sc; + device_t parent; + uintptr_t devtype; + uint64_t id; + int error; + + parent = device_get_parent(dev); + error = BUS_READ_IVAR(parent, dev, OCPBUS_IVAR_DEVTYPE, &devtype); + if (error) + return (error); + + if (devtype != OCPBUS_DEVTYPE_SEC) + return (ENXIO); + + sc = device_get_softc(dev); + + sc->sc_rrid = 0; + sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->sc_rrid, + 0ul, ~0ul, SEC_IO_SIZE, RF_ACTIVE); + + if (sc->sc_rres == NULL) + return (ENXIO); + + sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); + sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); + + id = SEC_READ(sc, SEC_ID); + + bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres); + + switch (id) { + case SEC_20_ID: + device_set_desc(dev, "Freescale Security Engine 2.0"); + sc->sc_version = 2; + break; + case SEC_30_ID: + device_set_desc(dev, "Freescale Security Engine 3.0"); + sc->sc_version = 3; + break; + default: + device_printf(dev, "unknown SEC ID 0x%016llx!\n", id); + return (ENXIO); + } + + return (0); +} + +static int +sec_attach(device_t dev) +{ + struct sec_softc *sc; + struct sec_hw_lt *lt; + int error = 0; + int i; + + sc = device_get_softc(dev); + sc->sc_dev = dev; + sc->sc_blocked = 0; + sc->sc_shutdown = 0; + + sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); + if (sc->sc_cid < 0) { + device_printf(dev, "could not get crypto driver ID!\n"); + return (ENXIO); + } + + /* Init locks */ + mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev), + "SEC Controller lock", MTX_DEF); + mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev), + "SEC Descriptors lock", MTX_DEF); + mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev), + "SEC Sessions 
lock", MTX_DEF); + + /* Allocate I/O memory for SEC registers */ + sc->sc_rrid = 0; + sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->sc_rrid, + 0ul, ~0ul, SEC_IO_SIZE, RF_ACTIVE); + + if (sc->sc_rres == NULL) { + device_printf(dev, "could not allocate I/O memory!\n"); + goto fail1; + } + + sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); + sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); + + /* Setup interrupts */ + sc->sc_pri_irid = 0; + error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand, + &sc->sc_pri_irid, sec_primary_intr, "primary"); + + if (error) + goto fail2; + + sc->sc_sec_irid = 1; + error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand, + &sc->sc_sec_irid, sec_secondary_intr, "secondary"); + + if (error) + goto fail3; + + /* Alloc DMA memory for descriptors and link tables */ + error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem), + SEC_DESCRIPTORS * sizeof(struct sec_hw_desc)); + + if (error) + goto fail4; + + error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem), + (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt)); + + if (error) + goto fail5; + + /* Fill in descriptors and link tables */ + for (i = 0; i < SEC_DESCRIPTORS; i++) { + sc->sc_desc[i].sd_desc = + (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i; + sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr + + (i * sizeof(struct sec_hw_desc)); + } + + for (i = 0; i < SEC_LT_ENTRIES + 1; i++) { + sc->sc_lt[i].sl_lt = + (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i; + sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr + + (i * sizeof(struct sec_hw_lt)); + } + + /* Last entry in link table is used to create a circle */ + lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt; + lt->shl_length = 0; + lt->shl_r = 0; + lt->shl_n = 1; + lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr; + + /* Init descriptor and link table queues pointers */ + SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS); + SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS); + SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS); + SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS); + SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS); + SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS); + SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES); + SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES); + + /* Create masks for fast checks */ + sc->sc_int_error_mask = 0; + for (i = 0; i < SEC_CHANNELS; i++) + sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i)); + + switch (sc->sc_version) { + case 2: + sc->sc_channel_idle_mask = + (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) | + (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) | + (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) | + (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S); + break; + case 3: + sc->sc_channel_idle_mask = + (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) | + (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) | + (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) | + (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S); + break; + } + + /* Init hardware */ + error = sec_init(sc); + + if (error) + goto fail6; + + /* Register in OCF (AESU) */ + crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0); + + /* Register in OCF (DEU) */ + crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); + + /* Register in OCF (MDEU) */ + crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0); + crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); + crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0); + crypto_register(sc->sc_cid, 
CRYPTO_SHA1_HMAC, 0, 0); + crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0); + if (sc->sc_version >= 3) { + crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0); + crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0); + } + + return (0); + +fail6: + sec_free_dma_mem(&(sc->sc_lt_dmem)); +fail5: + sec_free_dma_mem(&(sc->sc_desc_dmem)); +fail4: + sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand, + sc->sc_sec_irid, "secondary"); +fail3: + sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand, + sc->sc_pri_irid, "primary"); +fail2: + bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres); +fail1: + mtx_destroy(&sc->sc_controller_lock); + mtx_destroy(&sc->sc_descriptors_lock); + mtx_destroy(&sc->sc_sessions_lock); + + return (ENXIO); +} + +static int +sec_detach(device_t dev) +{ + struct sec_softc *sc = device_get_softc(dev); + int i, error, timeout = SEC_TIMEOUT; + + /* Prepare driver to shutdown */ + SEC_LOCK(sc, descriptors); + sc->sc_shutdown = 1; + SEC_UNLOCK(sc, descriptors); + + /* Wait until all queued processing finishes */ + while (1) { + SEC_LOCK(sc, descriptors); + i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc); + SEC_UNLOCK(sc, descriptors); + + if (i == 0) + break; + + if (timeout < 0) { + device_printf(dev, "queue flush timeout!\n"); + + /* DMA can be still active - stop it */ + for (i = 0; i < SEC_CHANNELS; i++) + sec_channel_reset(sc, i, 1); + + break; + } + + timeout -= 1000; + DELAY(1000); + } + + /* Disable interrupts */ + SEC_WRITE(sc, SEC_IER, 0); + + /* Unregister from OCF */ + crypto_unregister_all(sc->sc_cid); + + /* Free DMA memory */ + for (i = 0; i < SEC_DESCRIPTORS; i++) + SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i])); + + sec_free_dma_mem(&(sc->sc_lt_dmem)); + sec_free_dma_mem(&(sc->sc_desc_dmem)); + + /* Release interrupts */ + sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand, + sc->sc_pri_irid, "primary"); + sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand, + sc->sc_sec_irid, "secondary"); + + /* Release memory */ + if (sc->sc_rres) { + error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, + sc->sc_rres); + if (error) + device_printf(dev, "bus_release_resource() failed for" + " I/O memory, error %d\n", error); + + sc->sc_rres = NULL; + } + + mtx_destroy(&sc->sc_controller_lock); + mtx_destroy(&sc->sc_descriptors_lock); + mtx_destroy(&sc->sc_sessions_lock); + + return (0); +} + +static int +sec_suspend(device_t dev) +{ + + return (0); +} + +static int +sec_resume(device_t dev) +{ + + return (0); +} + +static void +sec_shutdown(device_t dev) +{ +} + +static int +sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand, + int *irid, driver_intr_t handler, const char *iname) +{ + int error; + + (*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid, + RF_ACTIVE); + + if ((*ires) == NULL) { + device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname); + return (ENXIO); + } + + error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET, + NULL, handler, sc, ihand); + + if (error) { + device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname); + if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires)) + device_printf(sc->sc_dev, "could not release %s IRQ\n", + iname); + + (*ires) = NULL; + return (error); + } + + return (0); +} + +static void +sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand, + int irid, const char *iname) +{ + int error; + + if (ires == NULL) + return; + + error = bus_teardown_intr(sc->sc_dev, ires, ihand); + if (error) + 
device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s" + " IRQ, error %d\n", iname, error); + + error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires); + if (error) + device_printf(sc->sc_dev, "bus_release_resource() failed for %s" + " IRQ, error %d\n", iname, error); +} + +static void +sec_primary_intr(void *arg) +{ + struct sec_softc *sc = arg; + struct sec_desc *desc; + uint64_t isr; + int i, wakeup = 0; + + SEC_LOCK(sc, controller); + + /* Check for errors */ + isr = SEC_READ(sc, SEC_ISR); + if (isr & sc->sc_int_error_mask) { + /* Check each channel for error */ + for (i = 0; i < SEC_CHANNELS; i++) { + if ((isr & SEC_INT_CH_ERR(i)) == 0) + continue; + + device_printf(sc->sc_dev, + "I/O error on channel %i!\n", i); + + /* Find and mark problematic descriptor */ + desc = sec_find_desc(sc, SEC_READ(sc, + SEC_CHAN_CDPR(i))); + + if (desc != NULL) + desc->sd_error = EIO; + + /* Do partial channel reset */ + sec_channel_reset(sc, i, 0); + } + } + + /* ACK interrupt */ + SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL); + + SEC_UNLOCK(sc, controller); + SEC_LOCK(sc, descriptors); + + /* Handle processed descriptors */ + SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + while (SEC_QUEUED_DESC_CNT(sc) > 0) { + desc = SEC_GET_QUEUED_DESC(sc); + + if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) { + SEC_PUT_BACK_QUEUED_DESC(sc); + break; + } + + SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD | + BUS_DMASYNC_PREWRITE); + + desc->sd_crp->crp_etype = desc->sd_error; + crypto_done(desc->sd_crp); + + SEC_DESC_FREE_POINTERS(desc); + SEC_DESC_FREE_LT(sc, desc); + SEC_DESC_QUEUED2FREE(sc); + } + + SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + + if (!sc->sc_shutdown) { + wakeup = sc->sc_blocked; + sc->sc_blocked = 0; + } + + SEC_UNLOCK(sc, descriptors); + + /* Enqueue ready descriptors in hardware */ + sec_enqueue(sc); + + if (wakeup) + crypto_unblock(sc->sc_cid, wakeup); +} + +static void +sec_secondary_intr(void *arg) +{ + struct sec_softc *sc = arg; + + device_printf(sc->sc_dev, "spurious secondary interrupt!\n"); + sec_primary_intr(arg); +} + +static int +sec_controller_reset(struct sec_softc *sc) +{ + int timeout = SEC_TIMEOUT; + + /* Reset Controller */ + SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR | 1); + + while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) { + DELAY(1000); + timeout -= 1000; + + if (timeout < 0) { + device_printf(sc->sc_dev, "timeout while waiting for " + "device reset!\n"); + return (ETIMEDOUT); + } + } + + return (0); +} + +static int +sec_channel_reset(struct sec_softc *sc, int channel, int full) +{ + int timeout = SEC_TIMEOUT; + uint64_t bit = (full) ? 
SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON; + uint64_t reg; + + /* Reset Channel */ + reg = SEC_READ(sc, SEC_CHAN_CCR(channel)); + SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit); + + while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) { + DELAY(1000); + timeout -= 1000; + + if (timeout < 0) { + device_printf(sc->sc_dev, "timeout while waiting for " + "channel reset!\n"); + return (ETIMEDOUT); + } + } + + if (full) { + reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS; + + switch(sc->sc_version) { + case 2: + reg |= SEC_CHAN_CCR_CDWE; + break; + case 3: + reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN; + break; + } + + SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg); + } + + return (0); +} + +static int +sec_init(struct sec_softc *sc) +{ + uint64_t reg; + int error, i; + + /* Reset controller twice to clear all pending interrupts */ + error = sec_controller_reset(sc); + if (error) + return (error); + + error = sec_controller_reset(sc); + if (error) + return (error); + + /* Reset channels */ + for (i = 0; i < SEC_CHANNELS; i++) { + error = sec_channel_reset(sc, i, 1); + if (error) + return (error); + } + + /* Enable Interrupts */ + reg = SEC_INT_ITO; + for (i = 0; i < SEC_CHANNELS; i++) + reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i); + + SEC_WRITE(sc, SEC_IER, reg); + + return (error); +} + +static void +sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) +{ + struct sec_dma_mem *dma_mem = arg; + + if (error) + return; + + KASSERT(nseg == 1, ("Wrong number of segments, should be 1")); + dma_mem->dma_paddr = segs->ds_addr; +} + +static void +sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg, + int error) +{ + struct sec_desc_map_info *sdmi = arg; + struct sec_softc *sc = sdmi->sdmi_sc; + struct sec_lt *lt = NULL; + bus_addr_t addr; + bus_size_t size; + int i; + + SEC_LOCK_ASSERT(sc, descriptors); + + if (error) + return; + + for (i = 0; i < nseg; i++) { + addr = segs[i].ds_addr; + size = segs[i].ds_len; + + /* Skip requested offset */ + if (sdmi->sdmi_offset >= size) { + sdmi->sdmi_offset -= size; + continue; + } + + addr += sdmi->sdmi_offset; + size -= sdmi->sdmi_offset; + sdmi->sdmi_offset = 0; + + /* Do not link more than requested */ + if (sdmi->sdmi_size < size) + size = sdmi->sdmi_size; + + lt = SEC_ALLOC_LT_ENTRY(sc); + lt->sl_lt->shl_length = size; + lt->sl_lt->shl_r = 0; + lt->sl_lt->shl_n = 0; + lt->sl_lt->shl_ptr = addr; + + if (sdmi->sdmi_lt_first == NULL) + sdmi->sdmi_lt_first = lt; + + sdmi->sdmi_lt_used += 1; + + if ((sdmi->sdmi_size -= size) == 0) + break; + } + + sdmi->sdmi_lt_last = lt; +} + +static void +sec_dma_map_desc_cb2(void *arg, bus_dma_segment_t *segs, int nseg, + bus_size_t size, int error) +{ + + sec_dma_map_desc_cb(arg, segs, nseg, error); +} + +static int +sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem, + bus_size_t size) +{ + int error; + + if (dma_mem->dma_vaddr != NULL) + return (EBUSY); + + error = bus_dma_tag_create(NULL, /* parent */ + SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filtfunc, filtfuncarg */ + size, 1, /* maxsize, nsegments */ + size, 0, /* maxsegsz, flags */ + NULL, NULL, /* lockfunc, lockfuncarg */ + &(dma_mem->dma_tag)); /* dmat */ + + if (error) { + device_printf(sc->sc_dev, "failed to allocate busdma tag, error" + " %i!\n", error); + goto err1; + } + + error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr), + BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map)); + + if (error) { + 
device_printf(sc->sc_dev, "failed to allocate DMA safe" + " memory, error %i!\n", error); + goto err2; + } + + error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map, + dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem, + BUS_DMA_NOWAIT); + + if (error) { + device_printf(sc->sc_dev, "cannot get address of the DMA" + " memory, error %i\n", error); + goto err3; + } + + dma_mem->dma_is_map = 0; + return (0); + +err3: + bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map); +err2: + bus_dma_tag_destroy(dma_mem->dma_tag); +err1: + dma_mem->dma_vaddr = NULL; + return(error); +} + +static int +sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, void *mem, + bus_size_t size, int type, struct sec_desc_map_info *sdmi) +{ + int error; + + if (dma_mem->dma_vaddr != NULL) + return (EBUSY); + + switch (type) { + case SEC_MEMORY: + break; + case SEC_UIO: + size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE; + break; + case SEC_MBUF: + size = m_length((struct mbuf*)mem, NULL); + break; + default: + return (EINVAL); + } + + error = bus_dma_tag_create(NULL, /* parent */ + SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filtfunc, filtfuncarg */ + size, /* maxsize */ + SEC_FREE_LT_CNT(sc), /* nsegments */ + SEC_MAX_DMA_BLOCK_SIZE, 0, /* maxsegsz, flags */ + NULL, NULL, /* lockfunc, lockfuncarg */ + &(dma_mem->dma_tag)); /* dmat */ + + if (error) { + device_printf(sc->sc_dev, "failed to allocate busdma tag, error" + " %i!\n", error); + dma_mem->dma_vaddr = NULL; + return (error); + } + + error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map)); + + if (error) { + device_printf(sc->sc_dev, "failed to create DMA map, error %i!" 
+ "\n", error); + bus_dma_tag_destroy(dma_mem->dma_tag); + return (error); + } + + switch (type) { + case SEC_MEMORY: + error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map, + mem, size, sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT); + break; + case SEC_UIO: + error = bus_dmamap_load_uio(dma_mem->dma_tag, dma_mem->dma_map, + mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT); + break; + case SEC_MBUF: + error = bus_dmamap_load_mbuf(dma_mem->dma_tag, dma_mem->dma_map, + mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT); + break; + } + + if (error) { + device_printf(sc->sc_dev, "cannot get address of the DMA" + " memory, error %i!\n", error); + bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map); + bus_dma_tag_destroy(dma_mem->dma_tag); + return (error); + } + + dma_mem->dma_is_map = 1; + dma_mem->dma_vaddr = mem; + + return (0); +} + +static void +sec_free_dma_mem(struct sec_dma_mem *dma_mem) +{ + + /* Check for double free */ + if (dma_mem->dma_vaddr == NULL) + return; + + bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map); + + if (dma_mem->dma_is_map) + bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map); + else + bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, + dma_mem->dma_map); + + bus_dma_tag_destroy(dma_mem->dma_tag); + dma_mem->dma_vaddr = NULL; +} + +static int +sec_eu_channel(struct sec_softc *sc, int eu) +{ + uint64_t reg; + int channel = 0; + + SEC_LOCK_ASSERT(sc, controller); + + reg = SEC_READ(sc, SEC_EUASR); + + switch (eu) { + case SEC_EU_AFEU: + channel = SEC_EUASR_AFEU(reg); + break; + case SEC_EU_DEU: + channel = SEC_EUASR_DEU(reg); + break; + case SEC_EU_MDEU_A: + case SEC_EU_MDEU_B: + channel = SEC_EUASR_MDEU(reg); + break; + case SEC_EU_RNGU: + channel = SEC_EUASR_RNGU(reg); + break; + case SEC_EU_PKEU: + channel = SEC_EUASR_PKEU(reg); + break; + case SEC_EU_AESU: + channel = SEC_EUASR_AESU(reg); + break; + case SEC_EU_KEU: + channel = SEC_EUASR_KEU(reg); + break; + case SEC_EU_CRCU: + channel = SEC_EUASR_CRCU(reg); + break; + } + + return (channel - 1); +} + +static int +sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel) +{ + u_int fflvl = SEC_MAX_FIFO_LEVEL; + uint64_t reg; + int i; + + SEC_LOCK_ASSERT(sc, controller); + + /* Find free channel if have not got one */ + if (channel < 0) { + for (i = 0; i < SEC_CHANNELS; i++) { + reg = SEC_READ(sc, SEC_CHAN_CSR(channel)); + + if ((reg & sc->sc_channel_idle_mask) == 0) { + channel = i; + break; + } + } + } + + /* There is no free channel */ + if (channel < 0) + return (-1); + + /* Check FIFO level on selected channel */ + reg = SEC_READ(sc, SEC_CHAN_CSR(channel)); + + switch(sc->sc_version) { + case 2: + fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M; + break; + case 3: + fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M; + break; + } + + if (fflvl >= SEC_MAX_FIFO_LEVEL) + return (-1); + + /* Enqueue descriptor in channel */ + SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr); + + return (channel); +} + +static void +sec_enqueue(struct sec_softc *sc) +{ + struct sec_desc *desc; + int ch0, ch1; + + SEC_LOCK(sc, descriptors); + SEC_LOCK(sc, controller); + + while (SEC_READY_DESC_CNT(sc) > 0) { + desc = SEC_GET_READY_DESC(sc); + + ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0); + ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1); + + /* + * Both EU are used by the same channel. + * Enqueue descriptor in channel used by busy EUs. 
+ */ + if (ch0 >= 0 && ch0 == ch1) { + if (sec_enqueue_desc(sc, desc, ch0) >= 0) { + SEC_DESC_READY2QUEUED(sc); + continue; + } + } + + /* + * Only one EU is free. + * Enqueue the descriptor in the channel used by the busy EU. + */ + if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) { + if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1) + >= 0) { + SEC_DESC_READY2QUEUED(sc); + continue; + } + } + + /* + * Both EUs are free. + * Enqueue the descriptor in the first free channel. + */ + if (ch0 < 0 && ch1 < 0) { + if (sec_enqueue_desc(sc, desc, -1) >= 0) { + SEC_DESC_READY2QUEUED(sc); + continue; + } + } + + /* Current descriptor cannot be queued at the moment */ + SEC_PUT_BACK_READY_DESC(sc); + break; + } + + SEC_UNLOCK(sc, controller); + SEC_UNLOCK(sc, descriptors); +} + +static struct sec_desc * +sec_find_desc(struct sec_softc *sc, bus_addr_t paddr) +{ + struct sec_desc *desc = NULL; + int i; + + SEC_LOCK_ASSERT(sc, descriptors); + + for (i = 0; i < SEC_CHANNELS; i++) { + if (sc->sc_desc[i].sd_desc_paddr == paddr) { + desc = &(sc->sc_desc[i]); + break; + } + } + + return (desc); +} + +static int +sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n, + bus_addr_t data, bus_size_t dsize) +{ + struct sec_hw_desc_ptr *ptr; + + SEC_LOCK_ASSERT(sc, descriptors); + + ptr = &(desc->sd_desc->shd_pointer[n]); + ptr->shdp_length = dsize; + ptr->shdp_extent = 0; + ptr->shdp_j = 0; + ptr->shdp_ptr = data; + + return (0); +} + +static int +sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc, + u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype) +{ + struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 }; + struct sec_hw_desc_ptr *ptr; + int error; + + SEC_LOCK_ASSERT(sc, descriptors); + + /* For flat memory, map only the requested region */ + if (dtype == SEC_MEMORY) { + data = (uint8_t*)(data) + doffset; + sdmi.sdmi_offset = 0; + } + + error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), data, dsize, + dtype, &sdmi); + + if (error) + return (error); + + sdmi.sdmi_lt_last->sl_lt->shl_r = 1; + desc->sd_lt_used += sdmi.sdmi_lt_used; + + ptr = &(desc->sd_desc->shd_pointer[n]); + ptr->shdp_length = dsize; + ptr->shdp_extent = 0; + ptr->shdp_j = 1; + ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr; + + return (0); +} + +static int +sec_split_cri(struct cryptoini *cri, struct cryptoini **enc, + struct cryptoini **mac) +{ + struct cryptoini *e, *m; + + e = cri; + m = cri->cri_next; + + /* We can handle only two operations */ + if (m && m->cri_next) + return (EINVAL); + + if (sec_mdeu_can_handle(e->cri_alg)) { + cri = m; + m = e; + e = cri; + } + + if (m && !sec_mdeu_can_handle(m->cri_alg)) + return (EINVAL); + + *enc = e; + *mac = m; + + return (0); +} + +static int +sec_split_crp(struct cryptop *crp, struct cryptodesc **enc, + struct cryptodesc **mac) +{ + struct cryptodesc *e, *m, *t; + + e = crp->crp_desc; + m = e->crd_next; + + /* We can handle only two operations */ + if (m && m->crd_next) + return (EINVAL); + + if (sec_mdeu_can_handle(e->crd_alg)) { + t = m; + m = e; + e = t; + } + + if (m && !sec_mdeu_can_handle(m->crd_alg)) + return (EINVAL); + + *enc = e; + *mac = m; + + return (0); +} + +static int +sec_alloc_session(struct sec_softc *sc) +{ + struct sec_session *ses = NULL; + int sid = -1; + u_int i; + + SEC_LOCK(sc, sessions); + + for (i = 0; i < SEC_MAX_SESSIONS; i++) { + if (sc->sc_sessions[i].ss_used == 0) { + ses = &(sc->sc_sessions[i]); + ses->ss_used = 1; + ses->ss_ivlen = 0; + ses->ss_klen = 0; + ses->ss_mklen = 0; + sid = i; + break; + } + } + 
+ SEC_UNLOCK(sc, sessions); + + return (sid); +} + +static struct sec_session * +sec_get_session(struct sec_softc *sc, u_int sid) +{ + struct sec_session *ses; + + if (sid >= SEC_MAX_SESSIONS) + return (NULL); + + SEC_LOCK(sc, sessions); + + ses = &(sc->sc_sessions[sid]); + + if (ses->ss_used == 0) + ses = NULL; + + SEC_UNLOCK(sc, sessions); + + return (ses); +} + +static int +sec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri) +{ + struct sec_softc *sc = device_get_softc(dev); + struct sec_eu_methods *eu = sec_eus; + struct cryptoini *enc = NULL; + struct cryptoini *mac = NULL; + struct sec_session *ses; + int error = -1; + int sid; + + error = sec_split_cri(cri, &enc, &mac); + if (error) + return (error); + + /* Check key lengths */ + if (enc && enc->cri_key && (enc->cri_klen / 8) > SEC_MAX_KEY_LEN) + return (E2BIG); + + if (mac && mac->cri_key && (mac->cri_klen / 8) > SEC_MAX_KEY_LEN) + return (E2BIG); + + /* Only SEC 3.0 supports digests larger than 256 bits */ + if (sc->sc_version < 3 && mac && mac->cri_klen > 256) + return (E2BIG); + + sid = sec_alloc_session(sc); + if (sid < 0) + return (ENOMEM); + + ses = sec_get_session(sc, sid); + + /* Find EU for this session */ + while (eu->sem_make_desc != NULL) { + error = eu->sem_newsession(sc, ses, enc, mac); + if (error >= 0) + break; + + eu++; + } + + /* If not found, return EINVAL */ + if (error < 0) { + sec_free_session(sc, ses); + return (EINVAL); + } + + /* Save cipher key */ + if (enc && enc->cri_key) { + ses->ss_klen = enc->cri_klen / 8; + memcpy(ses->ss_key, enc->cri_key, ses->ss_klen); + } + + /* Save digest key */ + if (mac && mac->cri_key) { + ses->ss_mklen = mac->cri_klen / 8; + memcpy(ses->ss_mkey, mac->cri_key, ses->ss_mklen); + } + + ses->ss_eu = eu; + *sidp = sid; + + return (0); +} + +static int +sec_freesession(device_t dev, uint64_t tid) +{ + struct sec_softc *sc = device_get_softc(dev); + struct sec_session *ses; + int error = 0; + + ses = sec_get_session(sc, CRYPTO_SESID2LID(tid)); + if (ses == NULL) + return (EINVAL); + + sec_free_session(sc, ses); + + return (error); +} + +static int +sec_process(device_t dev, struct cryptop *crp, int hint) +{ + struct sec_softc *sc = device_get_softc(dev); + struct sec_desc *desc = NULL; + struct cryptodesc *mac, *enc; + struct sec_session *ses; + int buftype, error = 0; + + /* Check Session ID */ + ses = sec_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid)); + if (ses == NULL) { + crp->crp_etype = EINVAL; + crypto_done(crp); + return (0); + } + + /* Check for input length */ + if (crp->crp_ilen > SEC_MAX_DMA_BLOCK_SIZE) { + crp->crp_etype = E2BIG; + crypto_done(crp); + return (0); + } + + /* Get descriptors */ + if (sec_split_crp(crp, &enc, &mac)) { + crp->crp_etype = EINVAL; + crypto_done(crp); + return (0); + } + + SEC_LOCK(sc, descriptors); + SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + /* Block driver if there is no free descriptors or we are going down */ + if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) { + sc->sc_blocked |= CRYPTO_SYMQ; + SEC_UNLOCK(sc, descriptors); + return (ERESTART); + } + + /* Prepare descriptor */ + desc = SEC_GET_FREE_DESC(sc); + desc->sd_lt_used = 0; + desc->sd_error = 0; + desc->sd_crp = crp; + + if (crp->crp_flags & CRYPTO_F_IOV) + buftype = SEC_UIO; + else if (crp->crp_flags & CRYPTO_F_IMBUF) + buftype = SEC_MBUF; + else + buftype = SEC_MEMORY; + + if (enc && enc->crd_flags & CRD_F_ENCRYPT) { + if (enc->crd_flags & CRD_F_IV_EXPLICIT) + memcpy(desc->sd_desc->shd_iv, enc->crd_iv, + ses->ss_ivlen); + else + 
arc4rand(desc->sd_desc->shd_iv, ses->ss_ivlen, 0); + + if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0) + crypto_copyback(crp->crp_flags, crp->crp_buf, + enc->crd_inject, ses->ss_ivlen, + desc->sd_desc->shd_iv); + } else if (enc) { + if (enc->crd_flags & CRD_F_IV_EXPLICIT) + memcpy(desc->sd_desc->shd_iv, enc->crd_iv, + ses->ss_ivlen); + else + crypto_copydata(crp->crp_flags, crp->crp_buf, + enc->crd_inject, ses->ss_ivlen, + desc->sd_desc->shd_iv); + } + + if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) { + if ((enc->crd_klen / 8) <= SEC_MAX_KEY_LEN) { + ses->ss_klen = enc->crd_klen / 8; + memcpy(ses->ss_key, enc->crd_key, ses->ss_klen); + } else + error = E2BIG; + } + + if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) { + if ((mac->crd_klen / 8) <= SEC_MAX_KEY_LEN) { + ses->ss_mklen = mac->crd_klen / 8; + memcpy(ses->ss_mkey, mac->crd_key, ses->ss_mklen); + } else + error = E2BIG; + } + + if (!error) { + memcpy(desc->sd_desc->shd_key, ses->ss_key, ses->ss_klen); + memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, ses->ss_mklen); + + error = ses->ss_eu->sem_make_desc(sc, ses, desc, crp, buftype); + } + + if (error) { + SEC_DESC_FREE_POINTERS(desc); + SEC_DESC_PUT_BACK_LT(sc, desc); + SEC_PUT_BACK_FREE_DESC(sc); + SEC_UNLOCK(sc, descriptors); + crp->crp_etype = error; + crypto_done(crp); + return (0); + } + + /* + * Skip DONE interrupt if this is not last request in burst, but only + * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE + * signaling on each descriptor. + */ + if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3) + desc->sd_desc->shd_dn = 0; + else + desc->sd_desc->shd_dn = 1; + + SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD | + BUS_DMASYNC_POSTWRITE); + SEC_DESC_FREE2READY(sc); + SEC_UNLOCK(sc, descriptors); + + /* Enqueue ready descriptors in hardware */ + sec_enqueue(sc); + + return (0); +} + +static int +sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc, + struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc, + int buftype) +{ + struct sec_hw_desc *hd = desc->sd_desc; + int error; + + hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP; + hd->shd_eu_sel1 = SEC_EU_NONE; + hd->shd_mode1 = 0; + + /* Pointer 0: NULL */ + error = sec_make_pointer_direct(sc, desc, 0, 0, 0); + if (error) + return (error); + + /* Pointer 1: IV IN */ + error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr + + offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen); + if (error) + return (error); + + /* Pointer 2: Cipher Key */ + error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + + offsetof(struct sec_hw_desc, shd_key), ses->ss_klen); + if (error) + return (error); + + /* Pointer 3: Data IN */ + error = sec_make_pointer(sc, desc, 3, crp->crp_buf, enc->crd_skip, + enc->crd_len, buftype); + if (error) + return (error); + + /* Pointer 4: Data OUT */ + error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip, + enc->crd_len, buftype); + if (error) + return (error); + + /* Pointer 5: IV OUT (Not used: NULL) */ + error = sec_make_pointer_direct(sc, desc, 5, 0, 0); + if (error) + return (error); + + /* Pointer 6: NULL */ + error = sec_make_pointer_direct(sc, desc, 6, 0, 0); + + return (error); +} + +static int +sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc, + struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc, + struct cryptodesc *mac, int buftype) +{ + struct sec_hw_desc *hd = desc->sd_desc; + u_int eu, mode, hashlen; + 
int error; + + if (mac->crd_len < enc->crd_len) + return (EINVAL); + + if (mac->crd_skip + mac->crd_len != enc->crd_skip + enc->crd_len) + return (EINVAL); + + error = sec_mdeu_config(mac, &eu, &mode, &hashlen); + if (error) + return (error); + + hd->shd_desc_type = SEC_DT_HMAC_SNOOP; + hd->shd_eu_sel1 = eu; + hd->shd_mode1 = mode; + + /* Pointer 0: HMAC Key */ + error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr + + offsetof(struct sec_hw_desc, shd_mkey), ses->ss_mklen); + if (error) + return (error); + + /* Pointer 1: HMAC-Only Data IN */ + error = sec_make_pointer(sc, desc, 1, crp->crp_buf, mac->crd_skip, + mac->crd_len - enc->crd_len, buftype); + if (error) + return (error); + + /* Pointer 2: Cipher Key */ + error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + + offsetof(struct sec_hw_desc, shd_key), ses->ss_klen); + if (error) + return (error); + + /* Pointer 3: IV IN */ + error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr + + offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen); + if (error) + return (error); + + /* Pointer 4: Data IN */ + error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip, + enc->crd_len, buftype); + if (error) + return (error); + + /* Pointer 5: Data OUT */ + error = sec_make_pointer(sc, desc, 5, crp->crp_buf, enc->crd_skip, + enc->crd_len, buftype); + if (error) + return (error); + + /* Pointer 6: HMAC OUT */ + error = sec_make_pointer(sc, desc, 6, crp->crp_buf, mac->crd_inject, + hashlen, buftype); + + return (error); +} + +/* AESU */ + +static int +sec_aesu_newsession(struct sec_softc *sc, struct sec_session *ses, + struct cryptoini *enc, struct cryptoini *mac) +{ + + if (enc == NULL) + return (-1); + + if (enc->cri_alg != CRYPTO_AES_CBC) + return (-1); + + ses->ss_ivlen = AES_BLOCK_LEN; + + return (0); +} + +static int +sec_aesu_make_desc(struct sec_softc *sc, struct sec_session *ses, + struct sec_desc *desc, struct cryptop *crp, int buftype) +{ + struct sec_hw_desc *hd = desc->sd_desc; + struct cryptodesc *enc, *mac; + int error; + + error = sec_split_crp(crp, &enc, &mac); + if (error) + return (error); + + if (!enc) + return (EINVAL); + + hd->shd_eu_sel0 = SEC_EU_AESU; + hd->shd_mode0 = SEC_AESU_MODE_CBC; + + if (enc->crd_alg != CRYPTO_AES_CBC) + return (EINVAL); + + if (enc->crd_flags & CRD_F_ENCRYPT) { + hd->shd_mode0 |= SEC_AESU_MODE_ED; + hd->shd_dir = 0; + } else + hd->shd_dir = 1; + + if (mac) + error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac, + buftype); + else + error = sec_build_common_ns_desc(sc, desc, ses, crp, enc, + buftype); + + return (error); +} + +/* DEU */ + +static int +sec_deu_newsession(struct sec_softc *sc, struct sec_session *ses, + struct cryptoini *enc, struct cryptoini *mac) +{ + + if (enc == NULL) + return (-1); + + switch (enc->cri_alg) { + case CRYPTO_DES_CBC: + case CRYPTO_3DES_CBC: + break; + default: + return (-1); + } + + ses->ss_ivlen = DES_BLOCK_LEN; + + return (0); +} + +static int +sec_deu_make_desc(struct sec_softc *sc, struct sec_session *ses, + struct sec_desc *desc, struct cryptop *crp, int buftype) +{ + struct sec_hw_desc *hd = desc->sd_desc; + struct cryptodesc *enc, *mac; + int error; + + error = sec_split_crp(crp, &enc, &mac); + if (error) + return (error); + + if (!enc) + return (EINVAL); + + hd->shd_eu_sel0 = SEC_EU_DEU; + hd->shd_mode0 = SEC_DEU_MODE_CBC; + + switch (enc->crd_alg) { + case CRYPTO_3DES_CBC: + hd->shd_mode0 |= SEC_DEU_MODE_TS; + break; + case CRYPTO_DES_CBC: + break; + default: + return (EINVAL); + } + + if (enc->crd_flags & 
CRD_F_ENCRYPT) { + hd->shd_mode0 |= SEC_DEU_MODE_ED; + hd->shd_dir = 0; + } else + hd->shd_dir = 1; + + if (mac) + error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac, + buftype); + else + error = sec_build_common_ns_desc(sc, desc, ses, crp, enc, + buftype); + + return (error); +} + +/* MDEU */ + +static int +sec_mdeu_can_handle(u_int alg) +{ + switch (alg) { + case CRYPTO_MD5: + case CRYPTO_SHA1: + case CRYPTO_MD5_HMAC: + case CRYPTO_SHA1_HMAC: + case CRYPTO_SHA2_256_HMAC: + case CRYPTO_SHA2_384_HMAC: + case CRYPTO_SHA2_512_HMAC: + return (1); + default: + return (0); + } +} + +static int +sec_mdeu_config(struct cryptodesc *crd, u_int *eu, u_int *mode, u_int *hashlen) +{ + + *mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT; + *eu = SEC_EU_NONE; + + switch (crd->crd_alg) { + case CRYPTO_MD5_HMAC: + *mode |= SEC_MDEU_MODE_HMAC; + /* FALLTHROUGH */ + case CRYPTO_MD5: + *eu = SEC_EU_MDEU_A; + *mode |= SEC_MDEU_MODE_MD5; + *hashlen = MD5_HASH_LEN; + break; + case CRYPTO_SHA1_HMAC: + *mode |= SEC_MDEU_MODE_HMAC; + /* FALLTHROUGH */ + case CRYPTO_SHA1: + *eu = SEC_EU_MDEU_A; + *mode |= SEC_MDEU_MODE_SHA1; + *hashlen = SHA1_HASH_LEN; + break; + case CRYPTO_SHA2_256_HMAC: + *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256; + *eu = SEC_EU_MDEU_A; + break; + case CRYPTO_SHA2_384_HMAC: + *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384; + *eu = SEC_EU_MDEU_B; + break; + case CRYPTO_SHA2_512_HMAC: + *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512; + *eu = SEC_EU_MDEU_B; + break; + default: + return (EINVAL); + } + + if (*mode & SEC_MDEU_MODE_HMAC) + *hashlen = SEC_HMAC_HASH_LEN; + + return (0); +} + +static int +sec_mdeu_newsession(struct sec_softc *sc, struct sec_session *ses, + struct cryptoini *enc, struct cryptoini *mac) +{ + + if (mac && sec_mdeu_can_handle(mac->cri_alg)) + return (0); + + return (-1); +} + +static int +sec_mdeu_make_desc(struct sec_softc *sc, struct sec_session *ses, + struct sec_desc *desc, struct cryptop *crp, int buftype) +{ + struct cryptodesc *enc, *mac; + struct sec_hw_desc *hd = desc->sd_desc; + u_int eu, mode, hashlen; + int error; + + error = sec_split_crp(crp, &enc, &mac); + if (error) + return (error); + + if (enc) + return (EINVAL); + + error = sec_mdeu_config(mac, &eu, &mode, &hashlen); + if (error) + return (error); + + hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP; + hd->shd_eu_sel0 = eu; + hd->shd_mode0 = mode; + hd->shd_eu_sel1 = SEC_EU_NONE; + hd->shd_mode1 = 0; + + /* Pointer 0: NULL */ + error = sec_make_pointer_direct(sc, desc, 0, 0, 0); + if (error) + return (error); + + /* Pointer 1: Context In (Not used: NULL) */ + error = sec_make_pointer_direct(sc, desc, 1, 0, 0); + if (error) + return (error); + + /* Pointer 2: HMAC Key (or NULL, depending on digest type) */ + if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC) + error = sec_make_pointer_direct(sc, desc, 2, + desc->sd_desc_paddr + offsetof(struct sec_hw_desc, + shd_mkey), ses->ss_mklen); + else + error = sec_make_pointer_direct(sc, desc, 2, 0, 0); + + if (error) + return (error); + + /* Pointer 3: Input Data */ + error = sec_make_pointer(sc, desc, 3, crp->crp_buf, mac->crd_skip, + mac->crd_len, buftype); + if (error) + return (error); + + /* Pointer 4: NULL */ + error = sec_make_pointer_direct(sc, desc, 4, 0, 0); + if (error) + return (error); + + /* Pointer 5: Hash out */ + error = sec_make_pointer(sc, desc, 5, crp->crp_buf, + mac->crd_inject, hashlen, buftype); + if (error) + return (error); + + /* Pointer 6: NULL */ + error = sec_make_pointer_direct(sc, desc, 6, 0, 0); + + return (0); +} diff --git 
a/sys/dev/sec/sec.h b/sys/dev/sec/sec.h new file mode 100644 index 0000000..18642e6 --- /dev/null +++ b/sys/dev/sec/sec.h @@ -0,0 +1,426 @@ +/*- + * Copyright (C) 2008 Semihalf, Piotr Ziecik + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _SEC_H +#define _SEC_H + +/* + * Each SEC channel can hold up to 24 descriptors. All 4 channels can be + * simultaneously active, holding 96 descriptors. Each descriptor can use 0 or + * more link table entries depending on the size and granularity of the input/output + * data. One link table entry is needed for each 65535 bytes of data. 
+ */ + +/* Driver settings */ +#define SEC_TIMEOUT 100000 +#define SEC_MAX_SESSIONS 256 +#define SEC_DESCRIPTORS 256 /* Must be power of 2 */ +#define SEC_LT_ENTRIES 1024 /* Must be power of 2 */ +#define SEC_MAX_IV_LEN 16 +#define SEC_MAX_KEY_LEN 64 + +/* SEC information */ +#define SEC_20_ID 0x0000000000000040ULL +#define SEC_30_ID 0x0030030000000000ULL +#define SEC_CHANNELS 4 +#define SEC_POINTERS 7 +#define SEC_MAX_DMA_BLOCK_SIZE 0xFFFF +#define SEC_MAX_FIFO_LEVEL 24 +#define SEC_DMA_ALIGNMENT 8 + +#define __packed__ __attribute__ ((__packed__)) + +struct sec_softc; +struct sec_session; + +/* SEC descriptor definition */ +struct sec_hw_desc_ptr { + u_int shdp_length : 16; + u_int shdp_j : 1; + u_int shdp_extent : 7; + u_int __padding0 : 4; + uint64_t shdp_ptr : 36; +} __packed__; + +struct sec_hw_desc { + union __packed__ { + struct __packed__ { + u_int eu_sel0 : 4; + u_int mode0 : 8; + u_int eu_sel1 : 4; + u_int mode1 : 8; + u_int desc_type : 5; + u_int __padding0 : 1; + u_int dir : 1; + u_int dn : 1; + u_int __padding1 : 32; + } request; + struct __packed__ { + u_int done : 8; + u_int __padding0 : 27; + u_int iccr0 : 2; + u_int __padding1 : 6; + u_int iccr1 : 2; + u_int __padding2 : 19; + } feedback; + } shd_control; + + struct sec_hw_desc_ptr shd_pointer[SEC_POINTERS]; + + /* Data below is mapped to descriptor pointers */ + uint8_t shd_iv[SEC_MAX_IV_LEN]; + uint8_t shd_key[SEC_MAX_KEY_LEN]; + uint8_t shd_mkey[SEC_MAX_KEY_LEN]; +} __packed__; + +#define shd_eu_sel0 shd_control.request.eu_sel0 +#define shd_mode0 shd_control.request.mode0 +#define shd_eu_sel1 shd_control.request.eu_sel1 +#define shd_mode1 shd_control.request.mode1 +#define shd_desc_type shd_control.request.desc_type +#define shd_dir shd_control.request.dir +#define shd_dn shd_control.request.dn +#define shd_done shd_control.feedback.done +#define shd_iccr0 shd_control.feedback.iccr0 +#define shd_iccr1 shd_control.feedback.iccr1 + +/* SEC link table entries definition */ +struct sec_hw_lt { + u_int shl_length : 16; + u_int __padding0 : 6; + u_int shl_r : 1; + u_int shl_n : 1; + u_int __padding1 : 4; + uint64_t shl_ptr : 36; +} __packed__; + +struct sec_dma_mem { + void *dma_vaddr; + bus_addr_t dma_paddr; + bus_dma_tag_t dma_tag; + bus_dmamap_t dma_map; + u_int dma_is_map; +}; + +struct sec_desc { + struct sec_hw_desc *sd_desc; + bus_addr_t sd_desc_paddr; + struct sec_dma_mem sd_ptr_dmem[SEC_POINTERS]; + struct cryptop *sd_crp; + u_int sd_lt_used; + u_int sd_error; +}; + +struct sec_lt { + struct sec_hw_lt *sl_lt; + bus_addr_t sl_lt_paddr; +}; + +struct sec_eu_methods { + int (*sem_newsession)(struct sec_softc *sc, + struct sec_session *ses, struct cryptoini *enc, + struct cryptoini *mac); + int (*sem_make_desc)(struct sec_softc *sc, + struct sec_session *ses, struct sec_desc *desc, + struct cryptop *crp, int buftype); +}; + +struct sec_session { + u_int ss_used; + struct sec_eu_methods *ss_eu; + uint8_t ss_key[SEC_MAX_KEY_LEN]; + uint8_t ss_mkey[SEC_MAX_KEY_LEN]; + u_int ss_klen; + u_int ss_mklen; + u_int ss_ivlen; +}; + +struct sec_desc_map_info { + struct sec_softc *sdmi_sc; + bus_size_t sdmi_size; + bus_size_t sdmi_offset; + struct sec_lt *sdmi_lt_first; + struct sec_lt *sdmi_lt_last; + u_int sdmi_lt_used; +}; + +struct sec_softc { + device_t sc_dev; + int32_t sc_cid; + int sc_blocked; + int sc_shutdown; + u_int sc_version; + + uint64_t sc_int_error_mask; + uint64_t sc_channel_idle_mask; + + struct sec_session sc_sessions[SEC_MAX_SESSIONS]; + + struct mtx sc_controller_lock; + struct mtx sc_descriptors_lock; + struct 
mtx sc_sessions_lock; + + struct sec_desc sc_desc[SEC_DESCRIPTORS]; + u_int sc_free_desc_get_cnt; + u_int sc_free_desc_put_cnt; + u_int sc_ready_desc_get_cnt; + u_int sc_ready_desc_put_cnt; + u_int sc_queued_desc_get_cnt; + u_int sc_queued_desc_put_cnt; + + struct sec_lt sc_lt[SEC_LT_ENTRIES + 1]; + u_int sc_lt_alloc_cnt; + u_int sc_lt_free_cnt; + + struct sec_dma_mem sc_desc_dmem; /* descriptors DMA memory */ + struct sec_dma_mem sc_lt_dmem; /* link tables DMA memory */ + + struct resource *sc_rres; /* register resource */ + int sc_rrid; /* register rid */ + struct { + bus_space_tag_t bst; + bus_space_handle_t bsh; + } sc_bas; + + struct resource *sc_pri_ires; /* primary irq resource */ + void *sc_pri_ihand; /* primary irq handler */ + int sc_pri_irid; /* primary irq resource id */ + + struct resource *sc_sec_ires; /* secondary irq resource */ + void *sc_sec_ihand; /* secondary irq handler */ + int sc_sec_irid; /* secondary irq resource id */ +}; + +/* Locking macros */ +#define SEC_LOCK(sc, what) \ + mtx_lock(&(sc)->sc_ ## what ## _lock) +#define SEC_UNLOCK(sc, what) \ + mtx_unlock(&(sc)->sc_ ## what ## _lock) +#define SEC_LOCK_ASSERT(sc, what) \ + mtx_assert(&(sc)->sc_ ## what ## _lock, MA_OWNED) + +/* Read/Write definitions */ +#define SEC_READ(sc, reg) \ + bus_space_read_8((sc)->sc_bas.bst, (sc)->sc_bas.bsh, (reg)) +#define SEC_WRITE(sc, reg, val) \ + bus_space_write_8((sc)->sc_bas.bst, (sc)->sc_bas.bsh, (reg), (val)) + +/* Base allocation macros (warning: wrap must be 2^n) */ +#define SEC_CNT_INIT(sc, cnt, wrap) \ + (((sc)->cnt) = ((wrap) - 1)) +#define SEC_ADD(sc, cnt, wrap, val) \ + ((sc)->cnt = (((sc)->cnt) + (val)) & ((wrap) - 1)) +#define SEC_INC(sc, cnt, wrap) \ + SEC_ADD(sc, cnt, wrap, 1) +#define SEC_DEC(sc, cnt, wrap) \ + SEC_ADD(sc, cnt, wrap, -1) +#define SEC_GET_GENERIC(sc, tab, cnt, wrap) \ + ((sc)->tab[SEC_INC(sc, cnt, wrap)]) +#define SEC_PUT_GENERIC(sc, tab, cnt, wrap, val) \ + ((sc)->tab[SEC_INC(sc, cnt, wrap)] = val) + +/* Interface for descriptors */ +#define SEC_GET_FREE_DESC(sc) \ + &SEC_GET_GENERIC(sc, sc_desc, sc_free_desc_get_cnt, SEC_DESCRIPTORS) + +#define SEC_PUT_BACK_FREE_DESC(sc) \ + SEC_DEC(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS) + +#define SEC_DESC_FREE2READY(sc) \ + SEC_INC(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS) + +#define SEC_GET_READY_DESC(sc) \ + &SEC_GET_GENERIC(sc, sc_desc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS) + +#define SEC_PUT_BACK_READY_DESC(sc) \ + SEC_DEC(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS) + +#define SEC_DESC_READY2QUEUED(sc) \ + SEC_INC(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS) + +#define SEC_GET_QUEUED_DESC(sc) \ + &SEC_GET_GENERIC(sc, sc_desc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS) + +#define SEC_PUT_BACK_QUEUED_DESC(sc) \ + SEC_DEC(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS) + +#define SEC_DESC_QUEUED2FREE(sc) \ + SEC_INC(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS) + +#define SEC_FREE_DESC_CNT(sc) \ + (((sc)->sc_free_desc_put_cnt - (sc)->sc_free_desc_get_cnt - 1) \ + & (SEC_DESCRIPTORS - 1)) + +#define SEC_READY_DESC_CNT(sc) \ + (((sc)->sc_ready_desc_put_cnt - (sc)->sc_ready_desc_get_cnt) & \ + (SEC_DESCRIPTORS - 1)) + +#define SEC_QUEUED_DESC_CNT(sc) \ + (((sc)->sc_queued_desc_put_cnt - (sc)->sc_queued_desc_get_cnt) \ + & (SEC_DESCRIPTORS - 1)) + +#define SEC_DESC_SYNC(sc, mode) do { \ + sec_sync_dma_mem(&((sc)->sc_desc_dmem), (mode)); \ + sec_sync_dma_mem(&((sc)->sc_lt_dmem), (mode)); \ +} while (0) + +#define SEC_DESC_SYNC_POINTERS(desc, mode) do { \ + u_int i; \ + for (i = 0; i < SEC_POINTERS; i++) \ + 
sec_sync_dma_mem(&((desc)->sd_ptr_dmem[i]), (mode)); \ +} while (0) + +#define SEC_DESC_FREE_POINTERS(desc) do { \ + u_int i; \ + for (i = 0; i < SEC_POINTERS; i++) \ + sec_free_dma_mem(&(desc)->sd_ptr_dmem[i]); \ +} while (0); + +#define SEC_DESC_PUT_BACK_LT(sc, desc) \ + SEC_PUT_BACK_LT(sc, (desc)->sd_lt_used) + +#define SEC_DESC_FREE_LT(sc, desc) \ + SEC_FREE_LT(sc, (desc)->sd_lt_used) + +/* Interface for link tables */ +#define SEC_ALLOC_LT_ENTRY(sc) \ + &SEC_GET_GENERIC(sc, sc_lt, sc_lt_alloc_cnt, SEC_LT_ENTRIES) + +#define SEC_PUT_BACK_LT(sc, num) \ + SEC_ADD(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES, -(num)) + +#define SEC_FREE_LT(sc, num) \ + SEC_ADD(sc, sc_lt_free_cnt, SEC_LT_ENTRIES, num) + +#define SEC_FREE_LT_CNT(sc) \ + (((sc)->sc_lt_free_cnt - (sc)->sc_lt_alloc_cnt - 1) \ + & (SEC_LT_ENTRIES - 1)) + +/* DMA Maping defines */ +#define SEC_MEMORY 0 +#define SEC_UIO 1 +#define SEC_MBUF 2 + +/* Size of SEC registers area */ +#define SEC_IO_SIZE 0x10000 + +/* SEC Controller registers */ +#define SEC_IER 0x1008 +#define SEC_INT_CH_DN(n) (1ULL << (((n) * 2) + 32)) +#define SEC_INT_CH_ERR(n) (1ULL << (((n) * 2) + 33)) +#define SEC_INT_ITO (1ULL << 55) + +#define SEC_ISR 0x1010 +#define SEC_ICR 0x1018 +#define SEC_ID 0x1020 + +#define SEC_EUASR 0x1028 +#define SEC_EUASR_RNGU(r) (((r) >> 0) & 0xF) +#define SEC_EUASR_PKEU(r) (((r) >> 8) & 0xF) +#define SEC_EUASR_KEU(r) (((r) >> 16) & 0xF) +#define SEC_EUASR_CRCU(r) (((r) >> 20) & 0xF) +#define SEC_EUASR_DEU(r) (((r) >> 32) & 0xF) +#define SEC_EUASR_AESU(r) (((r) >> 40) & 0xF) +#define SEC_EUASR_MDEU(r) (((r) >> 48) & 0xF) +#define SEC_EUASR_AFEU(r) (((r) >> 56) & 0xF) + +#define SEC_MCR 0x1030 +#define SEC_MCR_SWR (1ULL << 32) + +/* SEC Channel registers */ +#define SEC_CHAN_CCR(n) (((n) * 0x100) + 0x1108) +#define SEC_CHAN_CCR_CDIE (1ULL << 1) +#define SEC_CHAN_CCR_NT (1ULL << 2) +#define SEC_CHAN_CCR_AWSE (1ULL << 3) +#define SEC_CHAN_CCR_CDWE (1ULL << 4) +#define SEC_CHAN_CCR_BS (1ULL << 8) +#define SEC_CHAN_CCR_WGN (1ULL << 13) +#define SEC_CHAN_CCR_R (1ULL << 32) +#define SEC_CHAN_CCR_CON (1ULL << 33) + +#define SEC_CHAN_CSR(n) (((n) * 0x100) + 0x1110) +#define SEC_CHAN_CSR2_FFLVL_M 0x1FULL +#define SEC_CHAN_CSR2_FFLVL_S 56 +#define SEC_CHAN_CSR2_GSTATE_M 0x0FULL +#define SEC_CHAN_CSR2_GSTATE_S 48 +#define SEC_CHAN_CSR2_PSTATE_M 0x0FULL +#define SEC_CHAN_CSR2_PSTATE_S 40 +#define SEC_CHAN_CSR2_MSTATE_M 0x3FULL +#define SEC_CHAN_CSR2_MSTATE_S 32 +#define SEC_CHAN_CSR3_FFLVL_M 0x1FULL +#define SEC_CHAN_CSR3_FFLVL_S 24 +#define SEC_CHAN_CSR3_MSTATE_M 0x1FFULL +#define SEC_CHAN_CSR3_MSTATE_S 32 +#define SEC_CHAN_CSR3_PSTATE_M 0x7FULL +#define SEC_CHAN_CSR3_PSTATE_S 48 +#define SEC_CHAN_CSR3_GSTATE_M 0x7FULL +#define SEC_CHAN_CSR3_GSTATE_S 56 + +#define SEC_CHAN_CDPR(n) (((n) * 0x100) + 0x1140) +#define SEC_CHAN_FF(n) (((n) * 0x100) + 0x1148) + +/* SEC Execution Units numbers */ +#define SEC_EU_NONE 0x0 +#define SEC_EU_AFEU 0x1 +#define SEC_EU_DEU 0x2 +#define SEC_EU_MDEU_A 0x3 +#define SEC_EU_MDEU_B 0xB +#define SEC_EU_RNGU 0x4 +#define SEC_EU_PKEU 0x5 +#define SEC_EU_AESU 0x6 +#define SEC_EU_KEU 0x7 +#define SEC_EU_CRCU 0x8 + +/* SEC descriptor types */ +#define SEC_DT_COMMON_NONSNOOP 0x02 +#define SEC_DT_HMAC_SNOOP 0x04 + +/* SEC AESU declarations and definitions */ +#define SEC_AESU_MODE_ED (1ULL << 0) +#define SEC_AESU_MODE_CBC (1ULL << 1) + +/* SEC DEU declarations and definitions */ +#define SEC_DEU_MODE_ED (1ULL << 0) +#define SEC_DEU_MODE_TS (1ULL << 1) +#define SEC_DEU_MODE_CBC (1ULL << 2) + +/* SEC MDEU declarations and definitions 
+#define SEC_HMAC_HASH_LEN	12
+#define SEC_MDEU_MODE_SHA1	0x00	/* MDEU A */
+#define SEC_MDEU_MODE_SHA384	0x00	/* MDEU B */
+#define SEC_MDEU_MODE_SHA256	0x01
+#define SEC_MDEU_MODE_MD5	0x02	/* MDEU A */
+#define SEC_MDEU_MODE_SHA512	0x02	/* MDEU B */
+#define SEC_MDEU_MODE_SHA224	0x03
+#define SEC_MDEU_MODE_PD	(1ULL << 2)
+#define SEC_MDEU_MODE_HMAC	(1ULL << 3)
+#define SEC_MDEU_MODE_INIT	(1ULL << 4)
+#define SEC_MDEU_MODE_SMAC	(1ULL << 5)
+#define SEC_MDEU_MODE_CICV	(1ULL << 6)
+#define SEC_MDEU_MODE_CONT	(1ULL << 7)
+
+#endif
diff --git a/sys/modules/sec/Makefile b/sys/modules/sec/Makefile
new file mode 100644
index 0000000..38fea03
--- /dev/null
+++ b/sys/modules/sec/Makefile
@@ -0,0 +1,7 @@
+.PATH: ${.CURDIR}/../../dev/sec
+
+KMOD=	sec
+SRCS=	sec.c
+SRCS+=	bus_if.h device_if.h
+
+.include
diff --git a/sys/powerpc/conf/MPC85XX b/sys/powerpc/conf/MPC85XX
index d2f4e8b..c5c1b7b 100644
--- a/sys/powerpc/conf/MPC85XX
+++ b/sys/powerpc/conf/MPC85XX
@@ -84,6 +86,7 @@ device		random
 #device		rl
 device		scbus
 device		scc
+device		sec
 device		tsec
 device		tun
 device		uart
diff --git a/sys/powerpc/include/ocpbus.h b/sys/powerpc/include/ocpbus.h
index 0b93ed0..48b593c 100644
--- a/sys/powerpc/include/ocpbus.h
+++ b/sys/powerpc/include/ocpbus.h
@@ -42,5 +42,6 @@
 #define OCPBUS_DEVTYPE_PCIB	5
 #define OCPBUS_DEVTYPE_LBC	6
 #define OCPBUS_DEVTYPE_I2C	7
+#define OCPBUS_DEVTYPE_SEC	8
 
 #endif /* _MACHINE_OCPBUS_H_ */
diff --git a/sys/powerpc/include/pio.h b/sys/powerpc/include/pio.h
index 44b5e03..881da3b 100644
--- a/sys/powerpc/include/pio.h
+++ b/sys/powerpc/include/pio.h
@@ -61,6 +61,13 @@ __outl(volatile u_int32_t *a, u_int32_t v)
 }
 
 static __inline void
+__outll(volatile u_int64_t *a, u_int64_t v)
+{
+	*a = v;
+	__asm__ volatile("eieio; sync");
+}
+
+static __inline void
 __outwrb(volatile u_int16_t *a, u_int16_t v)
 {
 	__asm__ volatile("sthbrx %0, 0, %1" :: "r"(v), "r"(a));
@@ -104,6 +111,16 @@ __inl(volatile u_int32_t *a)
 	return _v_;
 }
 
+static __inline u_int64_t
+__inll(volatile u_int64_t *a)
+{
+	u_int64_t _v_;
+
+	_v_ = *a;
+	__asm__ volatile("eieio; sync");
+	return _v_;
+}
+
 static __inline u_int16_t
 __inwrb(volatile u_int16_t *a)
 {
@@ -130,12 +147,16 @@ __inlrb(volatile u_int32_t *a)
 #define out16(a,v) outw(a,v)
 #define outl(a,v) (__outl((volatile u_int32_t *)(a), v))
 #define out32(a,v) outl(a,v)
+#define outll(a,v) (__outll((volatile u_int64_t *)(a), v))
+#define out64(a,v) outll(a,v)
 #define inb(a) (__inb((volatile u_int8_t *)(a)))
 #define in8(a) inb(a)
 #define inw(a) (__inw((volatile u_int16_t *)(a)))
 #define in16(a) inw(a)
 #define inl(a) (__inl((volatile u_int32_t *)(a)))
 #define in32(a) inl(a)
+#define inll(a) (__inll((volatile u_int64_t *)(a)))
+#define in64(a) inll(a)
 
 #define out8rb(a,v) outb(a,v)
 #define outwrb(a,v) (__outwrb((volatile u_int16_t *)(a), v))
@@ -174,6 +195,14 @@ __outsl(volatile u_int32_t *a, const u_int32_t *s, size_t c)
 }
 
 static __inline void
+__outsll(volatile u_int64_t *a, const u_int64_t *s, size_t c)
+{
+	while (c--)
+		*a = *s++;
+	__asm__ volatile("eieio; sync");
+}
+
+static __inline void
 __outswrb(volatile u_int16_t *a, const u_int16_t *s, size_t c)
 {
 	while (c--)
@@ -214,6 +243,14 @@ __insl(volatile u_int32_t *a, u_int32_t *d, size_t c)
 }
 
 static __inline void
+__insll(volatile u_int64_t *a, u_int64_t *d, size_t c)
+{
+	while (c--)
+		*d++ = *a;
+	__asm__ volatile("eieio; sync");
+}
+
+static __inline void
 __inswrb(volatile u_int16_t *a, u_int16_t *d, size_t c)
 {
 	while (c--)
@@ -235,12 +272,16 @@ __inslrb(volatile u_int32_t *a, u_int32_t *d, size_t c)
 #define outs16(a,s,c) outsw(a,s,c)
 #define outsl(a,s,c) (__outsl((volatile u_int32_t *)(a), s, c))
 #define outs32(a,s,c) outsl(a,s,c)
+#define outsll(a,s,c) (__outsll((volatile u_int64_t *)(a), s, c))
+#define outs64(a,s,c) outsll(a,s,c)
 #define insb(a,d,c) (__insb((volatile u_int8_t *)(a), d, c))
 #define ins8(a,d,c) insb(a,d,c)
 #define insw(a,d,c) (__insw((volatile u_int16_t *)(a), d, c))
 #define ins16(a,d,c) insw(a,d,c)
 #define insl(a,d,c) (__insl((volatile u_int32_t *)(a), d, c))
 #define ins32(a,d,c) insl(a,d,c)
+#define insll(a,d,c) (__insll((volatile u_int64_t *)(a), d, c))
+#define ins64(a,d,c) insll(a,d,c)
 
 #define outs8rb(a,s,c) outsb(a,s,c)
 #define outswrb(a,s,c) (__outswrb((volatile u_int16_t *)(a), s, c))
diff --git a/sys/powerpc/mpc85xx/ocpbus.c b/sys/powerpc/mpc85xx/ocpbus.c
index 5816282..6483e5b 100644
--- a/sys/powerpc/mpc85xx/ocpbus.c
+++ b/sys/powerpc/mpc85xx/ocpbus.c
@@ -267,7 +267,7 @@ ocpbus_attach(device_t dev)
 	ocpbus_mk_child(dev, OCPBUS_DEVTYPE_TSEC, 3);
 	ocpbus_mk_child(dev, OCPBUS_DEVTYPE_PIC, 0);
 	ocpbus_mk_child(dev, OCPBUS_DEVTYPE_QUICC, 0);
-
+	ocpbus_mk_child(dev, OCPBUS_DEVTYPE_SEC, 0);
 
 	/* Set up IRQ rman */
 	start = 0;
@@ -406,6 +406,11 @@ const struct ocp_resource mpc8555_resources[] = {
 	    OCP85XX_I2C_SIZE},
 	{OCPBUS_DEVTYPE_I2C, 1, SYS_RES_IRQ, 0, PIC_IRQ_INT(27), 1},
 
+	{OCPBUS_DEVTYPE_SEC, 0, SYS_RES_MEMORY, 0, OCP85XX_SEC_OFF,
+	    OCP85XX_SEC_SIZE},
+	{OCPBUS_DEVTYPE_SEC, 0, SYS_RES_IRQ, 0, PIC_IRQ_INT(29), 1},
+	{OCPBUS_DEVTYPE_SEC, 0, SYS_RES_IRQ, 1, PIC_IRQ_INT(42), 1},
+
 	{0}
 };

diff --git a/sys/powerpc/mpc85xx/ocpbus.h b/sys/powerpc/mpc85xx/ocpbus.h
index 4becafd..9c24f93 100644
--- a/sys/powerpc/mpc85xx/ocpbus.h
+++ b/sys/powerpc/mpc85xx/ocpbus.h
@@ -118,6 +118,8 @@
 #define OCP85XX_OPENPIC_SIZE	0x200B4
 #define OCP85XX_QUICC_OFF	0x80000
 #define OCP85XX_QUICC_SIZE	0x20000
+#define OCP85XX_SEC_OFF		0x30000
+#define OCP85XX_SEC_SIZE	0x10000
 
 /*
  * PIC definitions
diff --git a/sys/powerpc/powerpc/bus_machdep.c b/sys/powerpc/powerpc/bus_machdep.c
index f75924f..8e258e7 100644
--- a/sys/powerpc/powerpc/bus_machdep.c
+++ b/sys/powerpc/powerpc/bus_machdep.c
@@ -190,7 +190,13 @@ bs_be_rs_4(bus_space_handle_t bsh, bus_size_t ofs)
 static uint64_t
 bs_be_rs_8(bus_space_handle_t bsh, bus_size_t ofs)
 {
-	TODO;
+	volatile uint64_t *addr;
+	uint64_t res;
+
+	addr = __ppc_ba(bsh, ofs);
+	res = *addr;
+	CTR4(KTR_BE_IO, "%s(bsh=%#x, ofs=%#x) = %#x", __func__, bsh, ofs, res);
+	return (res);
 }
 
 static void
@@ -212,9 +218,9 @@ bs_be_rm_4(bus_space_handle_t bsh, bus_size_t ofs, uint32_t *addr, size_t cnt)
 }
 
 static void
-bs_be_rm_8(bus_space_handle_t bshh, bus_size_t ofs, uint64_t *addr, size_t cnt)
+bs_be_rm_8(bus_space_handle_t bsh, bus_size_t ofs, uint64_t *addr, size_t cnt)
 {
-	TODO;
+	ins64(__ppc_ba(bsh, ofs), addr, cnt);
 }
 
 static void
@@ -250,7 +256,11 @@ bs_be_rr_4(bus_space_handle_t bsh, bus_size_t ofs, uint32_t *addr, size_t cnt)
 static void
 bs_be_rr_8(bus_space_handle_t bsh, bus_size_t ofs, uint64_t *addr, size_t cnt)
 {
-	TODO;
+	volatile uint64_t *s = __ppc_ba(bsh, ofs);
+
+	while (cnt--)
+		*addr++ = *s++;
+	__asm __volatile("eieio; sync");
 }
 
 static void
@@ -286,7 +296,11 @@ bs_be_ws_4(bus_space_handle_t bsh, bus_size_t ofs, uint32_t val)
 static void
 bs_be_ws_8(bus_space_handle_t bsh, bus_size_t ofs, uint64_t val)
 {
-	TODO;
+	volatile uint64_t *addr;
+
+	addr = __ppc_ba(bsh, ofs);
+	*addr = val;
+	CTR4(KTR_BE_IO, "%s(bsh=%#x, ofs=%#x, val=%#x)", __func__, bsh, ofs, val);
 }
 
 static void
@@ -314,7 +328,7 @@ static void
 bs_be_wm_8(bus_space_handle_t bsh, bus_size_t ofs, const uint64_t *addr,
     bus_size_t cnt)
 {
-	TODO;
+	outsll(__ppc_ba(bsh, ofs), addr, cnt);
 }
 
 static void
@@ -354,7 +368,11 @@ static void
 bs_be_wr_8(bus_space_handle_t bsh, bus_size_t ofs, const uint64_t *addr,
     size_t cnt)
 {
-	TODO;
+	volatile uint64_t *d = __ppc_ba(bsh, ofs);
+
+	while (cnt--)
+		*d++ = *addr++;
+	__asm __volatile("eieio; sync");
 }
 
 static void
@@ -390,7 +408,11 @@ bs_be_sm_4(bus_space_handle_t bsh, bus_size_t ofs, uint32_t val, size_t cnt)
 static void
 bs_be_sm_8(bus_space_handle_t bsh, bus_size_t ofs, uint64_t val, size_t cnt)
 {
-	TODO;
+	volatile uint64_t *d = __ppc_ba(bsh, ofs);
+
+	while (cnt--)
+		*d = val;
+	__asm __volatile("eieio; sync");
 }
 
 static void
@@ -426,7 +448,11 @@ bs_be_sr_4(bus_space_handle_t bsh, bus_size_t ofs, uint32_t val, size_t cnt)
 static void
 bs_be_sr_8(bus_space_handle_t bsh, bus_size_t ofs, uint64_t val, size_t cnt)
 {
-	TODO;
+	volatile uint64_t *d = __ppc_ba(bsh, ofs);
+
+	while (cnt--)
+		*d++ = val;
+	__asm __volatile("eieio; sync");
 }
 
 /*
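Editor's note: the short sketch below is illustrative only and is not part of the patch. It shows how a consumer of the newly implemented 64-bit big-endian bus-space backends might touch SEC registers; on this platform bus_space_read_8()/bus_space_write_8() resolve to bs_be_rs_8()/bs_be_ws_8() above. The wrapper names, the small tag/handle container, and the header path for the SEC register macros are hypothetical (a real driver would normally keep the tag and handle in its softc); SEC_ID, SEC_MCR and SEC_MCR_SWR are the definitions added earlier in this change.

/*
 * Illustrative sketch only -- not part of the patch.
 */
#include <sys/param.h>
#include <sys/bus.h>

#include <machine/bus.h>

#include <dev/sec/sec.h>	/* assumed location of the SEC register macros */

struct sec_io_sketch {			/* hypothetical container */
	bus_space_tag_t		bst;	/* tag for the OCP85XX_SEC_OFF window */
	bus_space_handle_t	bsh;
};

/* 64-bit read of the SEC version register; serviced by bs_be_rs_8(). */
static uint64_t
sec_sketch_read_id(struct sec_io_sketch *io)
{

	return (bus_space_read_8(io->bst, io->bsh, SEC_ID));
}

/* Request a software reset of the engine; serviced by bs_be_ws_8(). */
static void
sec_sketch_sw_reset(struct sec_io_sketch *io)
{

	bus_space_write_8(io->bst, io->bsh, SEC_MCR, SEC_MCR_SWR);
}

Each access goes through a single 64-bit load or store followed by the eieio; sync barrier pair, matching the semantics of the in64()/out64() helpers added to pio.h.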