diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 5e659b9..30d097b 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -481,6 +481,9 @@ x86/cpufreq/powernow.c		optional	cpufreq
 x86/cpufreq/est.c		optional	cpufreq
 x86/cpufreq/hwpstate.c		optional	cpufreq
 x86/cpufreq/p4tcc.c		optional	cpufreq
+x86/iommu/busdma_dmar.c		optional	acpi pci
+x86/iommu/intel_drv.c		optional	acpi pci
+x86/iommu/intel_utils.c		optional	acpi pci
 x86/isa/atpic.c			optional	atpic isa
 x86/isa/atrtc.c			standard
 x86/isa/clock.c			standard
@@ -491,6 +494,7 @@ x86/isa/nmi.c			standard
 x86/isa/orm.c			optional	isa
 x86/pci/pci_bus.c		optional	pci
 x86/pci/qpi.c			optional	pci
+x86/x86/busdma_bounce.c		standard
 x86/x86/busdma_machdep.c	standard
 x86/x86/dump_machdep.c		standard
 x86/x86/intr_machdep.c		standard
diff --git a/sys/conf/files.i386 b/sys/conf/files.i386
index f4c595a..d6f101a 100644
--- a/sys/conf/files.i386
+++ b/sys/conf/files.i386
@@ -534,6 +534,9 @@ x86/cpufreq/hwpstate.c		optional	cpufreq
 x86/cpufreq/p4tcc.c		optional	cpufreq
 x86/cpufreq/powernow.c		optional	cpufreq
 x86/cpufreq/smist.c		optional	cpufreq
+x86/iommu/busdma_dmar.c		optional	acpi pci
+x86/iommu/intel_drv.c		optional	acpi pci
+x86/iommu/intel_utils.c		optional	acpi pci
 x86/isa/atpic.c			optional	atpic
 x86/isa/atrtc.c			optional	native
 x86/isa/clock.c			optional	native
@@ -544,6 +547,7 @@ x86/isa/nmi.c			standard
 x86/isa/orm.c			optional	isa
 x86/pci/pci_bus.c		optional	pci
 x86/pci/qpi.c			optional	pci
+x86/x86/busdma_bounce.c		standard
 x86/x86/busdma_machdep.c	standard
 x86/x86/dump_machdep.c		standard
 x86/x86/intr_machdep.c		standard
diff --git a/sys/dev/acpica/acpi_pci.c b/sys/dev/acpica/acpi_pci.c
index 39fba88..5fdd08c 100644
--- a/sys/dev/acpica/acpi_pci.c
+++ b/sys/dev/acpica/acpi_pci.c
@@ -80,6 +80,7 @@ static ACPI_STATUS acpi_pci_save_handle(ACPI_HANDLE handle, UINT32 level,
 static int	acpi_pci_set_powerstate_method(device_t dev, device_t child,
 		    int state);
 static void	acpi_pci_update_device(ACPI_HANDLE handle, device_t pci_child);
+static bus_dma_tag_t acpi_pci_get_dma_tag(device_t bus, device_t child);
 
 static device_method_t acpi_pci_methods[] = {
 	/* Device interface */
@@ -90,6 +91,7 @@ static device_method_t acpi_pci_methods[] = {
 	DEVMETHOD(bus_read_ivar,	acpi_pci_read_ivar),
 	DEVMETHOD(bus_write_ivar,	acpi_pci_write_ivar),
 	DEVMETHOD(bus_child_location_str, acpi_pci_child_location_str_method),
+	DEVMETHOD(bus_get_dma_tag,	acpi_pci_get_dma_tag),
 
 	/* PCI interface */
 	DEVMETHOD(pci_set_powerstate,	acpi_pci_set_powerstate_method),
@@ -308,3 +310,19 @@ acpi_pci_attach(device_t dev)
 
 	return (bus_generic_attach(dev));
 }
+
+bus_dma_tag_t dmar_get_dma_tag(device_t dev, device_t child);
+static bus_dma_tag_t
+acpi_pci_get_dma_tag(device_t bus, device_t child)
+{
+	bus_dma_tag_t tag;
+
+	if (device_get_parent(child) == bus) {
+		/* try dmar and return if it works */
+		tag = dmar_get_dma_tag(bus, child);
+	} else
+		tag = NULL;
+	if (tag == NULL)
+		tag = pci_get_dma_tag(bus, child);
+	return (tag);
+}
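The acpi_pci_get_dma_tag() hunk above asks the DMAR code for a tag first and falls back to the ordinary PCI tag when no DMAR unit claims the child. A minimal userland model of that lookup-with-fallback (every name below is an illustrative stand-in, not kernel API):

    #include <stdio.h>

    typedef const char *tag_t;	/* stand-in for bus_dma_tag_t */

    static tag_t
    dmar_lookup(int has_dmar)
    {
        /* Returns NULL when no DMAR unit covers the device. */
        return (has_dmar ? "dmar-tag" : NULL);
    }

    static tag_t
    get_dma_tag(int child_is_direct, int has_dmar)
    {
        tag_t tag;

        tag = child_is_direct ? dmar_lookup(has_dmar) : NULL;
        if (tag == NULL)
            tag = "plain-pci-tag";	/* fallback path */
        return (tag);
    }

    int
    main(void)
    {
        printf("%s\n", get_dma_tag(1, 1));	/* dmar-tag */
        printf("%s\n", get_dma_tag(1, 0));	/* plain-pci-tag */
        return (0);
    }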
diff --git a/sys/dev/ahci/ahci.c b/sys/dev/ahci/ahci.c
index f7dba1e..6ec63ba 100644
--- a/sys/dev/ahci/ahci.c
+++ b/sys/dev/ahci/ahci.c
@@ -452,9 +452,23 @@ ahci_attach(device_t dev)
 	if ((ctlr->caps & AHCI_CAP_CCCS) == 0)
 		ctlr->ccc = 0;
 	ctlr->emloc = ATA_INL(ctlr->r_mem, AHCI_EM_LOC);
+
+	/* Create controller-wide DMA tag. */
+	if (bus_dma_tag_create(bus_get_dma_tag(dev), 0, 0,
+	    (ctlr->caps & AHCI_CAP_64BIT) ? BUS_SPACE_MAXADDR :
+	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+	    BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE,
+	    0, NULL, NULL, &ctlr->dma_tag)) {
+		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid,
+		    ctlr->r_mem);
+		rman_fini(&ctlr->sc_iomem);
+		return ENXIO;
+	}
+
 	ahci_ctlr_setup(dev);
 	/* Setup interrupts. */
 	if (ahci_setup_interrupt(dev)) {
+		bus_dma_tag_destroy(ctlr->dma_tag);
 		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid,
 		    ctlr->r_mem);
 		rman_fini(&ctlr->sc_iomem);
 		return ENXIO;
@@ -544,6 +558,7 @@ ahci_detach(device_t dev)
 		}
 	}
 	pci_release_msi(dev);
+	bus_dma_tag_destroy(ctlr->dma_tag);
 	/* Free memory. */
 	rman_fini(&ctlr->sc_iomem);
 	if (ctlr->r_mem)
@@ -876,6 +891,14 @@ ahci_child_location_str(device_t dev, device_t child, char *buf,
 	return (0);
 }
 
+static bus_dma_tag_t
+ahci_get_dma_tag(device_t dev, device_t child)
+{
+	struct ahci_controller *ctlr = device_get_softc(dev);
+
+	return (ctlr->dma_tag);
+}
+
 devclass_t ahci_devclass;
 static device_method_t ahci_methods[] = {
 	DEVMETHOD(device_probe,		ahci_probe),
@@ -889,6 +912,7 @@ static device_method_t ahci_methods[] = {
 	DEVMETHOD(bus_setup_intr,	ahci_setup_intr),
 	DEVMETHOD(bus_teardown_intr,	ahci_teardown_intr),
 	DEVMETHOD(bus_child_location_str, ahci_child_location_str),
+	DEVMETHOD(bus_get_dma_tag,	ahci_get_dma_tag),
 	{ 0, 0 }
 };
 static driver_t ahci_driver = {
@@ -1198,13 +1222,9 @@ ahci_dmainit(device_t dev)
 	struct ahci_dc_cb_args dcba;
 	size_t rfsize;
 
-	if (ch->caps & AHCI_CAP_64BIT)
-		ch->dma.max_address = BUS_SPACE_MAXADDR;
-	else
-		ch->dma.max_address = BUS_SPACE_MAXADDR_32BIT;
 	/* Command area. */
 	if (bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0,
-	    ch->dma.max_address, BUS_SPACE_MAXADDR,
+	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
 	    NULL, NULL, AHCI_WORK_SIZE, 1, AHCI_WORK_SIZE,
 	    0, NULL, NULL, &ch->dma.work_tag))
 		goto error;
@@ -1223,7 +1243,7 @@ ahci_dmainit(device_t dev)
 	else
 		rfsize = 256;
 	if (bus_dma_tag_create(bus_get_dma_tag(dev), rfsize, 0,
-	    ch->dma.max_address, BUS_SPACE_MAXADDR,
+	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
 	    NULL, NULL, rfsize, 1, rfsize,
 	    0, NULL, NULL, &ch->dma.rfis_tag))
 		goto error;
@@ -1238,7 +1258,7 @@ ahci_dmainit(device_t dev)
 	ch->dma.rfis_bus = dcba.maddr;
 	/* Data area. */
 	if (bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
-	    ch->dma.max_address, BUS_SPACE_MAXADDR,
+	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
 	    NULL, NULL,
 	    AHCI_SG_ENTRIES * PAGE_SIZE * ch->numslots,
 	    AHCI_SG_ENTRIES, AHCI_PRD_MAX,
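With the controller-wide tag in place, the per-channel max_address bookkeeping removed above becomes unnecessary: the 32-bit restriction lives once in ctlr->dma_tag, and channel tags created with bus_get_dma_tag(dev) as their parent inherit it, which is why they can now pass plain BUS_SPACE_MAXADDR. A runnable sketch of how chained tags combine restrictions, assuming the usual busdma rule that the stricter (lower) exclusion boundary wins:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative model of restriction inheritance, not kernel code. */
    static uint64_t
    effective_lowaddr(uint64_t parent_lowaddr, uint64_t child_lowaddr)
    {
        /* The stricter (smaller) exclusion boundary wins. */
        return (parent_lowaddr < child_lowaddr ?
            parent_lowaddr : child_lowaddr);
    }

    int
    main(void)
    {
        uint64_t ctlr = 0xffffffffULL;		/* 32-bit-only controller tag */
        uint64_t chan = 0xffffffffffffffffULL;	/* channel asks for MAXADDR */

        printf("0x%jx\n", (uintmax_t)effective_lowaddr(ctlr, chan));
        return (0);
    }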
diff --git a/sys/dev/ahci/ahci.h b/sys/dev/ahci/ahci.h
index 2eef321..c3bee40 100644
--- a/sys/dev/ahci/ahci.h
+++ b/sys/dev/ahci/ahci.h
@@ -348,7 +348,6 @@ struct ata_dma {
 	uint8_t *rfis;			/* FIS receive area */
 	bus_addr_t rfis_bus;		/* bus address of rfis */
 	bus_dma_tag_t data_tag;		/* data DMA tag */
-	u_int64_t max_address;		/* highest DMA'able address */
 };
 
 enum ahci_slot_states {
@@ -453,6 +452,7 @@ struct ahci_enclosure {
 /* structure describing a AHCI controller */
 struct ahci_controller {
 	device_t dev;
+	bus_dma_tag_t dma_tag;
 	int r_rid;
 	struct resource *r_mem;
 	struct rman sc_iomem;
diff --git a/sys/dev/ata/ata-pci.c b/sys/dev/ata/ata-pci.c
index f47ea82..59d6825 100644
--- a/sys/dev/ata/ata-pci.c
+++ b/sys/dev/ata/ata-pci.c
@@ -572,6 +572,13 @@ ata_pci_child_location_str(device_t dev, device_t child, char *buf,
 	return (0);
 }
 
+static bus_dma_tag_t
+ata_pci_get_dma_tag(device_t bus, device_t child)
+{
+
+	return (bus_get_dma_tag(bus));
+}
+
 static device_method_t ata_pci_methods[] = {
 	/* device interface */
 	DEVMETHOD(device_probe,		ata_pci_probe),
@@ -594,6 +601,7 @@ static device_method_t ata_pci_methods[] = {
 	DEVMETHOD(pci_write_config,	ata_pci_write_config),
 	DEVMETHOD(bus_print_child,	ata_pci_print_child),
 	DEVMETHOD(bus_child_location_str, ata_pci_child_location_str),
+	DEVMETHOD(bus_get_dma_tag,	ata_pci_get_dma_tag),
 	DEVMETHOD_END
 };
 
diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c
index 79d5e57..a4198ef 100644
--- a/sys/dev/pci/pci.c
+++ b/sys/dev/pci/pci.c
@@ -70,10 +70,6 @@ __FBSDID("$FreeBSD$");
 #include "pcib_if.h"
 #include "pci_if.h"
 
-#if (BUS_SPACE_MAXADDR > 0xFFFFFFFF)
-#define	PCI_DMA_BOUNDARY	0x100000000
-#endif
-
 #define	PCIR_IS_BIOS(cfg, reg)						\
 	(((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) ||	\
 	 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
@@ -99,7 +95,6 @@ static void		pci_load_vendor_data(void);
 static int		pci_describe_parse_line(char **ptr, int *vendor,
 			    int *device, char **desc);
 static char		*pci_describe_device(device_t dev);
-static bus_dma_tag_t	pci_get_dma_tag(device_t bus, device_t dev);
 static int		pci_modevent(module_t mod, int what, void *arg);
 static void		pci_hdrtypedata(device_t pcib, int b, int s, int f,
 			    pcicfgregs *cfg);
diff --git a/sys/dev/pci/pcivar.h b/sys/dev/pci/pcivar.h
index db3d8b8..ea7a062 100644
--- a/sys/dev/pci/pcivar.h
+++ b/sys/dev/pci/pcivar.h
@@ -498,6 +498,15 @@ void	pci_restore_state(device_t dev);
 void	pci_save_state(device_t dev);
 int	pci_set_max_read_req(device_t dev, int size);
 
+#ifdef BUS_SPACE_MAXADDR
+#if (BUS_SPACE_MAXADDR > 0xFFFFFFFF)
+#define	PCI_DMA_BOUNDARY	0x100000000
+#else
+#define	PCI_DMA_BOUNDARY	0
+#endif
+bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev);
+#endif
+
 #endif	/* _SYS_BUS_H_ */
 
 /*
diff --git a/sys/dev/pci/vga_pci.c b/sys/dev/pci/vga_pci.c
index 6d796c8..786400b 100644
--- a/sys/dev/pci/vga_pci.c
+++ b/sys/dev/pci/vga_pci.c
@@ -409,6 +409,13 @@ vga_pci_msix_count(device_t dev, device_t child)
 	return (pci_msix_count(dev));
 }
 
+static bus_dma_tag_t
+vga_pci_get_dma_tag(device_t bus, device_t child)
+{
+
+	return (bus_get_dma_tag(bus));
+}
+
 static device_method_t vga_pci_methods[] = {
 	/* Device interface */
 	DEVMETHOD(device_probe,		vga_pci_probe),
@@ -426,6 +433,7 @@ static device_method_t vga_pci_methods[] = {
 	DEVMETHOD(bus_release_resource,	vga_pci_release_resource),
 	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
 	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
+	DEVMETHOD(bus_get_dma_tag,	vga_pci_get_dma_tag),
 
 	/* PCI interface */
 	DEVMETHOD(pci_read_config,	vga_pci_read_config),
diff --git a/sys/x86/include/busdma_impl.h b/sys/x86/include/busdma_impl.h
new file mode 100644
index 0000000..a3089f7
--- /dev/null
+++ b/sys/x86/include/busdma_impl.h
@@ -0,0 +1,94 @@
+/*-
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __X86_BUSDMA_IMPL_H
+#define	__X86_BUSDMA_IMPL_H
+
+struct bus_dma_tag_common {
+	struct bus_dma_impl *impl;
+	struct bus_dma_tag_common *parent;
+	bus_size_t alignment;
+	bus_addr_t boundary;
+	bus_addr_t lowaddr;
+	bus_addr_t highaddr;
+	bus_dma_filter_t *filter;
+	void *filterarg;
+	bus_size_t maxsize;
+	u_int nsegments;
+	bus_size_t maxsegsz;
+	int flags;
+	bus_dma_lock_t *lockfunc;
+	void *lockfuncarg;
+	int ref_count;
+};
+
+struct bus_dma_impl {
+	int (*tag_create)(bus_dma_tag_t parent,
+	    bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr,
+	    bus_addr_t highaddr, bus_dma_filter_t *filter,
+	    void *filterarg, bus_size_t maxsize, int nsegments,
+	    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+	    void *lockfuncarg, bus_dma_tag_t *dmat);
+	int (*tag_destroy)(bus_dma_tag_t dmat);
+	int (*map_create)(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp);
+	int (*map_destroy)(bus_dma_tag_t dmat, bus_dmamap_t map);
+	int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags,
+	    bus_dmamap_t *mapp);
+	void (*mem_free)(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map);
+	int (*load_phys)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    vm_paddr_t buf, bus_size_t buflen, int flags,
+	    bus_dma_segment_t *segs, int *segp);
+	int (*load_buffer)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    void *buf, bus_size_t buflen, pmap_t pmap, int flags,
+	    bus_dma_segment_t *segs, int *segp);
+	void (*map_waitok)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    struct memdesc *mem, bus_dmamap_callback_t *callback,
+	    void *callback_arg);
+	bus_dma_segment_t *(*map_complete)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    bus_dma_segment_t *segs, int nsegs, int error);
+	void (*map_unload)(bus_dma_tag_t dmat, bus_dmamap_t map);
+	void (*map_sync)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    bus_dmasync_op_t op);
+};
+
+void busdma_lock_mutex(void *arg, bus_dma_lock_op_t op);
+void bus_dma_dflt_lock(void *arg, bus_dma_lock_op_t op);
+int bus_dma_run_filter(struct bus_dma_tag_common *dmat, bus_addr_t paddr);
+int common_bus_dma_tag_create(struct bus_dma_tag_common *parent,
+    bus_size_t alignment,
+    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
+    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
+    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+    void *lockfuncarg, size_t sz, void **dmat);
+
+extern struct bus_dma_impl bus_dma_bounce_impl;
+
+#endif
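struct bus_dma_impl above turns the x86 busdma back-end into a method table: the MD front end keeps the public bus_dma* entry points and dispatches through the tag's impl pointer, so the bounce and DMAR implementations can coexist behind one interface. A hedged sketch of such a dispatching wrapper; the wrapper itself is hypothetical, only the types and the impl field come from this header:

    static inline int
    dma_load_phys(struct bus_dma_tag_common *tc, bus_dmamap_t map,
        vm_paddr_t buf, bus_size_t buflen, int flags,
        bus_dma_segment_t *segs, int *segp)
    {

        /* Bounce and DMAR back-ends plug in here via tc->impl. */
        return (tc->impl->load_phys((bus_dma_tag_t)tc, map, buf, buflen,
            flags, segs, segp));
    }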
diff --git a/sys/x86/iommu/busdma_dmar.c b/sys/x86/iommu/busdma_dmar.c
new file mode 100644
index 0000000..6a025df
--- /dev/null
+++ b/sys/x86/iommu/busdma_dmar.c
@@ -0,0 +1,218 @@
+/*-
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+bus_dma_tag_t
+dmar_get_dma_tag(device_t dev, device_t child)
+{
+	struct dmar_unit *dmar;
+	int busno, domain, function, slot;
+
+	domain = pci_get_domain(child);
+	busno = pci_get_bus(child);
+	slot = pci_get_slot(child);
+	function = pci_get_function(child);
+	dmar = dmar_find_bsf(domain, busno, slot, function);
+	if (dmar == NULL)
+		return (NULL);
+	return ((bus_dma_tag_t)&dmar->dmar_tag);
+}
+
+static int
+dmar_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
+    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
+    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+    void *lockfuncarg, bus_dma_tag_t *dmat)
+{
+	struct bus_dma_tag_dmar *newtag;
+	int error;
+
+	*dmat = NULL;
+	error = common_bus_dma_tag_create(parent != NULL ?
+	    &((struct bus_dma_tag_dmar *)parent)->common : NULL, alignment,
+	    boundary, lowaddr, highaddr, filter, filterarg, maxsize,
+	    nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
+	    sizeof(struct bus_dma_tag_dmar), (void **)&newtag);
+	if (error != 0)
+		return (error);
+
+	newtag->common.impl = &bus_dma_dmar_impl;
+
+	if (error != 0)
+		free(newtag, M_DEVBUF);
+	else
+		*dmat = (bus_dma_tag_t)newtag;
+	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
+	    __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
+	    error);
+	return (error);
+}
+
+static int
+dmar_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
+{
+	struct bus_dma_tag_dmar *dmat, *dmat_copy, *parent;
+	int error;
+
+	error = 0;
+	dmat_copy = dmat = (struct bus_dma_tag_dmar *)dmat1;
+
+	if (dmat != NULL) {
+		while (dmat != NULL) {
+			parent = (struct bus_dma_tag_dmar *)dmat->common.parent;
+			atomic_subtract_int(&dmat->common.ref_count, 1);
+			if (dmat->common.ref_count == 0) {
+				free(dmat, M_DEVBUF);
+				dmat = parent;
+			} else
+				dmat = NULL;
+		}
+	}
+	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
+	return (error);
+}
+
+static int
+dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+
+	return (ENOSYS);
+}
+
+static int
+dmar_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+
+	return (ENOSYS);
+}
+
+static int
+dmar_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+    bus_dmamap_t *mapp)
+{
+
+	return (ENOSYS);
+}
+
+static void
+dmar_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+}
+
+static int
+dmar_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
+    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
+    int *segp)
+{
+
+	return (ENOSYS);
+}
+
+static int
+dmar_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
+    int *segp)
+{
+
+	return (ENOSYS);
+}
+
+static void
+dmar_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
+{
+}
+
+static bus_dma_segment_t *
+dmar_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
+    bus_dma_segment_t *segs, int nsegs, int error)
+{
+
+	return (NULL);
+}
+
+static void
+dmar_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+}
+
+static void
+dmar_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
+    bus_dmasync_op_t op)
+{
+}
+
+struct bus_dma_impl bus_dma_dmar_impl = {
+	.tag_create = dmar_bus_dma_tag_create,
+	.tag_destroy = dmar_bus_dma_tag_destroy,
+	.map_create = dmar_bus_dmamap_create,
+	.map_destroy = dmar_bus_dmamap_destroy,
+	.mem_alloc = dmar_bus_dmamem_alloc,
+	.mem_free = dmar_bus_dmamem_free,
+	.load_phys = dmar_bus_dmamap_load_phys,
+	.load_buffer = dmar_bus_dmamap_load_buffer,
+	.map_waitok = dmar_bus_dmamap_waitok,
+	.map_complete = dmar_bus_dmamap_complete,
+	.map_unload = dmar_bus_dmamap_unload,
+	.map_sync = dmar_bus_dmamap_sync
+};
diff --git a/sys/x86/iommu/busdma_dmar.h b/sys/x86/iommu/busdma_dmar.h
new file mode 100644
index 0000000..2267ea7
--- /dev/null
+++ b/sys/x86/iommu/busdma_dmar.h
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __X86_IOMMU_BUSDMA_DMAR_H
+#define	__X86_IOMMU_BUSDMA_DMAR_H
+
+struct bus_dma_tag_dmar {
+	struct bus_dma_tag_common common;
+	device_t dev;
+	struct dmar_unit *dmar;
+};
+
+extern struct bus_dma_impl bus_dma_dmar_impl;
+
+bus_dma_tag_t dmar_get_dma_tag(device_t dev, device_t child);
+
+#endif
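struct bus_dma_tag_dmar places struct bus_dma_tag_common as its first member, which is what makes casts such as (bus_dma_tag_t)&dmar->dmar_tag in busdma_dmar.c legal: a pointer to the outer object is interchangeable with a pointer to its first member. A self-contained demonstration of the idiom (the struct names here are made up):

    #include <assert.h>
    #include <stddef.h>

    struct common { int impl; };
    struct derived { struct common common; int extra; };

    int
    main(void)
    {
        struct derived d = { { 42 }, 7 };
        /* The cast the patch relies on: outer pointer == first member. */
        struct common *c = (struct common *)&d;

        assert(c->impl == 42);
        assert(offsetof(struct derived, common) == 0);
        return (0);
    }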
diff --git a/sys/x86/iommu/intel_dmar.h b/sys/x86/iommu/intel_dmar.h
new file mode 100644
index 0000000..6356f4f
--- /dev/null
+++ b/sys/x86/iommu/intel_dmar.h
@@ -0,0 +1,95 @@
+/*-
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __X86_IOMMU_INTEL_DMAR_H
+#define	__X86_IOMMU_INTEL_DMAR_H
+
+struct dmar_unit {
+	device_t dev;
+	int unit;
+	uint16_t segment;
+	uint64_t base;
+	int reg_rid;
+	struct resource *regs;
+	uint32_t hw_ver;
+	uint64_t hw_cap;
+	uint64_t hw_ecap;
+	struct bus_dma_tag_dmar dmar_tag;
+};
+
+struct dmar_unit *dmar_find_bsf(int domain, int busno, int device,
+    int function);
+u_int dmar_nd2mask(u_int nd);
+
+static inline uint32_t
+dmar_read4(struct dmar_unit *unit, int reg)
+{
+
+	return (bus_read_4(unit->regs, reg));
+}
+
+static inline uint64_t
+dmar_read8(struct dmar_unit *unit, int reg)
+{
+#ifdef __i386__
+	uint32_t high, low;
+
+	low = bus_read_4(unit->regs, reg);
+	high = bus_read_4(unit->regs, reg + 4);
+	return (low | ((uint64_t)high << 32));
+#else
+	return (bus_read_8(unit->regs, reg));
+#endif
+}
+
+static inline void
+dmar_write4(struct dmar_unit *unit, int reg, uint32_t val)
+{
+
+	bus_write_4(unit->regs, reg, val);
+}
+
+static inline void
+dmar_write8(struct dmar_unit *unit, int reg, uint64_t val)
+{
+#ifdef __i386__
+	uint32_t high, low;
+
+	low = val;
+	high = val >> 32;
+	bus_write_4(unit->regs, reg, low);
+	bus_write_4(unit->regs, reg + 4, high);
+#else
+	bus_write_8(unit->regs, reg, val);
+#endif
+}
+
+#endif
diff --git a/sys/x86/iommu/intel_drv.c b/sys/x86/iommu/intel_drv.c
new file mode 100644
index 0000000..5c0f6df
--- /dev/null
+++ b/sys/x86/iommu/intel_drv.c
@@ -0,0 +1,339 @@
+/*-
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define	DMAR_REG_RID	1
+
+static devclass_t dmar_devclass;
+static device_t *dmar_devs;
+static int dmar_devcnt;
+
+typedef int (*dmar_iter_t)(ACPI_DMAR_HEADER *, void *);
+
+static void
+dmar_iterate_tbl(dmar_iter_t iter, void *arg)
+{
+	ACPI_TABLE_DMAR *dmartbl;
+	ACPI_DMAR_HEADER *dmarh;
+	char *ptr, *ptrend;
+	ACPI_STATUS status;
+
+	status = AcpiGetTable(ACPI_SIG_DMAR, 1, (ACPI_TABLE_HEADER **)&dmartbl);
+	if (ACPI_FAILURE(status))
+		return;
+	ptr = (char *)dmartbl + sizeof(*dmartbl);
+	ptrend = (char *)dmartbl + dmartbl->Header.Length;
+	for (;;) {
+		if (ptr >= ptrend)
+			break;
+		dmarh = (ACPI_DMAR_HEADER *)ptr;
+		if (dmarh->Length <= 0) {
+			printf("dmar_identify: corrupted DMAR table, l %d\n",
+			    dmarh->Length);
+			break;
+		}
+		ptr += dmarh->Length;
+		if (!iter(dmarh, arg))
+			break;
+	}
+}
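+
+/*
+ * Shape of the walk above, with invented numbers: sizeof(ACPI_TABLE_DMAR)
+ * is 48 (0x30), so in a table with Header.Length == 0x70 the loop visits
+ * a record at offset 0x30, advances by that record's Length (say 0x20)
+ * to a second record at 0x50, and stops once ptr reaches ptrend (0x70).
+ */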
+
+struct find_iter_args {
+	int i;
+	ACPI_DMAR_HARDWARE_UNIT *res;
+};
+
+static int
+dmar_find_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
+{
+	struct find_iter_args *fia;
+
+	if (dmarh->Type != ACPI_DMAR_TYPE_HARDWARE_UNIT)
+		return (1);
+
+	fia = arg;
+	if (fia->i == 0) {
+		fia->res = (ACPI_DMAR_HARDWARE_UNIT *)dmarh;
+		return (0);
+	}
+	fia->i--;
+	return (1);
+}
+
+static ACPI_DMAR_HARDWARE_UNIT *
+dmar_find_by_index(int idx)
+{
+	struct find_iter_args fia;
+
+	fia.i = idx;
+	fia.res = NULL;
+	dmar_iterate_tbl(dmar_find_iter, &fia);
+	return (fia.res);
+}
+
+static int
+dmar_count_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
+{
+
+	if (dmarh->Type == ACPI_DMAR_TYPE_HARDWARE_UNIT)
+		dmar_devcnt++;
+	return (1);
+}
+
+static void
+dmar_identify(driver_t *driver, device_t parent)
+{
+	ACPI_TABLE_DMAR *dmartbl;
+	ACPI_DMAR_HARDWARE_UNIT *dmarh;
+	ACPI_STATUS status;
+	int i, error;
+
+	if (acpi_disabled("dmar"))
+		return;
+	status = AcpiGetTable(ACPI_SIG_DMAR, 1, (ACPI_TABLE_HEADER **)&dmartbl);
+	if (ACPI_FAILURE(status))
+		return;
+	if (bootverbose) {
+		printf("DMAR HAW=%d flags=<%b>\n", dmartbl->Width,
+		    (unsigned)dmartbl->Flags,
+		    "\020\001INTR_REMAP\002X2APIC_OPT_OUT");
+	}
+
+	dmar_iterate_tbl(dmar_count_iter, NULL);
+	if (dmar_devcnt == 0)
+		return;
+	dmar_devs = malloc(sizeof(device_t) * dmar_devcnt, M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+	for (i = 0; i < dmar_devcnt; i++) {
+		dmarh = dmar_find_by_index(i);
+		if (dmarh == NULL) {
+			printf("dmar_identify: cannot find HWUNIT %d\n", i);
+			continue;
+		}
+		dmar_devs[i] = BUS_ADD_CHILD(parent, 1, "dmar", i);
+		if (dmar_devs[i] == NULL) {
+			printf("dmar_identify: cannot create instance %d\n", i);
+			continue;
+		}
+		error = bus_set_resource(dmar_devs[i], SYS_RES_MEMORY,
+		    DMAR_REG_RID, dmarh->Address, PAGE_SIZE);
+		if (error != 0) {
+			printf(
+	"dmar%d: unable to alloc register window at 0x%08jx: error %d\n",
+			    i, (uintmax_t)dmarh->Address, error);
+			device_delete_child(parent, dmar_devs[i]);
+			dmar_devs[i] = NULL;
+		}
+	}
+}
+
+static int
+dmar_probe(device_t dev)
+{
+	int i;
+
+	if (acpi_disabled("dmar"))
+		return (ENXIO);
+	if (dmar_devs == NULL)
+		return (ENXIO);
+	for (i = 0; i < dmar_devcnt; i++) {
+		if (dmar_devs[i] == dev)
+			break;
+	}
+	if (i == dmar_devcnt)
+		return (ENXIO);
+	device_set_desc(dev, "DMA remap");
+	return (0);
+}
+
+static int
+dmar_attach(device_t dev)
+{
+	struct dmar_unit *unit;
+	ACPI_DMAR_HARDWARE_UNIT *dmaru;
+	uint32_t caphi, ecaphi;
+
+	unit = device_get_softc(dev);
+	unit->dev = dev;
+	unit->unit = device_get_unit(dev);
+	dmaru = dmar_find_by_index(unit->unit);
+	if (dmaru == NULL)
+		return (EINVAL);
+	unit->segment = dmaru->Segment;
+	unit->base = dmaru->Address;
+	unit->reg_rid = DMAR_REG_RID;
+	unit->dmar_tag.common.ref_count = 1; /* Prevent free */
+	unit->dmar_tag.common.impl = &bus_dma_dmar_impl;
+	unit->dmar_tag.common.boundary = PCI_DMA_BOUNDARY;
+	/* XXXKIB initialize tag further */
+	unit->regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+	    &unit->reg_rid, RF_ACTIVE);
+	if (unit->regs == NULL) {
+		device_printf(dev, "cannot allocate register window\n");
+		return (ENOMEM);
+	}
+	unit->hw_ver = dmar_read4(unit, DMAR_VER_REG);
+	unit->hw_cap = dmar_read8(unit, DMAR_CAP_REG);
+	unit->hw_ecap = dmar_read8(unit, DMAR_ECAP_REG);
+	if (bootverbose) {
+		device_printf(dev,
+		    "regs@0x%08jx, ver=%d.%d, seg=%d, flags=<%b>\n",
+		    (uintmax_t)dmaru->Address, DMAR_MAJOR_VER(unit->hw_ver),
+		    DMAR_MINOR_VER(unit->hw_ver), dmaru->Segment,
+		    dmaru->Flags, "\020\001INCLUDE_ALL_PCI");
+		caphi = unit->hw_cap >> 32;
+		device_printf(dev, "cap=%b,", (u_int)unit->hw_cap,
+		    "\020\004AFL\005WBF\006PLMR\007PHMR\010CM\027ZLR\030ISOCH");
+		printf("%b, ", caphi, "\020\010PSI\027DWD\030DRD");
+		printf(
+	"ndoms=%d, sagaw=%d, mgaw=%d, fro=%d, nfr=%d, superp=%d, mamv=%d\n",
+		    DMAR_CAP_ND(unit->hw_cap), DMAR_CAP_SAGAW(unit->hw_cap),
+		    DMAR_CAP_MGAW(unit->hw_cap), DMAR_CAP_FRO(unit->hw_cap),
+		    DMAR_CAP_NFR(unit->hw_cap), DMAR_CAP_SPS(unit->hw_cap),
+		    DMAR_CAP_MAMV(unit->hw_cap));
+		ecaphi = unit->hw_ecap >> 32;
+		device_printf(dev, "ecap=%b,", (u_int)unit->hw_ecap,
+		    "\020\001C\002QI\003DI\004IR\005EIM\007PT\010SC");
+		printf("%b, ", ecaphi, "\020");
+		printf("mhmw=%d, iro=%d\n",
+		    DMAR_ECAP_MHMV(unit->hw_ecap),
+		    DMAR_ECAP_IRO(unit->hw_ecap));
+	}
+	return (0);
+}
+
+static int
+dmar_detach(device_t dev)
+{
+
+	return (EBUSY);
+}
+
+static int
+dmar_suspend(device_t dev)
+{
+
+	return (0);
+}
+
+static int
+dmar_resume(device_t dev)
+{
+
+	/* XXXKIB */
+	return (0);
+}
+
+static device_method_t dmar_methods[] = {
+	DEVMETHOD(device_identify, dmar_identify),
+	DEVMETHOD(device_probe, dmar_probe),
+	DEVMETHOD(device_attach, dmar_attach),
+	DEVMETHOD(device_detach, dmar_detach),
+	DEVMETHOD(device_suspend, dmar_suspend),
+	DEVMETHOD(device_resume, dmar_resume),
+	DEVMETHOD_END
+};
+
+static driver_t	dmar_driver = {
+	"dmar",
+	dmar_methods,
+	sizeof(struct dmar_unit),
+};
+
+DRIVER_MODULE(dmar, acpi, dmar_driver, dmar_devclass, 0, 0);
+MODULE_DEPEND(dmar, acpi, 1, 1, 1);
+
+struct dmar_unit *
+dmar_find_bsf(int domain, int busno, int slot, int function)
+{
+	struct dmar_unit *dmar;
+	ACPI_DMAR_HARDWARE_UNIT *dmarh;
+	ACPI_DMAR_DEVICE_SCOPE *devscope;
+	char *ptr, *ptrend;
+	int i;
+
+	for (i = 0; i < dmar_devcnt; i++) {
+		if (dmar_devs[i] == NULL)
+			continue;
+		dmarh = dmar_find_by_index(i);
+		if (dmarh == NULL)
+			continue;
+		if (dmarh->Segment != domain)
+			continue;
+		if ((dmarh->Flags & ACPI_DMAR_INCLUDE_ALL) == 0) {
+			ptr = (char *)dmarh + sizeof(*dmarh);
+			ptrend = (char *)dmarh + dmarh->Header.Length;
+			for (;;) {
+				if (ptr >= ptrend)
+					break;
+				devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr;
+				if (devscope->Length <= 0) {
+					printf(
+			"dmar_find_bsf: corrupted DMAR table, l %d\n",
+					    dmarh->Header.Length);
+					break;
+				}
+				ptr += devscope->Length;
+				/* XXXKIB */
+			}
+#if 0 /* XXXKIB */
+			if (XXX)
+				continue;
+#endif
+		}
+		dmar = device_get_softc(dmar_devs[i]);
+		return (dmar);
+	}
+	return (NULL);
+}
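dmar_identify() and dmar_attach() lean on the kernel printf's %b conversion to pretty-print register bits: the first byte of the pattern gives the numeric base (\020 is 16), and each subsequent \NNN names the 1-based bit that follows it. A userland model of the decoding, so the patterns above are easier to read (print_b() approximates the kernel behavior, it is not kernel code):

    #include <stdio.h>

    static void
    print_b(unsigned v, const char *pat)
    {
        int any = 0, bit;

        printf(*pat++ == 16 ? "%x" : "%o", v);
        while (*pat != '\0') {
            bit = *pat++;
            if (v & (1u << (bit - 1))) {
                putchar(any ? ',' : '<');
                any = 1;
                while (*pat > 32)	/* copy the bit's name */
                    putchar(*pat++);
            } else {
                while (*pat > 32)	/* skip the bit's name */
                    pat++;
            }
        }
        if (any)
            putchar('>');
        putchar('\n');
    }

    int
    main(void)
    {
        /* DMAR flags == 0x1 prints "1<INTR_REMAP>". */
        print_b(0x1, "\020\001INTR_REMAP\002X2APIC_OPT_OUT");
        return (0);
    }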
diff --git a/sys/x86/iommu/intel_reg.h b/sys/x86/iommu/intel_reg.h
new file mode 100644
index 0000000..2dd424b
--- /dev/null
+++ b/sys/x86/iommu/intel_reg.h
@@ -0,0 +1,144 @@
+/*-
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __X86_IOMMU_INTEL_REG_H
+#define	__X86_IOMMU_INTEL_REG_H
+
+/* Version register */
+#define	DMAR_VER_REG	0
+#define	DMAR_MAJOR_VER(x)	(((x) >> 4) & 0xf)
+#define	DMAR_MINOR_VER(x)	((x) & 0xf)
+
+/* Capabilities register */
+#define	DMAR_CAP_REG	0x8
+#define	DMAR_CAP_DRD	(1ULL << 55)	/* DMA Read Draining */
+#define	DMAR_CAP_DWD	(1ULL << 54)	/* DMA Write Draining */
+#define	DMAR_CAP_MAMV(x) ((u_int)(((x) >> 48) & 0x3f))
+					/* Maximum Address Mask */
+#define	DMAR_CAP_NFR(x)	((u_int)(((x) >> 40) & 0xff) + 1)
+					/* Num of Fault-recording regs */
+#define	DMAR_CAP_PSI	(1ULL << 39)	/* Page Selective Invalidation */
+#define	DMAR_CAP_SPS(x)	((u_int)(((x) >> 34) & 0xf)) /* Super-Page Support */
+#define	DMAR_CAP_SPS_2M		0x1
+#define	DMAR_CAP_SPS_1G		0x3
+#define	DMAR_CAP_SPS_512G	0x7
+#define	DMAR_CAP_SPS_1T		0xf
+#define	DMAR_CAP_FRO(x)	((u_int)(((x) >> 24) & 0x1ff))
+					/* Fault-recording reg offset */
+#define	DMAR_CAP_ISOCH	(1 << 23)	/* Isochrony */
+#define	DMAR_CAP_ZLR	(1 << 22)	/* Zero-length reads */
+#define	DMAR_CAP_MGAW(x) ((u_int)(((x) >> 16) & 0x3f))
+					/* Max Guest Address Width */
+#define	DMAR_CAP_SAGAW(x) ((u_int)(((x) >> 8) & 0x1f))
+					/* Adjusted Guest Address Width */
+#define	DMAR_CAP_SAGAW_2LVL	0x01
+#define	DMAR_CAP_SAGAW_3LVL	0x02
+#define	DMAR_CAP_SAGAW_4LVL	0x04
+#define	DMAR_CAP_SAGAW_5LVL	0x08
+#define	DMAR_CAP_SAGAW_6LVL	0x10
+#define	DMAR_CAP_CM	(1 << 7)	/* Caching mode */
+#define	DMAR_CAP_PHMR	(1 << 6)	/* Protected High-mem Region */
+#define	DMAR_CAP_PLMR	(1 << 5)	/* Protected Low-mem Region */
+#define	DMAR_CAP_RWBF	(1 << 4)	/* Required Write-Buffer Flushing */
+#define	DMAR_CAP_AFL	(1 << 3)	/* Advanced Fault Logging */
+#define	DMAR_CAP_ND(x)	((u_int)((x) & 0x7))	/* Number of domains */
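+
+/*
+ * Worked decode of the macros above, using an invented CAP value of
+ * 0x2f0406 rather than real hardware:
+ *   DMAR_CAP_ND(0x2f0406)    == 0x6  (domain-count encoding),
+ *   DMAR_CAP_SAGAW(0x2f0406) == 0x04 == DMAR_CAP_SAGAW_4LVL,
+ *   DMAR_CAP_MGAW(0x2f0406)  == 47   (guest address width field).
+ */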
+
+/* Extended Capabilities register */
+#define	DMAR_ECAP_REG	0x10
+#define	DMAR_ECAP_MHMV(x) ((u_int)(((x) >> 20) & 0xf))
+					/* Maximum Handle Mask Value */
+#define	DMAR_ECAP_IRO(x) ((u_int)(((x) >> 8) & 0x1f))
+					/* IOTLB Register Offset */
+#define	DMAR_ECAP_SC	(1 << 7)	/* Snoop Control */
+#define	DMAR_ECAP_PT	(1 << 6)	/* Pass Through */
+#define	DMAR_ECAP_EIM	(1 << 4)	/* Extended Interrupt Mode */
+#define	DMAR_ECAP_IR	(1 << 3)	/* Interrupt Remapping */
+#define	DMAR_ECAP_DI	(1 << 2)	/* Device IOTLB */
+#define	DMAR_ECAP_QI	(1 << 1)	/* Queued Invalidation */
+#define	DMAR_ECAP_C	(1 << 0)	/* Coherency */
+
+/* Global Command register */
+#define	DMAR_GCMD_REG	0x18
+#define	DMAR_GCMD_TE	(1 << 31)	/* Translation Enable */
+#define	DMAR_GCMD_SRTP	(1 << 30)	/* Set Root Table Pointer */
+#define	DMAR_GCMD_SFL	(1 << 29)	/* Set Fault Log */
+#define	DMAR_GCMD_EAFL	(1 << 28)	/* Enable Advanced Fault Logging */
+#define	DMAR_GCMD_WBF	(1 << 27)	/* Write Buffer Flush */
+#define	DMAR_GCMD_QIE	(1 << 26)	/* Queued Invalidation Enable */
+#define	DMAR_GCMD_IRE	(1 << 25)	/* Interrupt Remapping Enable */
+#define	DMAR_GCMD_SIRTP	(1 << 24)	/* Set Interrupt Remap Table Pointer */
+#define	DMAR_GCMD_CFI	(1 << 23)	/* Compatibility Format Interrupt */
+
+/* Global Status register */
+#define	DMAR_GSTS_REG	0x1c
+#define	DMAR_GSTS_TES	(1 << 31)	/* Translation Enable Status */
+#define	DMAR_GSTS_RTPS	(1 << 30)	/* Root Table Pointer Status */
+#define	DMAR_GSTS_FLS	(1 << 29)	/* Fault Log Status */
+#define	DMAR_GSTS_AFLS	(1 << 28)	/* Advanced Fault Logging Status */
+#define	DMAR_GSTS_WBFS	(1 << 27)	/* Write Buffer Flush Status */
+#define	DMAR_GSTS_QIES	(1 << 26)	/* Queued Invalidation Enable Status */
+#define	DMAR_GSTS_IRES	(1 << 25)	/* Interrupt Remapping Enable Status */
+#define	DMAR_GSTS_IRTPS	(1 << 24)	/* Interrupt Remapping Table
+					   Pointer Status */
+#define	DMAR_GSTS_CFIS	(1 << 23)	/* Compatibility Format
+					   Interrupt Status */
+
+/* Root-Entry Table Address register */
+#define	DMAR_RTADDR_REG	0x20
+
+/* Context Command register */
+#define	DMAR_CCMD_REG	0x28
+#define	DMAR_CCMD_ICC	(1ULL << 63)	/* Invalidate Context-Cache */
+#define	DMAR_CCMD_CIRG_MASK (0x3ULL << 61) /* Context Invalidation
+					      Request Granularity */
+#define	DMAR_CCMD_CIRG_GLOB (0x1ULL << 61) /* Global */
+#define	DMAR_CCMD_CIRG_DOM  (0x2ULL << 61) /* Domain */
+#define	DMAR_CCMD_CIRG_DEV  (0x3ULL << 61) /* Device */
+#define	DMAR_CCMD_CAIG(x) (((x) >> 59) & 0x3) /* Context Actual
+						 Invalidation Granularity */
+#define	DMAR_CCMD_CAIG_GLOB	0x1	/* Global */
+#define	DMAR_CCMD_CAIG_DOM	0x2	/* Domain */
+#define	DMAR_CCMD_CAIG_DEV	0x3	/* Device */
+#define	DMAR_CCMD_FM	(0x3ULL << 32)	/* Function Mask */
+#define	DMAR_CCMD_SID(x) (((x) & 0xffff) << 16)	/* Source-ID */
+#define	DMAR_CCMD_DID(x) ((x) & 0xffff)	/* Domain-ID */
+
+/* Invalidate Address register */
+#define	DMAR_IVA_REG_OFF	0
+#define	DMAR_IVA_IH	(1 << 6)	/* Invalidation Hint */
+#define	DMAR_IVA_AM	0x1f		/* Address Mask */
+
+/* IOTLB Invalidate register */
+#define	DMAR_IOTLB_REG_OFF	0x8
+#define	DMAR_IOTLB_IVT	(1ULL << 63)	/* Invalidate IOTLB */
+/* XXX */
+
+#endif
diff --git a/sys/x86/iommu/intel_utils.c b/sys/x86/iommu/intel_utils.c
new file mode 100644
index 0000000..38ed7f7
--- /dev/null
+++ b/sys/x86/iommu/intel_utils.c
@@ -0,0 +1,65 @@
+/*-
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+u_int
+dmar_nd2mask(u_int nd)
+{
+	static const u_int masks[] = {
+		0x000f,	/* nd == 0 */
+		0x002f,	/* nd == 1 */
+		0x00ff,	/* nd == 2 */
+		0x02ff,	/* nd == 3 */
+		0x0fff,	/* nd == 4 */
+		0x2fff,	/* nd == 5 */
+		0xffff,	/* nd == 6 */
+		0x0000,	/* nd == 7 reserved */
+	};
+
+	KASSERT(nd <= 6, ("number of domains %d", nd));
+	return (masks[nd]);
+}
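dmar_nd2mask() converts the CAP register's ND encoding into a mask that bounds usable domain IDs. A self-contained sketch of how a consumer might apply it; the table is copied from the function above, while the caller and its values are hypothetical:

    #include <stdio.h>

    /* Table reproduced from intel_utils.c for a standalone demo. */
    static const unsigned masks[] = {
        0x000f, 0x002f, 0x00ff, 0x02ff, 0x0fff, 0x2fff, 0xffff, 0x0000,
    };

    int
    main(void)
    {
        unsigned cap_nd = 0x6;		/* a DMAR_CAP_ND() result, made up */
        unsigned domain = 0x12345;	/* candidate domain ID */

        /* A future domain allocator would presumably bound IDs this way. */
        printf("domain id 0x%x\n", domain & masks[cap_nd]);
        return (0);
    }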
diff --git a/sys/x86/x86/busdma_bounce.c b/sys/x86/x86/busdma_bounce.c
new file mode 100644
index 0000000..588e6c9
--- /dev/null
+++ b/sys/x86/x86/busdma_bounce.c
@@ -0,0 +1,1078 @@
+/*-
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __i386__
+#define	MAX_BPAGES 512
+#else
+#define	MAX_BPAGES 8192
+#endif
+#define	BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
+#define	BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
+
+struct bounce_zone;
+
+struct bus_dma_tag {
+	struct bus_dma_tag_common common;
+	int	map_count;
+	bus_dma_segment_t *segments;
+	struct bounce_zone *bounce_zone;
+};
+
+struct bounce_page {
+	vm_offset_t	vaddr;		/* kva of bounce buffer */
+	bus_addr_t	busaddr;	/* Physical address */
+	vm_offset_t	datavaddr;	/* kva of client data */
+	bus_addr_t	dataaddr;	/* client physical address */
+	bus_size_t	datacount;	/* client data count */
+	STAILQ_ENTRY(bounce_page) links;
+};
+
+int busdma_swi_pending;
+
+struct bounce_zone {
+	STAILQ_ENTRY(bounce_zone) links;
+	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
+	int		total_bpages;
+	int		free_bpages;
+	int		reserved_bpages;
+	int		active_bpages;
+	int		total_bounced;
+	int		total_deferred;
+	int		map_count;
+	bus_size_t	alignment;
+	bus_addr_t	lowaddr;
+	char		zoneid[8];
+	char		lowaddrid[20];
+	struct sysctl_ctx_list sysctl_tree;
+	struct sysctl_oid *sysctl_tree_top;
+};
+
+static struct mtx bounce_lock;
+static int total_bpages;
+static int busdma_zonecount;
+static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
+
+static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
+SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
+    "Total bounce pages");
+
+struct bus_dmamap {
+	struct bp_list	bpages;
+	int		pagesneeded;
+	int		pagesreserved;
+	bus_dma_tag_t	dmat;
+	struct memdesc	mem;
+	bus_dmamap_callback_t *callback;
+	void		*callback_arg;
+	STAILQ_ENTRY(bus_dmamap) links;
+};
+
+static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
+static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
+static struct bus_dmamap nobounce_dmamap, contig_dmamap;
+
+static void init_bounce_pages(void *dummy);
+static int alloc_bounce_zone(bus_dma_tag_t dmat);
+static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
+static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+    int commit);
+static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
+    vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
+static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
+int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
+static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+    pmap_t pmap, void *buf, bus_size_t buflen, int flags);
+static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
+    vm_paddr_t buf, bus_size_t buflen, int flags);
+static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+    int flags);
+
+#ifdef XEN
+#undef pmap_kextract
+#define pmap_kextract pmap_kextract_ma
+#endif
+
+/*
+ * Allocate a device specific dma_tag.
+ */
+static int
+bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
+    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
+    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+    void *lockfuncarg, bus_dma_tag_t *dmat)
+{
+	bus_dma_tag_t newtag;
+	int error;
+
+	*dmat = NULL;
+	error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
+	    NULL, alignment, boundary,
+	    lowaddr, highaddr, filter, filterarg, maxsize, nsegments, maxsegsz,
+	    flags, lockfunc, lockfuncarg, sizeof (struct bus_dma_tag),
+	    (void **)&newtag);
+	if (error != 0)
+		return (error);
+
+	newtag->common.impl = &bus_dma_bounce_impl;
+	newtag->map_count = 0;
+	newtag->segments = NULL;
+
+	if (parent != NULL && ((newtag->common.filter != NULL) ||
+	    ((parent->common.flags & BUS_DMA_COULD_BOUNCE) != 0)))
+		newtag->common.flags |= BUS_DMA_COULD_BOUNCE;
+
+	if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
+	    newtag->common.alignment > 1)
+		newtag->common.flags |= BUS_DMA_COULD_BOUNCE;
+
+	if (((newtag->common.flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+	    (flags & BUS_DMA_ALLOCNOW) != 0) {
+		struct bounce_zone *bz;
+
+		/* Must bounce */
+		if ((error = alloc_bounce_zone(newtag)) != 0) {
+			free(newtag, M_DEVBUF);
+			return (error);
+		}
+		bz = newtag->bounce_zone;
+
+		if (ptoa(bz->total_bpages) < maxsize) {
+			int pages;
+
+			pages = atop(maxsize) - bz->total_bpages;
+
+			/* Add pages to our bounce pool */
+			if (alloc_bounce_pages(newtag, pages) < pages)
+				error = ENOMEM;
+		}
+		/* Performed initial allocation */
+		newtag->common.flags |= BUS_DMA_MIN_ALLOC_COMP;
+	} else
+		error = 0;
+
+	if (error != 0)
+		free(newtag, M_DEVBUF);
+	else
+		*dmat = newtag;
+	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
+	    __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
+	    error);
+	return (error);
+}
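+
+/*
+ * Worked example of the COULD_BOUNCE test above (numbers invented): with
+ * 16GB of RAM, ptoa((vm_paddr_t)Maxmem) is 0x400000000, so a tag with
+ * lowaddr == 0xffffffff (a 32-bit-only device) satisfies
+ * lowaddr < ptoa(Maxmem) and is flagged BUS_DMA_COULD_BOUNCE, while a
+ * tag with lowaddr == BUS_SPACE_MAXADDR and alignment 1 never will be.
+ */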
+
+static int
+bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
+{
+	bus_dma_tag_t dmat_copy, parent;
+	int error;
+
+	error = 0;
+	dmat_copy = dmat;
+
+	if (dmat != NULL) {
+		if (dmat->map_count != 0) {
+			error = EBUSY;
+			goto out;
+		}
+		while (dmat != NULL) {
+			parent = (bus_dma_tag_t)dmat->common.parent;
+			atomic_subtract_int(&dmat->common.ref_count, 1);
+			if (dmat->common.ref_count == 0) {
+				if (dmat->segments != NULL)
+					free(dmat->segments, M_DEVBUF);
+				free(dmat, M_DEVBUF);
+				/*
+				 * Last reference count, so
+				 * release our reference
+				 * count on our parent.
+				 */
+				dmat = parent;
+			} else
+				dmat = NULL;
+		}
+	}
+out:
+	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
+	return (error);
+}
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+static int
+bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+	struct bounce_zone *bz;
+	int error, maxpages, pages;
+
+	error = 0;
+
+	if (dmat->segments == NULL) {
+		dmat->segments = (bus_dma_segment_t *)malloc(
+		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
+		    M_DEVBUF, M_NOWAIT);
+		if (dmat->segments == NULL) {
+			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+			    __func__, dmat, ENOMEM);
+			return (ENOMEM);
+		}
+	}
+
+	/*
+	 * Bouncing might be required if the driver asks for an active
+	 * exclusion region, a data alignment that is stricter than 1, and/or
+	 * an active address boundary.
+	 */
+	if (dmat->common.flags & BUS_DMA_COULD_BOUNCE) {
+		/* Must bounce */
+		if (dmat->bounce_zone == NULL) {
+			if ((error = alloc_bounce_zone(dmat)) != 0)
+				return (error);
+		}
+		bz = dmat->bounce_zone;
+
+		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
+		    M_NOWAIT | M_ZERO);
+		if (*mapp == NULL) {
+			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+			    __func__, dmat, ENOMEM);
+			return (ENOMEM);
+		}
+
+		/* Initialize the new map */
+		STAILQ_INIT(&((*mapp)->bpages));
+
+		/*
+		 * Attempt to add pages to our pool on a per-instance
+		 * basis up to a sane limit.
+		 */
+		if (dmat->common.alignment > 1)
+			maxpages = MAX_BPAGES;
+		else
+			maxpages = MIN(MAX_BPAGES, Maxmem -
+			    atop(dmat->common.lowaddr));
+		if ((dmat->common.flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
+		    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
+			pages = MAX(atop(dmat->common.maxsize), 1);
+			pages = MIN(maxpages - bz->total_bpages, pages);
+			pages = MAX(pages, 1);
+			if (alloc_bounce_pages(dmat, pages) < pages)
+				error = ENOMEM;
+			if ((dmat->common.flags & BUS_DMA_MIN_ALLOC_COMP)
+			    == 0) {
+				if (error == 0) {
+					dmat->common.flags |=
+					    BUS_DMA_MIN_ALLOC_COMP;
+				}
+			} else
+				error = 0;
+		}
+		bz->map_count++;
+	} else {
+		*mapp = NULL;
+	}
+	if (error == 0)
+		dmat->map_count++;
+	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+	    __func__, dmat, dmat->common.flags, error);
+	return (error);
+}
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+static int
+bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+
+	if (map != NULL && map != &nobounce_dmamap && map != &contig_dmamap) {
+		if (STAILQ_FIRST(&map->bpages) != NULL) {
+			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+			    __func__, dmat, EBUSY);
+			return (EBUSY);
+		}
+		if (dmat->bounce_zone)
+			dmat->bounce_zone->map_count--;
+		free(map, M_DEVBUF);
+	}
+	dmat->map_count--;
+	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
+	return (0);
+}
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with dmamap_load is also allocated.
+ */
+static int
+bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+    bus_dmamap_t *mapp)
+{
+	vm_memattr_t attr;
+	int mflags;
+
+	if (flags & BUS_DMA_NOWAIT)
+		mflags = M_NOWAIT;
+	else
+		mflags = M_WAITOK;
+
+	/* If we succeed, no mapping/bouncing will be required */
+	*mapp = NULL;
+
+	if (dmat->segments == NULL) {
+		dmat->segments = (bus_dma_segment_t *)malloc(
+		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
+		    M_DEVBUF, mflags);
+		if (dmat->segments == NULL) {
+			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+			    __func__, dmat, dmat->common.flags, ENOMEM);
+			return (ENOMEM);
+		}
+	}
+	if (flags & BUS_DMA_ZERO)
+		mflags |= M_ZERO;
+	if (flags & BUS_DMA_NOCACHE)
+		attr = VM_MEMATTR_UNCACHEABLE;
+	else
+		attr = VM_MEMATTR_DEFAULT;
+
+	/*
+	 * XXX:
+	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
+	 * alignment guarantees of malloc need to be nailed down, and the
+	 * code below should be rewritten to take that into account.
+	 *
+	 * In the meantime, we'll warn the user if malloc gets it wrong.
+	 */
+	if ((dmat->common.maxsize <= PAGE_SIZE) &&
+	    (dmat->common.alignment < dmat->common.maxsize) &&
+	    dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
+	    attr == VM_MEMATTR_DEFAULT) {
+		*vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
+	} else if (dmat->common.nsegments >= btoc(dmat->common.maxsize) &&
+	    dmat->common.alignment <= PAGE_SIZE &&
+	    (dmat->common.boundary == 0 ||
+	    dmat->common.boundary >= dmat->common.lowaddr)) {
+		/* Page-based multi-segment allocations allowed */
+		*vaddr = (void *)kmem_alloc_attr(kernel_map,
+		    dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
+		    attr);
+		*mapp = &contig_dmamap;
+	} else {
+		*vaddr = (void *)kmem_alloc_contig(kernel_map,
+		    dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
+		    dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
+		    dmat->common.boundary, attr);
+		*mapp = &contig_dmamap;
+	}
+	if (*vaddr == NULL) {
+		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+		    __func__, dmat, dmat->common.flags, ENOMEM);
+		return (ENOMEM);
+	} else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
+		printf("bus_dmamem_alloc failed to align memory properly.\n");
+	}
+	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+	    __func__, dmat, dmat->common.flags, 0);
+	return (0);
+}
+
+/*
+ * Free a piece of memory and its allocated dmamap, that was allocated
+ * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
+ */
+static void
+bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+	/*
+	 * dmamem does not need to be bounced, so the map should be
+	 * NULL if malloc() was used and contig_dmamap if
+	 * kmem_alloc_contig() was used.
+	 */
+	if (!(map == NULL || map == &contig_dmamap))
+		panic("bus_dmamem_free: Invalid map freed\n");
+	if (map == NULL)
+		free(vaddr, M_DEVBUF);
+	else
+		kmem_free(kernel_map, (vm_offset_t)vaddr,
+		    dmat->common.maxsize);
+	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
+	    dmat->common.flags);
+}
+
+static void
+_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
+    bus_size_t buflen, int flags)
+{
+	bus_addr_t curaddr;
+	bus_size_t sgsize;
+
+	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
+		/*
+		 * Count the number of bounce pages
+		 * needed in order to complete this transfer
+		 */
+		curaddr = buf;
+		while (buflen != 0) {
+			sgsize = MIN(buflen, dmat->common.maxsegsz);
+			if (bus_dma_run_filter(&dmat->common, curaddr)) {
+				sgsize = MIN(sgsize, PAGE_SIZE);
+				map->pagesneeded++;
+			}
+			curaddr += sgsize;
+			buflen -= sgsize;
+		}
+		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
+	}
+}
+
+static void
+_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
+    void *buf, bus_size_t buflen, int flags)
+{
+	vm_offset_t vaddr;
+	vm_offset_t vendaddr;
+	bus_addr_t paddr;
+	bus_size_t sg_len;
+
+	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
+		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
+		    "alignment= %d", dmat->common.lowaddr,
+		    ptoa((vm_paddr_t)Maxmem),
+		    dmat->common.boundary, dmat->common.alignment);
+		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
+		    map, &nobounce_dmamap, map->pagesneeded);
+		/*
+		 * Count the number of bounce pages
+		 * needed in order to complete this transfer
+		 */
+		vaddr = (vm_offset_t)buf;
+		vendaddr = (vm_offset_t)buf + buflen;
+
+		while (vaddr < vendaddr) {
+			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
+			if (pmap == kernel_pmap)
+				paddr = pmap_kextract(vaddr);
+			else
+				paddr = pmap_extract(pmap, vaddr);
+			if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
+				sg_len = roundup2(sg_len,
+				    dmat->common.alignment);
+				map->pagesneeded++;
+			}
+			vaddr += sg_len;
+		}
+		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
+	}
+}
+
+static int
+_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
+{
+
+	/* Reserve Necessary Bounce Pages */
+	mtx_lock(&bounce_lock);
+	if (flags & BUS_DMA_NOWAIT) {
+		if (reserve_bounce_pages(dmat, map, 0) != 0) {
+			mtx_unlock(&bounce_lock);
+			return (ENOMEM);
+		}
+	} else {
+		if (reserve_bounce_pages(dmat, map, 1) != 0) {
+			/* Queue us for resources */
+			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
+			mtx_unlock(&bounce_lock);
+			return (EINPROGRESS);
+		}
+	}
+	mtx_unlock(&bounce_lock);
+
+	return (0);
+}
+ */
+static int
+_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
+ bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
+{
+ bus_addr_t baddr, bmask;
+ int seg;
+
+ /*
+ * Make sure we don't cross any boundaries.
+ */
+ bmask = ~(dmat->common.boundary - 1);
+ if (dmat->common.boundary > 0) {
+ baddr = (curaddr + dmat->common.boundary) & bmask;
+ if (sgsize > (baddr - curaddr))
+ sgsize = (baddr - curaddr);
+ }
+
+ /*
+ * Insert chunk into a segment, coalescing with
+ * previous segment if possible.
+ */
+ seg = *segp;
+ if (seg == -1) {
+ seg = 0;
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ } else {
+ if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
+ (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
+ (dmat->common.boundary == 0 ||
+ (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ segs[seg].ds_len += sgsize;
+ else {
+ if (++seg >= dmat->common.nsegments)
+ return (0);
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ }
+ }
+ *segp = seg;
+ return (sgsize);
+}
+
+/*
+ * Utility function to load a physical buffer. segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ */
+static int
+bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+ bus_size_t sgsize;
+ bus_addr_t curaddr;
+ int error;
+
+ if (map == NULL || map == &contig_dmamap)
+ map = &nobounce_dmamap;
+
+ if (segs == NULL)
+ segs = dmat->segments;
+
+ if ((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) {
+ _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
+ if (map->pagesneeded != 0) {
+ error = _bus_dmamap_reserve_pages(dmat, map, flags);
+ if (error)
+ return (error);
+ }
+ }
+
+ while (buflen > 0) {
+ curaddr = buf;
+ sgsize = MIN(buflen, dmat->common.maxsegsz);
+ if (((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+ map->pagesneeded != 0 &&
+ bus_dma_run_filter(&dmat->common, curaddr)) {
+ sgsize = MIN(sgsize, PAGE_SIZE);
+ curaddr = add_bounce_page(dmat, map, 0, curaddr,
+ sgsize);
+ }
+ sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
+ segp);
+ if (sgsize == 0)
+ break;
+ buf += sgsize;
+ buflen -= sgsize;
+ }
+
+ /*
+ * Did we fit?
+ */
+ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
+/*
+ * Utility function to load a linear buffer. segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ */
+static int
+bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+ bus_size_t sgsize, max_sgsize;
+ bus_addr_t curaddr;
+ vm_offset_t vaddr;
+ int error;
+
+ if (map == NULL || map == &contig_dmamap)
+ map = &nobounce_dmamap;
+
+ if (segs == NULL)
+ segs = dmat->segments;
+
+ if ((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) {
+ _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
+ if (map->pagesneeded != 0) {
+ error = _bus_dmamap_reserve_pages(dmat, map, flags);
+ if (error)
+ return (error);
+ }
+ }
+
+ vaddr = (vm_offset_t)buf;
+ while (buflen > 0) {
+ /*
+ * Get the physical address for this segment.
+ */
+ if (pmap == kernel_pmap)
+ curaddr = pmap_kextract(vaddr);
+ else
+ curaddr = pmap_extract(pmap, vaddr);
+
+ /*
+ * Compute the segment size, and adjust counts.
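+ * sgsize begins as the space left in the current physical page, so
+ * that each chunk is physically contiguous and can be replaced by a
+ * single bounce page, and is then capped by the remaining buffer
+ * length and the tag's maxsegsz.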
+ */
+ max_sgsize = MIN(buflen, dmat->common.maxsegsz);
+ sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK);
+ if (((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+ map->pagesneeded != 0 &&
+ bus_dma_run_filter(&dmat->common, curaddr)) {
+ sgsize = roundup2(sgsize, dmat->common.alignment);
+ sgsize = MIN(sgsize, max_sgsize);
+ curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
+ sgsize);
+ } else {
+ sgsize = MIN(sgsize, max_sgsize);
+ }
+ sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
+ segp);
+ if (sgsize == 0)
+ break;
+ vaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ /*
+ * Did we fit?
+ */
+ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
+static void
+bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
+{
+
+ if (map == NULL)
+ return;
+ map->mem = *mem;
+ map->dmat = dmat;
+ map->callback = callback;
+ map->callback_arg = callback_arg;
+}
+
+static bus_dma_segment_t *
+bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dma_segment_t *segs, int nsegs, int error)
+{
+
+ if (segs == NULL)
+ segs = dmat->segments;
+ return (segs);
+}
+
+/*
+ * Release the mapping held by map.
+ */
+static void
+bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ struct bounce_page *bpage;
+
+ while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+ STAILQ_REMOVE_HEAD(&map->bpages, links);
+ free_bounce_page(dmat, bpage);
+ }
+}
+
+static void
+bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dmasync_op_t op)
+{
+ struct bounce_page *bpage;
+
+ if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+ /*
+ * Handle data bouncing. We might also
+ * want to add support for invalidating
+ * the caches on broken hardware
+ */
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
+ "performing bounce", __func__, dmat, dmat->common.flags,
+ op);
+
+ if (op & BUS_DMASYNC_PREWRITE) {
+ while (bpage != NULL) {
+ if (bpage->datavaddr != 0)
+ bcopy((void *)bpage->datavaddr,
+ (void *)bpage->vaddr,
+ bpage->datacount);
+ else
+ physcopyout(bpage->dataaddr,
+ (void *)bpage->vaddr,
+ bpage->datacount);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
+ dmat->bounce_zone->total_bounced++;
+ }
+
+ if (op & BUS_DMASYNC_POSTREAD) {
+ while (bpage != NULL) {
+ if (bpage->datavaddr != 0)
+ bcopy((void *)bpage->vaddr,
+ (void *)bpage->datavaddr,
+ bpage->datacount);
+ else
+ physcopyin((void *)bpage->vaddr,
+ bpage->dataaddr,
+ bpage->datacount);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
+ dmat->bounce_zone->total_bounced++;
+ }
+ }
+}
+
+static void
+init_bounce_pages(void *dummy __unused)
+{
+
+ total_bpages = 0;
+ STAILQ_INIT(&bounce_zone_list);
+ STAILQ_INIT(&bounce_map_waitinglist);
+ STAILQ_INIT(&bounce_map_callbacklist);
+ mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
+}
+SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
+
+static struct sysctl_ctx_list *
+busdma_sysctl_tree(struct bounce_zone *bz)
+{
+ return (&bz->sysctl_tree);
+}
+
+static struct sysctl_oid *
+busdma_sysctl_tree_top(struct bounce_zone *bz)
+{
+ return (bz->sysctl_tree_top);
+}
+
+#if defined(__amd64__) || defined(PAE)
+#define SYSCTL_ADD_BUS_SIZE_T SYSCTL_ADD_UQUAD
+#else
+#define SYSCTL_ADD_BUS_SIZE_T(ctx, parent, nbr, name, flag, ptr, desc) \
+ SYSCTL_ADD_UINT(ctx, parent, nbr, name, flag, ptr, 0, desc)
+#endif
+
+static int
+alloc_bounce_zone(bus_dma_tag_t dmat)
+{
+ struct bounce_zone *bz;
+
+ /* Check to see if we already have a
suitable zone */ + STAILQ_FOREACH(bz, &bounce_zone_list, links) { + if ((dmat->common.alignment <= bz->alignment) && + (dmat->common.lowaddr >= bz->lowaddr)) { + dmat->bounce_zone = bz; + return (0); + } + } + + if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, + M_NOWAIT | M_ZERO)) == NULL) + return (ENOMEM); + + STAILQ_INIT(&bz->bounce_page_list); + bz->free_bpages = 0; + bz->reserved_bpages = 0; + bz->active_bpages = 0; + bz->lowaddr = dmat->common.lowaddr; + bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE); + bz->map_count = 0; + snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); + busdma_zonecount++; + snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); + STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); + dmat->bounce_zone = bz; + + sysctl_ctx_init(&bz->sysctl_tree); + bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, + SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, + CTLFLAG_RD, 0, ""); + if (bz->sysctl_tree_top == NULL) { + sysctl_ctx_free(&bz->sysctl_tree); + return (0); /* XXX error code? */ + } + + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, + "Total bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, + "Free bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, + "Reserved bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, + "Active bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, + "Total bounce requests"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, + "Total bounce requests that were deferred"); + SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); + SYSCTL_ADD_BUS_SIZE_T(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "alignment", CTLFLAG_RD, &bz->alignment, ""); + + return (0); +} + +static int +alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) +{ + struct bounce_zone *bz; + int count; + + bz = dmat->bounce_zone; + count = 0; + while (numpages > 0) { + struct bounce_page *bpage; + + bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, + M_NOWAIT | M_ZERO); + + if (bpage == NULL) + break; + bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, + M_NOWAIT, 0ul, + bz->lowaddr, + PAGE_SIZE, + 0); + if (bpage->vaddr == 0) { + free(bpage, M_DEVBUF); + break; + } + bpage->busaddr = pmap_kextract(bpage->vaddr); + mtx_lock(&bounce_lock); + STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); + total_bpages++; + bz->total_bpages++; + bz->free_bpages++; + mtx_unlock(&bounce_lock); + count++; + numpages--; + } + return (count); +} + +static int +reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) +{ + struct bounce_zone *bz; + int pages; + + mtx_assert(&bounce_lock, MA_OWNED); + bz = dmat->bounce_zone; + pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); + if (commit == 0 && 
map->pagesneeded > (map->pagesreserved + pages))
+ return (map->pagesneeded - (map->pagesreserved + pages));
+ bz->free_bpages -= pages;
+ bz->reserved_bpages += pages;
+ map->pagesreserved += pages;
+ pages = map->pagesneeded - map->pagesreserved;
+
+ return (pages);
+}
+
+static bus_addr_t
+add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
+ bus_addr_t addr, bus_size_t size)
+{
+ struct bounce_zone *bz;
+ struct bounce_page *bpage;
+
+ KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
+ KASSERT(map != NULL && map != &nobounce_dmamap && map != &contig_dmamap,
+ ("add_bounce_page: bad map %p", map));
+
+ bz = dmat->bounce_zone;
+ if (map->pagesneeded == 0)
+ panic("add_bounce_page: map doesn't need any pages");
+ map->pagesneeded--;
+
+ if (map->pagesreserved == 0)
+ panic("add_bounce_page: map doesn't have any pages reserved");
+ map->pagesreserved--;
+
+ mtx_lock(&bounce_lock);
+ bpage = STAILQ_FIRST(&bz->bounce_page_list);
+ if (bpage == NULL)
+ panic("add_bounce_page: free page list is empty");
+
+ STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
+ bz->reserved_bpages--;
+ bz->active_bpages++;
+ mtx_unlock(&bounce_lock);
+
+ if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
+ /* Page offset needs to be preserved. */
+ bpage->vaddr |= vaddr & PAGE_MASK;
+ bpage->busaddr |= vaddr & PAGE_MASK;
+ }
+ bpage->datavaddr = vaddr;
+ bpage->dataaddr = addr;
+ bpage->datacount = size;
+ STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
+ return (bpage->busaddr);
+}
+
+static void
+free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
+{
+ struct bus_dmamap *map;
+ struct bounce_zone *bz;
+
+ bz = dmat->bounce_zone;
+ bpage->datavaddr = 0;
+ bpage->datacount = 0;
+ if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
+ /*
+ * Reset the bounce page to start at offset 0. Other uses
+ * of this bounce page may need to store a full page of
+ * data and/or assume it starts on a page boundary.
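+ * add_bounce_page() may have ORed the client's page offset
+ * into bpage->vaddr and bpage->busaddr, so mask it off before
+ * the page goes back on the free list.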
+ */ + bpage->vaddr &= ~PAGE_MASK; + bpage->busaddr &= ~PAGE_MASK; + } + + mtx_lock(&bounce_lock); + STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); + bz->free_bpages++; + bz->active_bpages--; + if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { + if (reserve_bounce_pages(map->dmat, map, 1) == 0) { + STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); + STAILQ_INSERT_TAIL(&bounce_map_callbacklist, + map, links); + busdma_swi_pending = 1; + bz->total_deferred++; + swi_sched(vm_ih, 0); + } + } + mtx_unlock(&bounce_lock); +} + +void +busdma_swi(void) +{ + bus_dma_tag_t dmat; + struct bus_dmamap *map; + + mtx_lock(&bounce_lock); + while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { + STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); + mtx_unlock(&bounce_lock); + dmat = map->dmat; + (dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK); + bus_dmamap_load_mem(map->dmat, map, &map->mem, + map->callback, map->callback_arg, BUS_DMA_WAITOK); + (dmat->common.lockfunc)(dmat->common.lockfuncarg, + BUS_DMA_UNLOCK); + mtx_lock(&bounce_lock); + } + mtx_unlock(&bounce_lock); +} + +struct bus_dma_impl bus_dma_bounce_impl = { + .tag_create = bounce_bus_dma_tag_create, + .tag_destroy = bounce_bus_dma_tag_destroy, + .map_create = bounce_bus_dmamap_create, + .map_destroy = bounce_bus_dmamap_destroy, + .mem_alloc = bounce_bus_dmamem_alloc, + .mem_free = bounce_bus_dmamem_free, + .load_phys = bounce_bus_dmamap_load_phys, + .load_buffer = bounce_bus_dmamap_load_buffer, + .map_waitok = bounce_bus_dmamap_waitok, + .map_complete = bounce_bus_dmamap_complete, + .map_unload = bounce_bus_dmamap_unload, + .map_sync = bounce_bus_dmamap_sync +}; diff --git a/sys/x86/x86/busdma_machdep.c b/sys/x86/x86/busdma_machdep.c index f3e94e1..445bc55 100644 --- a/sys/x86/x86/busdma_machdep.c +++ b/sys/x86/x86/busdma_machdep.c @@ -31,160 +31,17 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #include -#include #include #include -#include #include - #include #include -#include -#include -#include - -#include +#include #include -#include -#include - -#ifdef __i386__ -#define MAX_BPAGES 512 -#else -#define MAX_BPAGES 8192 -#endif -#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 -#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 - -struct bounce_zone; - -struct bus_dma_tag { - bus_dma_tag_t parent; - bus_size_t alignment; - bus_addr_t boundary; - bus_addr_t lowaddr; - bus_addr_t highaddr; - bus_dma_filter_t *filter; - void *filterarg; - bus_size_t maxsize; - u_int nsegments; - bus_size_t maxsegsz; - int flags; - int ref_count; - int map_count; - bus_dma_lock_t *lockfunc; - void *lockfuncarg; - bus_dma_segment_t *segments; - struct bounce_zone *bounce_zone; -}; - -struct bounce_page { - vm_offset_t vaddr; /* kva of bounce buffer */ - bus_addr_t busaddr; /* Physical address */ - vm_offset_t datavaddr; /* kva of client data */ - bus_addr_t dataaddr; /* client physical address */ - bus_size_t datacount; /* client data count */ - STAILQ_ENTRY(bounce_page) links; -}; - -int busdma_swi_pending; - -struct bounce_zone { - STAILQ_ENTRY(bounce_zone) links; - STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; - int total_bpages; - int free_bpages; - int reserved_bpages; - int active_bpages; - int total_bounced; - int total_deferred; - int map_count; - bus_size_t alignment; - bus_addr_t lowaddr; - char zoneid[8]; - char lowaddrid[20]; - struct sysctl_ctx_list sysctl_tree; - struct sysctl_oid *sysctl_tree_top; -}; - -static struct mtx bounce_lock; -static int total_bpages; -static int 
busdma_zonecount; -static STAILQ_HEAD(, bounce_zone) bounce_zone_list; - -static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); -SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, - "Total bounce pages"); - -struct bus_dmamap { - struct bp_list bpages; - int pagesneeded; - int pagesreserved; - bus_dma_tag_t dmat; - struct memdesc mem; - bus_dmamap_callback_t *callback; - void *callback_arg; - STAILQ_ENTRY(bus_dmamap) links; -}; - -static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; -static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; -static struct bus_dmamap nobounce_dmamap, contig_dmamap; - -static void init_bounce_pages(void *dummy); -static int alloc_bounce_zone(bus_dma_tag_t dmat); -static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); -static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, - int commit); -static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, - vm_offset_t vaddr, bus_addr_t addr, - bus_size_t size); -static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); -int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); -static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, - pmap_t pmap, void *buf, bus_size_t buflen, - int flags); -static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, - vm_paddr_t buf, bus_size_t buflen, - int flags); -static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, - int flags); - -#ifdef XEN -#undef pmap_kextract -#define pmap_kextract pmap_kextract_ma -#endif - -/* - * Return true if a match is made. - * - * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. - * - * If paddr is within the bounds of the dma tag then call the filter callback - * to check for a match, if there is no filter callback then assume a match. - */ -int -run_filter(bus_dma_tag_t dmat, bus_addr_t paddr) -{ - int retval; - - retval = 0; - - do { - if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) - || ((paddr & (dmat->alignment - 1)) != 0)) - && (dmat->filter == NULL - || (*dmat->filter)(dmat->filterarg, paddr) != 0)) - retval = 1; - - dmat = dmat->parent; - } while (retval == 0 && dmat != NULL); - return (retval); -} +#include /* * Convenience function for manipulating driver locks from busdma (during @@ -216,169 +73,147 @@ busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) * with the tag are meant to never be defered. * XXX Should have a way to identify which driver is responsible here. */ -static void -dflt_lock(void *arg, bus_dma_lock_op_t op) +void +bus_dma_dflt_lock(void *arg, bus_dma_lock_op_t op) { + panic("driver error: busdma dflt_lock called"); } /* - * Allocate a device specific dma_tag. + * Return true if a match is made. + * + * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. + * + * If paddr is within the bounds of the dma tag then call the filter callback + * to check for a match, if there is no filter callback then assume a match. 
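+ *
+ * The test is repeated for every parent tag in the chain, so address
+ * restrictions inherited from ancestors still force a bounce even
+ * though the child tag has encapsulated most of their fields.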
*/ int -bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, - bus_addr_t boundary, bus_addr_t lowaddr, - bus_addr_t highaddr, bus_dma_filter_t *filter, - void *filterarg, bus_size_t maxsize, int nsegments, - bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, - void *lockfuncarg, bus_dma_tag_t *dmat) +bus_dma_run_filter(struct bus_dma_tag_common *tc, bus_addr_t paddr) +{ + int retval; + + retval = 0; + do { + if (((paddr > tc->lowaddr && paddr <= tc->highaddr) || + ((paddr & (tc->alignment - 1)) != 0)) && + (tc->filter == NULL || + (*tc->filter)(tc->filterarg, paddr) != 0)) + retval = 1; + + tc = tc->parent; + } while (retval == 0 && tc != NULL); + return (retval); +} + +int +common_bus_dma_tag_create(struct bus_dma_tag_common *parent, + bus_size_t alignment, + bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, + bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, + int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, + void *lockfuncarg, size_t sz, void **dmat) { - bus_dma_tag_t newtag; - int error = 0; + void *newtag; + struct bus_dma_tag_common *common; + KASSERT(sz >= sizeof(struct bus_dma_tag_common), ("sz")); /* Basic sanity checking */ if (boundary != 0 && boundary < maxsegsz) maxsegsz = boundary; - - if (maxsegsz == 0) { + if (maxsegsz == 0) return (EINVAL); - } - /* Return a NULL tag on failure */ *dmat = NULL; - newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, - M_ZERO | M_NOWAIT); + newtag = malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT); if (newtag == NULL) { CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, 0, ENOMEM); return (ENOMEM); } - newtag->parent = parent; - newtag->alignment = alignment; - newtag->boundary = boundary; - newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); - newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1); - newtag->filter = filter; - newtag->filterarg = filterarg; - newtag->maxsize = maxsize; - newtag->nsegments = nsegments; - newtag->maxsegsz = maxsegsz; - newtag->flags = flags; - newtag->ref_count = 1; /* Count ourself */ - newtag->map_count = 0; + common = newtag; + common->impl = &bus_dma_bounce_impl; + common->parent = parent; + common->alignment = alignment; + common->boundary = boundary; + common->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); + common->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1); + common->filter = filter; + common->filterarg = filterarg; + common->maxsize = maxsize; + common->nsegments = nsegments; + common->maxsegsz = maxsegsz; + common->flags = flags; + common->ref_count = 1; /* Count ourself */ if (lockfunc != NULL) { - newtag->lockfunc = lockfunc; - newtag->lockfuncarg = lockfuncarg; + common->lockfunc = lockfunc; + common->lockfuncarg = lockfuncarg; } else { - newtag->lockfunc = dflt_lock; - newtag->lockfuncarg = NULL; + common->lockfunc = bus_dma_dflt_lock; + common->lockfuncarg = NULL; } - newtag->segments = NULL; /* Take into account any restrictions imposed by our parent tag */ if (parent != NULL) { - newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); - newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); - if (newtag->boundary == 0) - newtag->boundary = parent->boundary; - else if (parent->boundary != 0) - newtag->boundary = MIN(parent->boundary, - newtag->boundary); - if ((newtag->filter != NULL) || - ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0)) - newtag->flags |= BUS_DMA_COULD_BOUNCE; - if (newtag->filter == NULL) { + common->impl = 
parent->impl; + common->lowaddr = MIN(parent->lowaddr, common->lowaddr); + common->highaddr = MAX(parent->highaddr, common->highaddr); + if (common->boundary == 0) + common->boundary = parent->boundary; + else if (parent->boundary != 0) { + common->boundary = MIN(parent->boundary, + common->boundary); + } + if (common->filter == NULL) { /* * Short circuit looking at our parent directly * since we have encapsulated all of its information */ - newtag->filter = parent->filter; - newtag->filterarg = parent->filterarg; - newtag->parent = parent->parent; + common->filter = parent->filter; + common->filterarg = parent->filterarg; + common->parent = parent->parent; } - if (newtag->parent != NULL) - atomic_add_int(&parent->ref_count, 1); + atomic_add_int(&parent->ref_count, 1); } + *dmat = common; + return (0); +} - if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) - || newtag->alignment > 1) - newtag->flags |= BUS_DMA_COULD_BOUNCE; - - if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && - (flags & BUS_DMA_ALLOCNOW) != 0) { - struct bounce_zone *bz; - - /* Must bounce */ - - if ((error = alloc_bounce_zone(newtag)) != 0) { - free(newtag, M_DEVBUF); - return (error); - } - bz = newtag->bounce_zone; - - if (ptoa(bz->total_bpages) < maxsize) { - int pages; - - pages = atop(maxsize) - bz->total_bpages; +/* + * Allocate a device specific dma_tag. + */ +int +bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, + bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, + bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, + int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, + void *lockfuncarg, bus_dma_tag_t *dmat) +{ + struct bus_dma_tag_common *tc; + int error; - /* Add pages to our bounce pool */ - if (alloc_bounce_pages(newtag, pages) < pages) - error = ENOMEM; - } - /* Performed initial allocation */ - newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; - } - - if (error != 0) { - free(newtag, M_DEVBUF); + if (parent == NULL) { + error = bus_dma_bounce_impl.tag_create(parent, alignment, + boundary, lowaddr, highaddr, filter, filterarg, maxsize, + nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat); } else { - *dmat = newtag; + tc = (struct bus_dma_tag_common *)parent; + error = tc->impl->tag_create(parent, alignment, + boundary, lowaddr, highaddr, filter, filterarg, maxsize, + nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat); } - CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", - __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); return (error); } int bus_dma_tag_destroy(bus_dma_tag_t dmat) { - bus_dma_tag_t dmat_copy; - int error; - - error = 0; - dmat_copy = dmat; - - if (dmat != NULL) { - - if (dmat->map_count != 0) { - error = EBUSY; - goto out; - } - - while (dmat != NULL) { - bus_dma_tag_t parent; + struct bus_dma_tag_common *tc; - parent = dmat->parent; - atomic_subtract_int(&dmat->ref_count, 1); - if (dmat->ref_count == 0) { - if (dmat->segments != NULL) - free(dmat->segments, M_DEVBUF); - free(dmat, M_DEVBUF); - /* - * Last reference count, so - * release our reference - * count on our parent. 
- */ - dmat = parent; - } else - dmat = NULL; - } - } -out: - CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); - return (error); + tc = (struct bus_dma_tag_common *)dmat; + return (tc->impl->tag_destroy(dmat)); } /* @@ -388,83 +223,10 @@ out: int bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { - int error; - - error = 0; - - if (dmat->segments == NULL) { - dmat->segments = (bus_dma_segment_t *)malloc( - sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, - M_NOWAIT); - if (dmat->segments == NULL) { - CTR3(KTR_BUSDMA, "%s: tag %p error %d", - __func__, dmat, ENOMEM); - return (ENOMEM); - } - } - - /* - * Bouncing might be required if the driver asks for an active - * exclusion region, a data alignment that is stricter than 1, and/or - * an active address boundary. - */ - if (dmat->flags & BUS_DMA_COULD_BOUNCE) { + struct bus_dma_tag_common *tc; - /* Must bounce */ - struct bounce_zone *bz; - int maxpages; - - if (dmat->bounce_zone == NULL) { - if ((error = alloc_bounce_zone(dmat)) != 0) - return (error); - } - bz = dmat->bounce_zone; - - *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, - M_NOWAIT | M_ZERO); - if (*mapp == NULL) { - CTR3(KTR_BUSDMA, "%s: tag %p error %d", - __func__, dmat, ENOMEM); - return (ENOMEM); - } - - /* Initialize the new map */ - STAILQ_INIT(&((*mapp)->bpages)); - - /* - * Attempt to add pages to our pool on a per-instance - * basis up to a sane limit. - */ - if (dmat->alignment > 1) - maxpages = MAX_BPAGES; - else - maxpages = MIN(MAX_BPAGES, Maxmem -atop(dmat->lowaddr)); - if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 - || (bz->map_count > 0 && bz->total_bpages < maxpages)) { - int pages; - - pages = MAX(atop(dmat->maxsize), 1); - pages = MIN(maxpages - bz->total_bpages, pages); - pages = MAX(pages, 1); - if (alloc_bounce_pages(dmat, pages) < pages) - error = ENOMEM; - - if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { - if (error == 0) - dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; - } else { - error = 0; - } - } - bz->map_count++; - } else { - *mapp = NULL; - } - if (error == 0) - dmat->map_count++; - CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", - __func__, dmat, dmat->flags, error); - return (error); + tc = (struct bus_dma_tag_common *)dmat; + return (tc->impl->map_create(dmat, flags, mapp)); } /* @@ -474,19 +236,10 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) int bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) { - if (map != NULL && map != &nobounce_dmamap && map != &contig_dmamap) { - if (STAILQ_FIRST(&map->bpages) != NULL) { - CTR3(KTR_BUSDMA, "%s: tag %p error %d", - __func__, dmat, EBUSY); - return (EBUSY); - } - if (dmat->bounce_zone) - dmat->bounce_zone->map_count--; - free(map, M_DEVBUF); - } - dmat->map_count--; - CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); - return (0); + struct bus_dma_tag_common *tc; + + tc = (struct bus_dma_tag_common *)dmat; + return (tc->impl->map_destroy(dmat, map)); } @@ -497,72 +250,12 @@ bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) */ int bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, - bus_dmamap_t *mapp) + bus_dmamap_t *mapp) { - vm_memattr_t attr; - int mflags; - - if (flags & BUS_DMA_NOWAIT) - mflags = M_NOWAIT; - else - mflags = M_WAITOK; - - /* If we succeed, no mapping/bouncing will be required */ - *mapp = NULL; - - if (dmat->segments == NULL) { - dmat->segments = (bus_dma_segment_t *)malloc( - sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, - mflags); - if (dmat->segments == 
NULL) { - CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", - __func__, dmat, dmat->flags, ENOMEM); - return (ENOMEM); - } - } - if (flags & BUS_DMA_ZERO) - mflags |= M_ZERO; - if (flags & BUS_DMA_NOCACHE) - attr = VM_MEMATTR_UNCACHEABLE; - else - attr = VM_MEMATTR_DEFAULT; + struct bus_dma_tag_common *tc; - /* - * XXX: - * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact - * alignment guarantees of malloc need to be nailed down, and the - * code below should be rewritten to take that into account. - * - * In the meantime, we'll warn the user if malloc gets it wrong. - */ - if ((dmat->maxsize <= PAGE_SIZE) && - (dmat->alignment < dmat->maxsize) && - dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) && - attr == VM_MEMATTR_DEFAULT) { - *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); - } else if (dmat->nsegments >= btoc(dmat->maxsize) && - dmat->alignment <= PAGE_SIZE && - (dmat->boundary == 0 || dmat->boundary >= dmat->lowaddr)) { - /* Page-based multi-segment allocations allowed */ - *vaddr = (void *)kmem_alloc_attr(kernel_map, dmat->maxsize, - mflags, 0ul, dmat->lowaddr, attr); - *mapp = &contig_dmamap; - } else { - *vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize, - mflags, 0ul, dmat->lowaddr, dmat->alignment ? - dmat->alignment : 1ul, dmat->boundary, attr); - *mapp = &contig_dmamap; - } - if (*vaddr == NULL) { - CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", - __func__, dmat, dmat->flags, ENOMEM); - return (ENOMEM); - } else if (vtophys(*vaddr) & (dmat->alignment - 1)) { - printf("bus_dmamem_alloc failed to align memory properly.\n"); - } - CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", - __func__, dmat, dmat->flags, 0); - return (0); + tc = (struct bus_dma_tag_common *)dmat; + return (tc->impl->mem_alloc(dmat, vaddr, flags, mapp)); } /* @@ -572,153 +265,10 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, void bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) { - /* - * dmamem does not need to be bounced, so the map should be - * NULL if malloc() was used and contig_dmamap if - * kmem_alloc_contig() was used. 
- */ - if (!(map == NULL || map == &contig_dmamap)) - panic("bus_dmamem_free: Invalid map freed\n"); - if (map == NULL) - free(vaddr, M_DEVBUF); - else - kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize); - CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); -} - -static void -_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, - bus_size_t buflen, int flags) -{ - bus_addr_t curaddr; - bus_size_t sgsize; - - if ((map != &nobounce_dmamap && map->pagesneeded == 0)) { - /* - * Count the number of bounce pages - * needed in order to complete this transfer - */ - curaddr = buf; - while (buflen != 0) { - sgsize = MIN(buflen, dmat->maxsegsz); - if (run_filter(dmat, curaddr)) { - sgsize = MIN(sgsize, PAGE_SIZE); - map->pagesneeded++; - } - curaddr += sgsize; - buflen -= sgsize; - } - CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); - } -} - -static void -_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, - void *buf, bus_size_t buflen, int flags) -{ - vm_offset_t vaddr; - vm_offset_t vendaddr; - bus_addr_t paddr; - - if ((map != &nobounce_dmamap && map->pagesneeded == 0)) { - CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " - "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem), - dmat->boundary, dmat->alignment); - CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d", - map, &nobounce_dmamap, map->pagesneeded); - /* - * Count the number of bounce pages - * needed in order to complete this transfer - */ - vaddr = (vm_offset_t)buf; - vendaddr = (vm_offset_t)buf + buflen; + struct bus_dma_tag_common *tc; - while (vaddr < vendaddr) { - bus_size_t sg_len; - - sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK); - if (pmap == kernel_pmap) - paddr = pmap_kextract(vaddr); - else - paddr = pmap_extract(pmap, vaddr); - if (run_filter(dmat, paddr) != 0) { - sg_len = roundup2(sg_len, dmat->alignment); - map->pagesneeded++; - } - vaddr += sg_len; - } - CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); - } -} - -static int -_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) -{ - - /* Reserve Necessary Bounce Pages */ - mtx_lock(&bounce_lock); - if (flags & BUS_DMA_NOWAIT) { - if (reserve_bounce_pages(dmat, map, 0) != 0) { - mtx_unlock(&bounce_lock); - return (ENOMEM); - } - } else { - if (reserve_bounce_pages(dmat, map, 1) != 0) { - /* Queue us for resources */ - STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); - mtx_unlock(&bounce_lock); - return (EINPROGRESS); - } - } - mtx_unlock(&bounce_lock); - - return (0); -} - -/* - * Add a single contiguous physical range to the segment list. - */ -static int -_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, - bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) -{ - bus_addr_t baddr, bmask; - int seg; - - /* - * Make sure we don't cross any boundaries. - */ - bmask = ~(dmat->boundary - 1); - if (dmat->boundary > 0) { - baddr = (curaddr + dmat->boundary) & bmask; - if (sgsize > (baddr - curaddr)) - sgsize = (baddr - curaddr); - } - - /* - * Insert chunk into a segment, coalescing with - * previous segment if possible. 
- */ - seg = *segp; - if (seg == -1) { - seg = 0; - segs[seg].ds_addr = curaddr; - segs[seg].ds_len = sgsize; - } else { - if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && - (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && - (dmat->boundary == 0 || - (segs[seg].ds_addr & bmask) == (curaddr & bmask))) - segs[seg].ds_len += sgsize; - else { - if (++seg >= dmat->nsegments) - return (0); - segs[seg].ds_addr = curaddr; - segs[seg].ds_len = sgsize; - } - } - *segp = seg; - return (sgsize); + tc = (struct bus_dma_tag_common *)dmat; + tc->impl->mem_free(dmat, vaddr, map); } /* @@ -726,53 +276,14 @@ _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, * the starting segment on entrace, and the ending segment on exit. */ int -_bus_dmamap_load_phys(bus_dma_tag_t dmat, - bus_dmamap_t map, - vm_paddr_t buf, bus_size_t buflen, - int flags, - bus_dma_segment_t *segs, - int *segp) +_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { - bus_size_t sgsize; - bus_addr_t curaddr; - int error; + struct bus_dma_tag_common *tc; - if (map == NULL || map == &contig_dmamap) - map = &nobounce_dmamap; - - if (segs == NULL) - segs = dmat->segments; - - if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { - _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); - if (map->pagesneeded != 0) { - error = _bus_dmamap_reserve_pages(dmat, map, flags); - if (error) - return (error); - } - } - - while (buflen > 0) { - curaddr = buf; - sgsize = MIN(buflen, dmat->maxsegsz); - if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && - map->pagesneeded != 0 && run_filter(dmat, curaddr)) { - sgsize = MIN(sgsize, PAGE_SIZE); - curaddr = add_bounce_page(dmat, map, 0, curaddr, - sgsize); - } - sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, - segp); - if (sgsize == 0) - break; - buf += sgsize; - buflen -= sgsize; - } - - /* - * Did we fit? - */ - return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ + tc = (struct bus_dma_tag_common *)dmat; + return (tc->impl->load_phys(dmat, map, buf, buflen, flags, segs, + segp)); } /* @@ -780,96 +291,35 @@ _bus_dmamap_load_phys(bus_dma_tag_t dmat, * the starting segment on entrace, and the ending segment on exit. */ int -_bus_dmamap_load_buffer(bus_dma_tag_t dmat, - bus_dmamap_t map, - void *buf, bus_size_t buflen, - pmap_t pmap, - int flags, - bus_dma_segment_t *segs, - int *segp) +_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, + bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, + int *segp) { - bus_size_t sgsize; - bus_addr_t curaddr; - vm_offset_t vaddr; - int error; - - if (map == NULL || map == &contig_dmamap) - map = &nobounce_dmamap; - - if (segs == NULL) - segs = dmat->segments; - - if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { - _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); - if (map->pagesneeded != 0) { - error = _bus_dmamap_reserve_pages(dmat, map, flags); - if (error) - return (error); - } - } - - vaddr = (vm_offset_t)buf; + struct bus_dma_tag_common *tc; - while (buflen > 0) { - bus_size_t max_sgsize; - - /* - * Get the physical address for this segment. - */ - if (pmap == kernel_pmap) - curaddr = pmap_kextract(vaddr); - else - curaddr = pmap_extract(pmap, vaddr); - - /* - * Compute the segment size, and adjust counts. 
- */ - max_sgsize = MIN(buflen, dmat->maxsegsz); - sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK); - if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && - map->pagesneeded != 0 && run_filter(dmat, curaddr)) { - sgsize = roundup2(sgsize, dmat->alignment); - sgsize = MIN(sgsize, max_sgsize); - curaddr = add_bounce_page(dmat, map, vaddr, curaddr, - sgsize); - } else { - sgsize = MIN(sgsize, max_sgsize); - } - sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, - segp); - if (sgsize == 0) - break; - vaddr += sgsize; - buflen -= sgsize; - } - - /* - * Did we fit? - */ - return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ + tc = (struct bus_dma_tag_common *)dmat; + return (tc->impl->load_buffer(dmat, map, buf, buflen, pmap, flags, segs, + segp)); } void __bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, - struct memdesc *mem, bus_dmamap_callback_t *callback, - void *callback_arg) + struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) { - if (map != NULL) { - map->mem = *mem; - map->dmat = dmat; - map->callback = callback; - map->callback_arg = callback_arg; - } + struct bus_dma_tag_common *tc; + + tc = (struct bus_dma_tag_common *)dmat; + tc->impl->map_waitok(dmat, map, mem, callback, callback_arg); } bus_dma_segment_t * _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, - bus_dma_segment_t *segs, int nsegs, int error) + bus_dma_segment_t *segs, int nsegs, int error) { + struct bus_dma_tag_common *tc; - if (segs == NULL) - segs = dmat->segments; - return (segs); + tc = (struct bus_dma_tag_common *)dmat; + return (tc->impl->map_complete(dmat, map, segs, nsegs, error)); } /* @@ -878,317 +328,17 @@ _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, void _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) { - struct bounce_page *bpage; + struct bus_dma_tag_common *tc; - while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { - STAILQ_REMOVE_HEAD(&map->bpages, links); - free_bounce_page(dmat, bpage); - } + tc = (struct bus_dma_tag_common *)dmat; + tc->impl->map_unload(dmat, map); } void _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { - struct bounce_page *bpage; - - if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { - /* - * Handle data bouncing. 
We might also - * want to add support for invalidating - * the caches on broken hardware - */ - CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " - "performing bounce", __func__, op, dmat, dmat->flags); - - if (op & BUS_DMASYNC_PREWRITE) { - while (bpage != NULL) { - if (bpage->datavaddr != 0) - bcopy((void *)bpage->datavaddr, - (void *)bpage->vaddr, - bpage->datacount); - else - physcopyout(bpage->dataaddr, - (void *)bpage->vaddr, - bpage->datacount); - bpage = STAILQ_NEXT(bpage, links); - } - dmat->bounce_zone->total_bounced++; - } - - if (op & BUS_DMASYNC_POSTREAD) { - while (bpage != NULL) { - if (bpage->datavaddr != 0) - bcopy((void *)bpage->vaddr, - (void *)bpage->datavaddr, - bpage->datacount); - else - physcopyin((void *)bpage->vaddr, - bpage->dataaddr, - bpage->datacount); - bpage = STAILQ_NEXT(bpage, links); - } - dmat->bounce_zone->total_bounced++; - } - } -} + struct bus_dma_tag_common *tc; -static void -init_bounce_pages(void *dummy __unused) -{ - - total_bpages = 0; - STAILQ_INIT(&bounce_zone_list); - STAILQ_INIT(&bounce_map_waitinglist); - STAILQ_INIT(&bounce_map_callbacklist); - mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); -} -SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); - -static struct sysctl_ctx_list * -busdma_sysctl_tree(struct bounce_zone *bz) -{ - return (&bz->sysctl_tree); -} - -static struct sysctl_oid * -busdma_sysctl_tree_top(struct bounce_zone *bz) -{ - return (bz->sysctl_tree_top); -} - -#if defined(__amd64__) || defined(PAE) -#define SYSCTL_ADD_BUS_SIZE_T SYSCTL_ADD_UQUAD -#else -#define SYSCTL_ADD_BUS_SIZE_T(ctx, parent, nbr, name, flag, ptr, desc) \ - SYSCTL_ADD_UINT(ctx, parent, nbr, name, flag, ptr, 0, desc) -#endif - -static int -alloc_bounce_zone(bus_dma_tag_t dmat) -{ - struct bounce_zone *bz; - - /* Check to see if we already have a suitable zone */ - STAILQ_FOREACH(bz, &bounce_zone_list, links) { - if ((dmat->alignment <= bz->alignment) - && (dmat->lowaddr >= bz->lowaddr)) { - dmat->bounce_zone = bz; - return (0); - } - } - - if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, - M_NOWAIT | M_ZERO)) == NULL) - return (ENOMEM); - - STAILQ_INIT(&bz->bounce_page_list); - bz->free_bpages = 0; - bz->reserved_bpages = 0; - bz->active_bpages = 0; - bz->lowaddr = dmat->lowaddr; - bz->alignment = MAX(dmat->alignment, PAGE_SIZE); - bz->map_count = 0; - snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); - busdma_zonecount++; - snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); - STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); - dmat->bounce_zone = bz; - - sysctl_ctx_init(&bz->sysctl_tree); - bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, - SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, - CTLFLAG_RD, 0, ""); - if (bz->sysctl_tree_top == NULL) { - sysctl_ctx_free(&bz->sysctl_tree); - return (0); /* XXX error code? 
*/ - } - - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, - "Total bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, - "Free bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, - "Reserved bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, - "Active bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, - "Total bounce requests"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, - "Total bounce requests that were deferred"); - SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); - SYSCTL_ADD_BUS_SIZE_T(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "alignment", CTLFLAG_RD, &bz->alignment, ""); - - return (0); -} - -static int -alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) -{ - struct bounce_zone *bz; - int count; - - bz = dmat->bounce_zone; - count = 0; - while (numpages > 0) { - struct bounce_page *bpage; - - bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, - M_NOWAIT | M_ZERO); - - if (bpage == NULL) - break; - bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, - M_NOWAIT, 0ul, - bz->lowaddr, - PAGE_SIZE, - 0); - if (bpage->vaddr == 0) { - free(bpage, M_DEVBUF); - break; - } - bpage->busaddr = pmap_kextract(bpage->vaddr); - mtx_lock(&bounce_lock); - STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); - total_bpages++; - bz->total_bpages++; - bz->free_bpages++; - mtx_unlock(&bounce_lock); - count++; - numpages--; - } - return (count); -} - -static int -reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) -{ - struct bounce_zone *bz; - int pages; - - mtx_assert(&bounce_lock, MA_OWNED); - bz = dmat->bounce_zone; - pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); - if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) - return (map->pagesneeded - (map->pagesreserved + pages)); - bz->free_bpages -= pages; - bz->reserved_bpages += pages; - map->pagesreserved += pages; - pages = map->pagesneeded - map->pagesreserved; - - return (pages); -} - -static bus_addr_t -add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, - bus_addr_t addr, bus_size_t size) -{ - struct bounce_zone *bz; - struct bounce_page *bpage; - - KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); - KASSERT(map != NULL && map != &nobounce_dmamap && map != &contig_dmamap, - ("add_bounce_page: bad map %p", map)); - - bz = dmat->bounce_zone; - if (map->pagesneeded == 0) - panic("add_bounce_page: map doesn't need any pages"); - map->pagesneeded--; - - if (map->pagesreserved == 0) - panic("add_bounce_page: map doesn't need any pages"); - map->pagesreserved--; - - mtx_lock(&bounce_lock); - bpage = STAILQ_FIRST(&bz->bounce_page_list); - if (bpage == NULL) - panic("add_bounce_page: free page list is empty"); - - 
STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); - bz->reserved_bpages--; - bz->active_bpages++; - mtx_unlock(&bounce_lock); - - if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { - /* Page offset needs to be preserved. */ - bpage->vaddr |= vaddr & PAGE_MASK; - bpage->busaddr |= vaddr & PAGE_MASK; - } - bpage->datavaddr = vaddr; - bpage->dataaddr = addr; - bpage->datacount = size; - STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); - return (bpage->busaddr); -} - -static void -free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) -{ - struct bus_dmamap *map; - struct bounce_zone *bz; - - bz = dmat->bounce_zone; - bpage->datavaddr = 0; - bpage->datacount = 0; - if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { - /* - * Reset the bounce page to start at offset 0. Other uses - * of this bounce page may need to store a full page of - * data and/or assume it starts on a page boundary. - */ - bpage->vaddr &= ~PAGE_MASK; - bpage->busaddr &= ~PAGE_MASK; - } - - mtx_lock(&bounce_lock); - STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); - bz->free_bpages++; - bz->active_bpages--; - if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { - if (reserve_bounce_pages(map->dmat, map, 1) == 0) { - STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); - STAILQ_INSERT_TAIL(&bounce_map_callbacklist, - map, links); - busdma_swi_pending = 1; - bz->total_deferred++; - swi_sched(vm_ih, 0); - } - } - mtx_unlock(&bounce_lock); -} - -void -busdma_swi(void) -{ - bus_dma_tag_t dmat; - struct bus_dmamap *map; - - mtx_lock(&bounce_lock); - while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { - STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); - mtx_unlock(&bounce_lock); - dmat = map->dmat; - (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); - bus_dmamap_load_mem(map->dmat, map, &map->mem, - map->callback, map->callback_arg, - BUS_DMA_WAITOK); - (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); - mtx_lock(&bounce_lock); - } - mtx_unlock(&bounce_lock); + tc = (struct bus_dma_tag_common *)dmat; + tc->impl->map_sync(dmat, map, op); }
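
The busdma_machdep.c hunk above reduces every public busdma entry point to an
indirect call through the tag's method table, so an alternate implementation
such as the DMAR-backed one only has to supply its own struct bus_dma_impl.
The following self-contained sketch shows that dispatch pattern in isolation;
it is illustrative only, and the names tag_common, dma_impl, bounce_impl, and
dmamap_create are invented stand-ins for the kernel's bus_dma_tag_common,
bus_dma_impl, bus_dma_bounce_impl, and bus_dmamap_create():

/*
 * Illustrative userland sketch of the method-table dispatch -- not part
 * of the patch.
 */
#include <stdio.h>

struct tag_common;

struct dma_impl {                       /* cf. struct bus_dma_impl */
        int (*map_create)(struct tag_common *tc);
};

struct tag_common {                     /* cf. struct bus_dma_tag_common */
        const struct dma_impl *impl;    /* method table for this tag */
        struct tag_common *parent;
};

static int
bounce_map_create(struct tag_common *tc)
{

        printf("bounce implementation called\n");
        return (0);
}

static const struct dma_impl bounce_impl = {
        .map_create = bounce_map_create,
};

/*
 * Public entry point: a pure dispatcher, like the rewritten
 * bus_dmamap_create() above.
 */
static int
dmamap_create(struct tag_common *tc)
{

        return (tc->impl->map_create(tc));
}

int
main(void)
{
        struct tag_common parent = { .impl = &bounce_impl, .parent = NULL };
        /* A child inherits impl, cf. common_bus_dma_tag_create(). */
        struct tag_common child = { .impl = parent.impl, .parent = &parent };

        return (dmamap_create(&child));
}

Because a child tag created in common_bus_dma_tag_create() inherits
parent->impl, every later dmamap operation on a tag descended from a
DMAR-backed parent bypasses the bounce code entirely, without the driver
being aware of the switch.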