Index: alpha/alpha/busdma_machdep.c
===================================================================
RCS file: /usr/cvs/src/sys/alpha/alpha/Attic/busdma_machdep.c,v
retrieving revision 1.51.2.1
diff -u -r1.51.2.1 busdma_machdep.c
--- alpha/alpha/busdma_machdep.c	26 Sep 2005 19:35:33 -0000	1.51.2.1
+++ alpha/alpha/busdma_machdep.c	17 Apr 2007 16:04:00 -0000
@@ -77,7 +77,7 @@
 	STAILQ_ENTRY(bounce_page) links;
 };
 
-int busdma_swi_pending;
+int busdma_pyxis_bug, busdma_swi_pending;
 
 static struct mtx bounce_lock;
 static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
@@ -102,7 +102,7 @@
 
 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
-static struct bus_dmamap nobounce_dmamap;
+static struct bus_dmamap nobounce_dmamap, alloc_dmamap;
 
 static void init_bounce_pages(void *dummy);
 static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
@@ -128,8 +128,8 @@
 	retval = 0;
 
 	do {
-		if (paddr > dmat->lowaddr
-		 && paddr <= dmat->highaddr
+		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
+		 || ((paddr & (dmat->alignment - 1)) != 0))
 		 && (dmat->filter == NULL
 		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
 			retval = 1;
@@ -179,7 +179,8 @@
 #endif
 }
 
-#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
+#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
+#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
 /*
  * Allocate a device specific dma_tag.
  */
@@ -194,6 +195,10 @@
 	bus_dma_tag_t newtag;
 	int error = 0;
 
+	/* Basic sanity checking */
+	if (boundary != 0 && boundary < maxsegsz)
+		maxsegsz = boundary;
+
 	/* Return a NULL tag on failure */
 	*dmat = NULL;
 
@@ -245,8 +250,12 @@
 		if (newtag->parent != NULL)
 			atomic_add_int(&parent->ref_count, 1);
 	}
-
-	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
+
+	if (newtag->lowaddr < ptoa(Maxmem) || newtag->alignment > 1)
+		newtag->flags |= BUS_DMA_COULD_BOUNCE;
+
+	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+	    (flags & BUS_DMA_ALLOCNOW) != 0) {
 		/* Must bounce */
 
 		if (ptoa(total_bpages) < maxsize) {
@@ -261,7 +270,7 @@
 		/* Performed initial allocation */
 		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
 	}
-
+
 	if (error != 0) {
 		free(newtag, M_DEVBUF);
 	} else {
@@ -336,7 +345,7 @@
 			return (ENOMEM);
 	}
 
-	if (dmat->lowaddr < ptoa(Maxmem)) {
+	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
 		/* Must bounce */
 		int maxpages;
 
@@ -352,18 +361,23 @@
 		 * Attempt to add pages to our pool on a per-instance
 		 * basis up to a sane limit.
 		 */
-		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
+		if (dmat->alignment > 1)
+			maxpages = MAX_BPAGES;
+		else
+			maxpages = MIN(MAX_BPAGES,
+			    Maxmem - atop(dmat->lowaddr));
 		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
 		 || (dmat->map_count > 0 && total_bpages < maxpages)) {
 			int pages;
 
-			pages = atop(dmat->maxsize) + 1;
+			pages = MAX(atop(dmat->maxsize), 1);
 			pages = MIN(maxpages - total_bpages, pages);
+			pages = MAX(pages, 1);
 			if (alloc_bounce_pages(dmat, pages) < pages)
 				error = ENOMEM;
-
+
 			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
 				if (error == 0)
 					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
@@ -390,7 +404,7 @@
 		sgmap_free_region(chipset.sgmap, map->sgmaphandle);
 	}
 
-	if (map != NULL && map != &nobounce_dmamap) {
+	if (map != NULL && map != &nobounce_dmamap && map != &alloc_dmamap) {
 		if (STAILQ_FIRST(&map->bpages) != NULL)
 			return (EBUSY);
 		free(map, M_DEVBUF);
@@ -419,7 +433,7 @@
 		mflags |= M_ZERO;
 
 	/* If we succeed, no mapping/bouncing will be required */
-	*mapp = &nobounce_dmamap;
+	*mapp = &alloc_dmamap;
 
 	if (dmat->segments == NULL) {
 		dmat->segments = (bus_dma_segment_t *)malloc(
@@ -429,7 +443,9 @@
 			return (ENOMEM);
 	}
 
-	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
+	if ((dmat->maxsize <= PAGE_SIZE) &&
+	    (dmat->alignment < dmat->maxsize) &&
+	    dmat->lowaddr >= ptoa(Maxmem)) {
 		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
 	} else {
 		/*
@@ -457,9 +473,11 @@
 	 * dmamem does not need to be bounced, so the map should be
 	 * NULL
 	 */
-	if (map != &nobounce_dmamap)
+	if (map != &alloc_dmamap)
 		panic("bus_dmamem_free: Invalid map freed\n");
-	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
+	if ((dmat->maxsize <= PAGE_SIZE) &&
+	    (dmat->alignment < dmat->maxsize) &&
+	    dmat->lowaddr >= ptoa(Maxmem))
 		free(vaddr, M_DEVBUF);
 	else {
 		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
@@ -468,55 +486,38 @@
 
 #define BUS_DMAMAP_NSEGS ((64 * 1024 / PAGE_SIZE) + 1)
 
-/*
- * Map the buffer buf into bus space using the dmamap map.
- */
-
 vm_offset_t alpha_XXX_dmamap_or = 1024UL*1024UL*1024UL; /*XXX */
 
-int
-bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
-		bus_size_t buflen, bus_dmamap_callback_t *callback,
-		void *callback_arg, int flags)
+/*
+ * Utility function to load a linear buffer.  lastaddrp holds state
+ * between invocations (for multiple-buffer loads).  segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * first indicates if this is the first invocation of this function.
+ */
+static int
+_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
+			bus_dmamap_t map,
+			void *buf, bus_size_t buflen,
+			pmap_t pmap,
+			int flags,
+			bus_addr_t *lastaddrp,
+			bus_dma_segment_t *segs,
+			int *segp,
+			int first)
 {
-	bus_dma_segment_t segment;
-	vm_offset_t vaddr;
-	vm_offset_t paddr;
-	bus_dma_segment_t *sg;
-	int seg;
-	int error;
-	vm_offset_t nextpaddr;
-
-	error = 0;
-
-	if ((dmat->flags & BUS_DMA_ISA) && chipset.sgmap != NULL) {
-		/*
-		 * For ISA dma, we use the chipset's scatter-gather
-		 * map to map the tranfer into the ISA reachable range
-		 * of the bus address space.
-		 */
-		vaddr = trunc_page((vm_offset_t) buf);
-		dmat->segments = &segment;
-		dmat->segments[0].ds_addr =
-		    map->busaddress + (vm_offset_t) buf - vaddr;
-		dmat->segments[0].ds_len = buflen;
-		buflen = round_page((vm_offset_t) buf + buflen) - vaddr;
-		sgmap_load_region(chipset.sgmap,
-				  map->busaddress,
-				  vaddr,
-				  buflen);
-		map->buflen = buflen;
-		(*callback)(callback_arg, dmat->segments, 1, error);
-		dmat->segments = NULL;
-
-		return (0);
-	}
+	bus_size_t sgsize, boundary;
+	bus_addr_t curaddr, lastaddr, baddr, bmask;
+	vm_offset_t vaddr;
+	bus_addr_t paddr;
+	int needbounce = 0;
+	int seg;
 
 	/*
 	 * If we are being called during a callback, pagesneeded will
 	 * be non-zero, so we can avoid doing the work twice.
 	 */
-	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
+	if ((map != &nobounce_dmamap && map != &alloc_dmamap && map->pagesneeded == 0)
+	    && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
 		vm_offset_t vendaddr;
 
 		/*
@@ -527,9 +528,12 @@
 		vendaddr = (vm_offset_t)buf + buflen;
 
 		while (vaddr < vendaddr) {
-			paddr = pmap_kextract(vaddr);
+			if (pmap)
+				paddr = pmap_extract(pmap, vaddr);
+			else
+				paddr = pmap_kextract(vaddr);
 			if (run_filter(dmat, paddr) != 0) {
-
+				needbounce = 1;
 				map->pagesneeded++;
 			}
 			vaddr += PAGE_SIZE;
@@ -550,8 +554,6 @@
 			map->dmat = dmat;
 			map->buf = buf;
 			map->buflen = buflen;
-			map->callback = callback;
-			map->callback_arg = callback_arg;
 			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
 			mtx_unlock(&bounce_lock);
 
@@ -562,84 +564,20 @@
 
 	vaddr = (vm_offset_t)buf;
-	sg = &dmat->segments[0];
-	seg = 1;
-	sg->ds_len = 0;
-
-	nextpaddr = 0;
-
-	do {
-		bus_size_t size;
+	lastaddr = *lastaddrp;
 
-		paddr = pmap_kextract(vaddr);
-		size = PAGE_SIZE - (paddr & PAGE_MASK);
-		if (size > buflen)
-			size = buflen;
-
-		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
-			paddr = add_bounce_page(dmat, map, vaddr, size);
-		}
-
-		if (sg->ds_len == 0) {
-			sg->ds_addr = paddr | alpha_XXX_dmamap_or;
-			sg->ds_len = size;
-		} else if (paddr == nextpaddr) {
-			sg->ds_len += size;
-		} else {
-			/* Go to the next segment */
-			sg++;
-			seg++;
-			if (seg > dmat->nsegments)
-				break;
-			sg->ds_addr = paddr | alpha_XXX_dmamap_or;
-			sg->ds_len = size;
-		}
-		vaddr += size;
-		nextpaddr = paddr + size;
-		buflen -= size;
-
-	} while (buflen > 0);
-
-	if (buflen != 0) {
-		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
-		    buflen);
-		error = EFBIG;
+	/*
+	 * Enforce a boundary of 8k for loads of non-pre-allocated
+	 * buffers (i.e., buffers not allocated via
+	 * bus_dmamem_alloc()) for systems with the Pyxis pass 1 DMA
+	 * bug.
+	 */
+	boundary = dmat->boundary;
+	if (busdma_pyxis_bug && map != &alloc_dmamap) {
+		if (boundary == 0 || boundary > 8192)
+			boundary = 8192;
 	}
-
-	(*callback)(callback_arg, dmat->segments, seg, error);
-
-	return (0);
-}
-
-/*
- * Utility function to load a linear buffer.  lastaddrp holds state
- * between invocations (for multiple-buffer loads).  segp contains
- * the starting segment on entrace, and the ending segment on exit.
- * first indicates if this is the first invocation of this function.
- */
-static int
-_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
-			void *buf, bus_size_t buflen,
-			struct thread *td,
-			int flags,
-			vm_offset_t *lastaddrp,
-			bus_dma_segment_t *segs,
-			int *segp,
-			int first)
-{
-	bus_size_t sgsize;
-	bus_addr_t curaddr, lastaddr, baddr, bmask;
-	vm_offset_t vaddr = (vm_offset_t)buf;
-	int seg;
-	pmap_t pmap;
-
-	if (td != NULL)
-		pmap = vmspace_pmap(td->td_proc->p_vmspace);
-	else
-		pmap = NULL;
-
-	lastaddr = *lastaddrp;
-	bmask = ~(dmat->boundary - 1);
+	bmask = ~(boundary - 1);
 
 	for (seg = *segp; buflen > 0 ; ) {
@@ -660,12 +598,15 @@
 		/*
 		 * Make sure we don't cross any boundaries.
 		 */
-		if (dmat->boundary > 0) {
-			baddr = (curaddr + dmat->boundary) & bmask;
+		if (boundary > 0) {
+			baddr = (curaddr + boundary) & bmask;
 			if (sgsize > (baddr - curaddr))
 				sgsize = (baddr - curaddr);
 		}
 
+		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
+			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
+
 		/*
 		 * Insert chunk into a segment, coalescing with
 		 * previous segment if possible.
@@ -677,7 +618,7 @@
 		} else {
 			if (curaddr == lastaddr &&
 			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
-			    (dmat->boundary == 0 ||
+			    (boundary == 0 ||
 			    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
 				segs[seg].ds_len += sgsize;
 			else {
@@ -703,6 +644,65 @@
 }
 
 /*
+ * Map the buffer buf into bus space using the dmamap map.
+ */
+int
+bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+		bus_size_t buflen, bus_dmamap_callback_t *callback,
+		void *callback_arg, int flags)
+{
+	bus_addr_t lastaddr = 0;
+	int error, nsegs;
+
+	if ((dmat->flags & BUS_DMA_ISA) && chipset.sgmap != NULL) {
+		bus_dma_segment_t segment;
+		vm_offset_t vaddr;
+
+		/*
+		 * For ISA dma, we use the chipset's scatter-gather
+		 * map to map the transfer into the ISA reachable range
+		 * of the bus address space.
+		 */
+		vaddr = trunc_page((vm_offset_t) buf);
+		dmat->segments = &segment;
+		dmat->segments[0].ds_addr =
+		    map->busaddress + (vm_offset_t) buf - vaddr;
+		dmat->segments[0].ds_len = buflen;
+		buflen = round_page((vm_offset_t) buf + buflen) - vaddr;
+		sgmap_load_region(chipset.sgmap,
+				  map->busaddress,
+				  vaddr,
+				  buflen);
+		map->buflen = buflen;
+		(*callback)(callback_arg, dmat->segments, 1, 0);
+		dmat->segments = NULL;
+
+		return (0);
+	}
+
+	if (map != &nobounce_dmamap && map != &alloc_dmamap) {
+		map->callback = callback;
+		map->callback_arg = callback_arg;
+	}
+
+	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
+	    &lastaddr, dmat->segments, &nsegs, 1);
+
+	if (error == EINPROGRESS) {
+		return (error);
+	}
+
+	if (error)
+		(*callback)(callback_arg, dmat->segments, 0, error);
+	else
+		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);
+
+	if (error == ENOMEM)
+		return (error);
+	return (0);
+}
+
+/*
  * Like _bus_dmamap_load(), but for mbufs.
 */
 int
@@ -713,10 +713,9 @@
 {
 	int nsegs, error;
 
-	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
-	    ("bus_dmamap_load_mbuf: No support for bounce pages!"));
 	M_ASSERTPKTHDR(m0);
 
+	flags |= BUS_DMA_NOWAIT;
 	nsegs = 0;
 	error = 0;
 	if (m0->m_pkthdr.len <= dmat->maxsize) {
@@ -726,7 +725,7 @@
 
 		for (m = m0; m != NULL && error == 0; m = m->m_next) {
 			if (m->m_len > 0) {
-				error = _bus_dmamap_load_buffer(dmat,
+				error = _bus_dmamap_load_buffer(dmat, map,
 						m->m_data, m->m_len, NULL,
 						flags, &lastaddr, dmat->segments,
 						&nsegs, first);
@@ -754,10 +753,9 @@
 {
 	int error;
 
-	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
-	    ("bus_dmamap_load_mbuf: No support for bounce pages!"));
 	M_ASSERTPKTHDR(m0);
 
+	flags |= BUS_DMA_NOWAIT;
 	*nsegs = 0;
 	error = 0;
 	if (m0->m_pkthdr.len <= dmat->maxsize) {
@@ -767,7 +765,7 @@
 
 		for (m = m0; m != NULL && error == 0; m = m->m_next) {
 			if (m->m_len > 0) {
-				error = _bus_dmamap_load_buffer(dmat,
+				error = _bus_dmamap_load_buffer(dmat, map,
 						m->m_data, m->m_len, NULL,
 						flags, &lastaddr, segs, nsegs,
 						first);
@@ -795,19 +793,18 @@
 	int nsegs, error, first, i;
 	bus_size_t resid;
 	struct iovec *iov;
-	struct thread *td = NULL;
-
-	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
-	    ("bus_dmamap_load_uio: No support for bounce pages!"));
+	pmap_t pmap;
 
+	flags |= BUS_DMA_NOWAIT;
 	resid = uio->uio_resid;
 	iov = uio->uio_iov;
 
 	if (uio->uio_segflg == UIO_USERSPACE) {
-		td = uio->uio_td;
-		KASSERT(td != NULL,
+		KASSERT(uio->uio_td != NULL,
 		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
-	}
+		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
+	} else
+		pmap = NULL;
 
 	nsegs = 0;
 	error = 0;
@@ -822,9 +819,9 @@
 		caddr_t addr = (caddr_t) iov[i].iov_base;
 
 		if (minlen > 0) {
-			error = _bus_dmamap_load_buffer(dmat, addr, minlen,
-			    td, flags, &lastaddr, dmat->segments,
-			    &nsegs, first);
+			error = _bus_dmamap_load_buffer(dmat, map,
+			    addr, minlen, pmap, flags, &lastaddr,
+			    dmat->segments, &nsegs, first);
 			first = 0;
 
 			resid -= minlen;
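[Reviewer note, not part of the patch] The busdma_machdep.c changes above widen the bounce criteria: run_filter() now flags a page that violates the tag's alignment even when it is addressable, and bus_dma_tag_create() marks any tag with alignment > 1 (or a restricted lowaddr) as BUS_DMA_COULD_BOUNCE so bounce pages are reserved for it. A minimal sketch of a tag that exercises the new alignment path, assuming the stock RELENG_6 bus_dma_tag_create() signature; the softc and all "example_" names are hypothetical and error handling is elided:

	/*
	 * Sketch only: a 4K-aligned DMA tag.  With this patch the tag is
	 * marked BUS_DMA_COULD_BOUNCE because alignment > 1, and any page
	 * of a loaded buffer that fails (paddr & (alignment - 1)) == 0 is
	 * redirected through a bounce page even though it is below lowaddr.
	 */
	static int
	example_create_tag(struct example_softc *sc)
	{
		return (bus_dma_tag_create(NULL,	/* parent */
		    4096, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MAXBSIZE, 1, MAXBSIZE,	/* maxsize, nsegments, maxsegsz */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->example_tag));
	}

Such a tag previously bounced nothing on a machine with all memory below lowaddr; with the reworked run_filter() it bounces every misaligned page instead of silently handing the device a misaligned segment.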
Index: alpha/include/md_var.h
===================================================================
RCS file: /usr/cvs/src/sys/alpha/include/Attic/md_var.h,v
retrieving revision 1.23
diff -u -r1.23 md_var.h
--- alpha/include/md_var.h	17 Aug 2003 06:42:07 -0000	1.23
+++ alpha/include/md_var.h	26 Mar 2007 17:57:27 -0000
@@ -50,6 +50,7 @@
 #endif
 extern	long	Maxmem;
 extern	int	busdma_swi_pending;
+extern	int	busdma_pyxis_bug;
 extern	struct	rpb *hwrpb;
 extern	volatile int mc_expected;
 extern	volatile int mc_received;
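[Reviewer note, not part of the patch] The md_var.h hunk only exports busdma_pyxis_bug, which is defined in busdma_machdep.c, so the CIA attach code below can set it. The clamp it enables in _bus_dmamap_load_buffer() is ordinary boundary arithmetic; the standalone sketch below uses hypothetical addresses and compiles as plain userland C, showing how an 8192-byte boundary trims a chunk that would otherwise cross an 8K line:

	/* Sketch of the boundary clamp performed in _bus_dmamap_load_buffer(). */
	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uint64_t boundary = 8192;		/* Pyxis clamp value */
		uint64_t bmask = ~(boundary - 1);
		uint64_t curaddr = 0x12345f00;		/* 256 bytes below an 8K line */
		uint64_t sgsize = 4096;			/* chunk we would like to map */
		uint64_t baddr;

		baddr = (curaddr + boundary) & bmask;	/* next 8K boundary */
		if (sgsize > baddr - curaddr)
			sgsize = baddr - curaddr;	/* trimmed to 256 */
		printf("clamped sgsize: %ju\n", (uintmax_t)sgsize);
		return (0);
	}

The loop in _bus_dmamap_load_buffer() then continues at baddr with the remaining length, so no single segment handed to the device ever spans an 8K line while the workaround is active.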
Index: alpha/pci/cia.c
===================================================================
RCS file: /usr/cvs/src/sys/alpha/pci/Attic/cia.c,v
retrieving revision 1.44
diff -u -r1.44 cia.c
--- alpha/pci/cia.c	5 Jan 2005 20:05:52 -0000	1.44
+++ alpha/pci/cia.c	26 Mar 2007 17:54:19 -0000
@@ -490,6 +490,7 @@
 		ctrl &= ~(CTRL_RD_TYPE|CTRL_RL_TYPE|CTRL_RM_TYPE);
 		REGVAL(CIA_CSR_CTRL) = ctrl;
 		alpha_mb();
+		busdma_pyxis_bug = 1;
 	}
 #endif
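[Reviewer note, not part of the patch] cia.c sets busdma_pyxis_bug only on the pass 1 Pyxis workaround path, and the clamp deliberately skips maps carrying the new alloc_dmamap marker: memory obtained from bus_dmamem_alloc() already satisfies the tag's alignment and boundary at allocation time (malloc() or contigmalloc() per the bus_dmamem_alloc() hunk), so re-slicing it at 8K would only burn segments. A hedged sketch of that exempt path; the softc fields, EXAMPLE_RING_SIZE, and example_ring_cb are hypothetical and error handling is minimal:

	/*
	 * Sketch only: descriptor memory from bus_dmamem_alloc() is tracked
	 * with the alloc_dmamap marker, so this load keeps the tag's own
	 * boundary even when busdma_pyxis_bug is set.
	 */
	static int
	example_alloc_ring(struct example_softc *sc)
	{
		int error;

		error = bus_dmamem_alloc(sc->ring_tag, &sc->ring_mem,
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->ring_map);
		if (error != 0)
			return (error);

		/* This load is exempt from the 8K Pyxis boundary clamp. */
		return (bus_dmamap_load(sc->ring_tag, sc->ring_map,
		    sc->ring_mem, EXAMPLE_RING_SIZE, example_ring_cb, sc,
		    BUS_DMA_NOWAIT));
	}

Ordinary mbuf and uio loads, by contrast, go through _bus_dmamap_load_buffer() with a driver-supplied map and are clamped whenever the flag is set.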