Index: alpha/busdma_machdep.c
===================================================================
RCS file: /usr/cvs/src/sys/alpha/alpha/Attic/busdma_machdep.c,v
retrieving revision 1.51.2.1
diff -u -r1.51.2.1 busdma_machdep.c
--- alpha/busdma_machdep.c	26 Sep 2005 19:35:33 -0000	1.51.2.1
+++ alpha/busdma_machdep.c	8 Nov 2007 15:39:44 -0000
@@ -77,7 +77,7 @@
 	STAILQ_ENTRY(bounce_page) links;
 };
 
-int busdma_swi_pending;
+int busdma_pyxis_bug, busdma_swi_pending;
 static struct mtx bounce_lock;
 static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
@@ -128,8 +128,8 @@
 	retval = 0;
 	do {
-		if (paddr > dmat->lowaddr
-		    && paddr <= dmat->highaddr
+		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
+		    || ((paddr & (dmat->alignment - 1)) != 0))
 		    && (dmat->filter == NULL
 			|| (*dmat->filter)(dmat->filterarg, paddr) != 0))
 			retval = 1;
 
@@ -179,7 +179,8 @@
 #endif
 }
 
-#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
+#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3
+#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
 /*
  * Allocate a device specific dma_tag.
  */
@@ -194,10 +195,24 @@
 	bus_dma_tag_t newtag;
 	int error = 0;
 
+#ifdef notyet
+	/*
+	 * Enforce a boundary of 8k for the ATA data tag for systems
+	 * with the Pyxis pass 1 DMA bug. This is rather gross.
+	 */
+	if (busdma_pyxis_bug && boundary == 65536 && alignment == 2)
+		boundary = 8192;
+#endif
+
+	/* Basic sanity checking */
+	if (boundary != 0 && boundary < maxsegsz)
+		maxsegsz = boundary;
+
 	/* Return a NULL tag on failure */
 	*dmat = NULL;
 
-	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
+	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
+	    M_ZERO | M_NOWAIT);
 	if (newtag == NULL)
 		return (ENOMEM);
 
@@ -221,7 +236,6 @@
 		newtag->lockfunc = dflt_lock;
 		newtag->lockfuncarg = NULL;
 	}
-	newtag->segments = NULL;
 
 	/* Take into account any restrictions imposed by our parent tag */
@@ -245,8 +259,12 @@
 		if (newtag->parent != NULL)
 			atomic_add_int(&parent->ref_count, 1);
 	}
-	
-	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
+
+	if (newtag->lowaddr < ptoa(Maxmem) || newtag->alignment > 1)
+		newtag->flags |= BUS_DMA_COULD_BOUNCE;
+
+	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+	    (flags & BUS_DMA_ALLOCNOW) != 0) {
 		/* Must bounce */
 		if (ptoa(total_bpages) < maxsize) {
@@ -261,7 +279,7 @@
 		/* Performed initial allocation */
 		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
 	}
-	
+
 	if (error != 0) {
 		free(newtag, M_DEVBUF);
 	} else {
@@ -336,7 +354,7 @@
 		return (ENOMEM);
 	}
 
-	if (dmat->lowaddr < ptoa(Maxmem)) {
+	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
 		/* Must bounce */
 		int maxpages;
 
@@ -352,18 +370,21 @@
 		 * Attempt to add pages to our pool on a per-instance
 		 * basis up to a sane limit.
 		 */
-		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
+		if (dmat->alignment > 1)
+			maxpages = MAX_BPAGES;
+		else
+			maxpages = MIN(MAX_BPAGES,
+			    Maxmem - atop(dmat->lowaddr));
 		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
-		    || (dmat->map_count > 0
-		    && total_bpages < maxpages)) {
+		    || (dmat->map_count > 0 && total_bpages < maxpages)) {
 			int pages;
 
-			pages = atop(dmat->maxsize) + 1;
+			pages = MAX(atop(dmat->maxsize), 1);
 			pages = MIN(maxpages - total_bpages, pages);
-			
+			pages = MAX(pages, 1);
 			if (alloc_bounce_pages(dmat, pages) < pages)
 				error = ENOMEM;
-			
+
 			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
 				if (error == 0)
 					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
@@ -429,7 +450,9 @@
 		return (ENOMEM);
 	}
 
-	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
+	if ((dmat->maxsize <= PAGE_SIZE) &&
+	    (dmat->alignment < dmat->maxsize) &&
+	    dmat->lowaddr >= ptoa(Maxmem)) {
 		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
 	} else {
 		/*
@@ -459,7 +482,9 @@
 	 */
 	if (map != &nobounce_dmamap)
 		panic("bus_dmamem_free: Invalid map freed\n");
-	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
+	if ((dmat->maxsize <= PAGE_SIZE) &&
+	    (dmat->alignment < dmat->maxsize) &&
+	    dmat->lowaddr >= ptoa(Maxmem))
 		free(vaddr, M_DEVBUF);
 	else {
 		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
@@ -468,55 +493,38 @@
 
 #define BUS_DMAMAP_NSEGS ((64 * 1024 / PAGE_SIZE) + 1)
 
-/*
- * Map the buffer buf into bus space using the dmamap map.
- */
-
 vm_offset_t alpha_XXX_dmamap_or = 1024UL*1024UL*1024UL; /*XXX */
 
-int
-bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
-		bus_size_t buflen, bus_dmamap_callback_t *callback,
-		void *callback_arg, int flags)
+/*
+ * Utility function to load a linear buffer. lastaddrp holds state
+ * between invocations (for multiple-buffer loads). segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * first indicates if this is the first invocation of this function.
+ */
+static int
+_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
+			bus_dmamap_t map,
+			void *buf, bus_size_t buflen,
+			pmap_t pmap,
+			int flags,
+			bus_addr_t *lastaddrp,
+			bus_dma_segment_t *segs,
+			int *segp,
+			int first)
 {
-	bus_dma_segment_t segment;
-	vm_offset_t vaddr;
-	vm_offset_t paddr;
-	bus_dma_segment_t *sg;
-	int seg;
-	int error;
-	vm_offset_t nextpaddr;
-
-	error = 0;
-
-	if ((dmat->flags & BUS_DMA_ISA) && chipset.sgmap != NULL) {
-		/*
-		 * For ISA dma, we use the chipset's scatter-gather
-		 * map to map the tranfer into the ISA reachable range
-		 * of the bus address space.
-		 */
-		vaddr = trunc_page((vm_offset_t) buf);
-		dmat->segments = &segment;
-		dmat->segments[0].ds_addr =
-			map->busaddress + (vm_offset_t) buf - vaddr;
-		dmat->segments[0].ds_len = buflen;
-		buflen = round_page((vm_offset_t) buf + buflen) - vaddr;
-		sgmap_load_region(chipset.sgmap,
-				  map->busaddress,
-				  vaddr,
-				  buflen);
-		map->buflen = buflen;
-		(*callback)(callback_arg, dmat->segments, 1, error);
-		dmat->segments = NULL;
-
-		return (0);
-	}
+	bus_size_t sgsize;
+	bus_addr_t curaddr, lastaddr, baddr, bmask;
+	vm_offset_t vaddr;
+	bus_addr_t paddr;
+	int needbounce = 0;
+	int seg;
 
 	/*
 	 * If we are being called during a callback, pagesneeded will
 	 * be non-zero, so we can avoid doing the work twice.
 	 */
-	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
+	if ((map != &nobounce_dmamap && map->pagesneeded == 0)
+	    && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
 		vm_offset_t vendaddr;
 
 		/*
@@ -527,9 +535,12 @@
 		vendaddr = (vm_offset_t)buf + buflen;
 
 		while (vaddr < vendaddr) {
-			paddr = pmap_kextract(vaddr);
+			if (pmap)
+				paddr = pmap_extract(pmap, vaddr);
+			else
+				paddr = pmap_kextract(vaddr);
 			if (run_filter(dmat, paddr) != 0) {
-				
+				needbounce = 1;
 				map->pagesneeded++;
 			}
 			vaddr += PAGE_SIZE;
@@ -550,8 +561,6 @@
 			map->dmat = dmat;
 			map->buf = buf;
 			map->buflen = buflen;
-			map->callback = callback;
-			map->callback_arg = callback_arg;
 			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
 			    map, links);
 			mtx_unlock(&bounce_lock);
@@ -562,82 +571,6 @@
 	}
 
 	vaddr = (vm_offset_t)buf;
-	sg = &dmat->segments[0];
-	seg = 1;
-	sg->ds_len = 0;
-
-	nextpaddr = 0;
-
-	do {
-		bus_size_t size;
-
-		paddr = pmap_kextract(vaddr);
-		size = PAGE_SIZE - (paddr & PAGE_MASK);
-		if (size > buflen)
-			size = buflen;
-
-		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
-			paddr = add_bounce_page(dmat, map, vaddr, size);
-		}
-
-		if (sg->ds_len == 0) {
-			sg->ds_addr = paddr | alpha_XXX_dmamap_or;
-			sg->ds_len = size;
-		} else if (paddr == nextpaddr) {
-			sg->ds_len += size;
-		} else {
-			/* Go to the next segment */
-			sg++;
-			seg++;
-			if (seg > dmat->nsegments)
-				break;
-			sg->ds_addr = paddr | alpha_XXX_dmamap_or;
-			sg->ds_len = size;
-		}
-		vaddr += size;
-		nextpaddr = paddr + size;
-		buflen -= size;
-
-	} while (buflen > 0);
-
-	if (buflen != 0) {
-		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
-		    buflen);
-		error = EFBIG;
-	}
-
-	(*callback)(callback_arg, dmat->segments, seg, error);
-
-	return (0);
-}
-
-/*
- * Utility function to load a linear buffer. lastaddrp holds state
- * between invocations (for multiple-buffer loads). segp contains
- * the starting segment on entrace, and the ending segment on exit.
- * first indicates if this is the first invocation of this function.
- */
-static int
-_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
-			void *buf, bus_size_t buflen,
-			struct thread *td,
-			int flags,
-			vm_offset_t *lastaddrp,
-			bus_dma_segment_t *segs,
-			int *segp,
-			int first)
-{
-	bus_size_t sgsize;
-	bus_addr_t curaddr, lastaddr, baddr, bmask;
-	vm_offset_t vaddr = (vm_offset_t)buf;
-	int seg;
-	pmap_t pmap;
-
-	if (td != NULL)
-		pmap = vmspace_pmap(td->td_proc->p_vmspace);
-	else
-		pmap = NULL;
-
 	lastaddr = *lastaddrp;
 	bmask = ~(dmat->boundary - 1);
@@ -666,6 +599,9 @@
 			sgsize = (baddr - curaddr);
 		}
 
+		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
+			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
+
 		/*
 		 * Insert chunk into a segment, coalescing with
 		 * previous segment if possible.
@@ -703,6 +639,66 @@
 }
 
 /*
+ * Map the buffer buf into bus space using the dmamap map.
+ */
+int
+bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+		bus_size_t buflen, bus_dmamap_callback_t *callback,
+		void *callback_arg, int flags)
+{
+	bus_addr_t lastaddr = 0;
+	int error, nsegs = 0;
+
+	if ((dmat->flags & BUS_DMA_ISA) && chipset.sgmap != NULL) {
+		bus_dma_segment_t segment;
+		vm_offset_t vaddr;
+
+		/*
+		 * For ISA dma, we use the chipset's scatter-gather
+		 * map to map the transfer into the ISA reachable range
+		 * of the bus address space.
+		 */
+		vaddr = trunc_page((vm_offset_t) buf);
+		dmat->segments = &segment;
+		dmat->segments[0].ds_addr =
+		    map->busaddress + (vm_offset_t) buf - vaddr;
+		dmat->segments[0].ds_len = buflen;
+		buflen = round_page((vm_offset_t) buf + buflen) - vaddr;
+		sgmap_load_region(chipset.sgmap,
+		    map->busaddress,
+		    vaddr,
+		    buflen);
+		map->buflen = buflen;
+		(*callback)(callback_arg, dmat->segments, 1, 0);
+		dmat->segments = NULL;
+
+		return (0);
+	}
+
+	if (map != &nobounce_dmamap) {
+		flags |= BUS_DMA_WAITOK;
+		map->callback = callback;
+		map->callback_arg = callback_arg;
+	}
+
+	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
+	    &lastaddr, dmat->segments, &nsegs, 1);
+
+	if (error == EINPROGRESS) {
+		return (error);
+	}
+
+	if (error)
+		(*callback)(callback_arg, dmat->segments, 0, error);
+	else
+		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);
+
+	if (error == ENOMEM)
+		return (error);
+	return (0);
+}
+
+/*
  * Like _bus_dmamap_load(), but for mbufs.
  */
 int
@@ -713,10 +709,9 @@
 {
 	int nsegs, error;
 
-	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
-		("bus_dmamap_load_mbuf: No support for bounce pages!"));
 	M_ASSERTPKTHDR(m0);
 
+	flags |= BUS_DMA_NOWAIT;
 	nsegs = 0;
 	error = 0;
 	if (m0->m_pkthdr.len <= dmat->maxsize) {
@@ -726,7 +721,7 @@
 
 		for (m = m0; m != NULL && error == 0; m = m->m_next) {
 			if (m->m_len > 0) {
-				error = _bus_dmamap_load_buffer(dmat,
+				error = _bus_dmamap_load_buffer(dmat, map,
 						m->m_data, m->m_len,
 						NULL, flags, &lastaddr,
 						dmat->segments, &nsegs, first);
@@ -754,10 +749,9 @@
 {
 	int error;
 
-	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
-		("bus_dmamap_load_mbuf: No support for bounce pages!"));
 	M_ASSERTPKTHDR(m0);
 
+	flags |= BUS_DMA_NOWAIT;
 	*nsegs = 0;
 	error = 0;
 	if (m0->m_pkthdr.len <= dmat->maxsize) {
@@ -767,7 +761,7 @@
 
 		for (m = m0; m != NULL && error == 0; m = m->m_next) {
 			if (m->m_len > 0) {
-				error = _bus_dmamap_load_buffer(dmat,
+				error = _bus_dmamap_load_buffer(dmat, map,
 						m->m_data, m->m_len,
 						NULL, flags, &lastaddr,
 						segs, nsegs, first);
@@ -795,19 +789,18 @@
 	int nsegs, error, first, i;
 	bus_size_t resid;
 	struct iovec *iov;
-	struct thread *td = NULL;
-
-	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
-		("bus_dmamap_load_uio: No support for bounce pages!"));
+	pmap_t pmap;
 
+	flags |= BUS_DMA_NOWAIT;
 	resid = uio->uio_resid;
 	iov = uio->uio_iov;
 
 	if (uio->uio_segflg == UIO_USERSPACE) {
-		td = uio->uio_td;
-		KASSERT(td != NULL,
+		KASSERT(uio->uio_td != NULL,
 			("bus_dmamap_load_uio: USERSPACE but no proc"));
-	}
+		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
+	} else
+		pmap = NULL;
 
 	nsegs = 0;
 	error = 0;
@@ -822,9 +815,9 @@
 		caddr_t addr = (caddr_t) iov[i].iov_base;
 
 		if (minlen > 0) {
-			error = _bus_dmamap_load_buffer(dmat, addr, minlen,
-			    td, flags, &lastaddr, dmat->segments,
-			    &nsegs, first);
+			error = _bus_dmamap_load_buffer(dmat, map,
+			    addr, minlen, pmap, flags, &lastaddr,
+			    dmat->segments, &nsegs, first);
 			first = 0;
 
 			resid -= minlen;
Index: include/md_var.h
===================================================================
RCS file: /usr/cvs/src/sys/alpha/include/Attic/md_var.h,v
retrieving revision 1.23
diff -u -r1.23 md_var.h
--- include/md_var.h	17 Aug 2003 06:42:07 -0000	1.23
+++ include/md_var.h	26 Mar 2007 17:57:27 -0000
@@ -50,6 +50,7 @@
 #endif
 extern long Maxmem;
 extern int busdma_swi_pending;
+extern int busdma_pyxis_bug;
 extern struct rpb *hwrpb;
 extern volatile int mc_expected;
 extern volatile int mc_received;
Index: pci/cia.c
===================================================================
RCS file: /usr/cvs/src/sys/alpha/pci/Attic/cia.c,v
retrieving revision 1.44
diff -u -r1.44 cia.c
--- pci/cia.c	5 Jan 2005 20:05:52 -0000	1.44
+++ pci/cia.c	26 Mar 2007 17:54:19 -0000
@@ -490,6 +490,7 @@
 		ctrl &= ~(CTRL_RD_TYPE|CTRL_RL_TYPE|CTRL_RM_TYPE);
 		REGVAL(CIA_CSR_CTRL) = ctrl;
 		alpha_mb();
+		busdma_pyxis_bug = 1;
 	}
 #endif
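
Note (illustration only, not part of the patch): the heart of the change is the widened run_filter() test at the top of the busdma_machdep.c diff. A simplified, standalone restatement of that test is sketched below; the function name is made up, and the real run_filter() additionally consults dmat->filter and repeats the check for every parent tag. The mask trick relies on alignment being a power of two, which bus_dma_tag_create() requires.

/*
 * Hypothetical helper mirroring the new run_filter() condition: a page
 * must be bounced either when it lies in the tag's excluded window
 * (lowaddr, highaddr] or when it violates the tag's alignment.
 */
static int
addr_needs_bounce(bus_addr_t paddr, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_size_t alignment)
{

	if (paddr > lowaddr && paddr <= highaddr)
		return (1);	/* device cannot reach this address */
	if ((paddr & (alignment - 1)) != 0)
		return (1);	/* chunk is misaligned for this tag */
	return (0);
}

This is also why bus_dma_tag_create() now sets BUS_DMA_COULD_BOUNCE whenever alignment > 1, so that maps created for such tags reserve bounce pages even on machines where all memory is DMA-reachable.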