--- //depot/yahoo/ybsd_6/src/sys/dev/ata/ata-all.c	2006/10/03 12:50:47
+++ //depot/jhb/ata_waitok/src/sys/dev/ata/ata-all.c	2007/05/23 14:44:40
@@ -207,6 +207,13 @@
 	callout_stop(&request->callout);
     ch->running = NULL;
 
+    /*
+     * If the request is waiting for bounce pages from bus dma, wait
+     * for it to tell us it is dead.
+     */
+    while (request->flags & ATA_R_DMA_DEFERRED)
+	msleep(&request->dma_sg_addr, &ch->state_mtx, 0, "atadmadrn", 0);
+
     /* unconditionally grap the channel lock */
     ch->state |= ATA_STALL_QUEUE;
     mtx_unlock(&ch->state_mtx);
@@ -326,6 +333,10 @@
 	if (!(request = ch->running))
 	    break;
 
+	/* ignore requests waiting for bounce pages */
+	if (request->flags & ATA_R_DMA_DEFERRED)
+	    break;
+
 	ATA_DEBUG_RQ(request, "interrupt");
 
 	/* safetycheck for the right state */
--- //depot/yahoo/ybsd_6/src/sys/dev/ata/ata-all.h	2007/06/01 11:18:21
+++ //depot/jhb/ata_waitok/src/sys/dev/ata/ata-all.h	2007/06/01 13:45:25
@@ -299,6 +299,11 @@
     caddr_t data_2;
 };
 
+struct ata_request;
+
+/* callback invoked when DMA load operation is complete */
+typedef void dma_load_complete(struct ata_request *request, int nsegs);
+
 /* structure used to queue an ATA/ATAPI request */
 struct ata_request {
     device_t		dev;		/* device handle */
@@ -333,6 +338,7 @@
 #define	ATA_R_REQUEUE		0x00000400
 #define	ATA_R_THREAD		0x00000800
 #define	ATA_R_DIRECT		0x00001000
+#define	ATA_R_DMA_DEFERRED	0x00002000
 
 #define	ATA_R_DEBUG		0x10000000
 #define	ATA_R_DANGER1		0x20000000
@@ -354,6 +360,14 @@
     struct ata_composite	*composite;	/* for composite atomic ops */
     void			*driver;	/* driver specific */
     TAILQ_ENTRY(ata_request)	chain;		/* list management */
+
+    /* Per-request DMA data. */
+    dma_load_complete		*dma_complete;
+    void			*dma_sg_addr;
+
+    /* Per-request AHCI data. */
+    int				ahci_fis_size;
+    int				ahci_tag;
 };
 
 /* define this for debugging request processing */
@@ -392,13 +406,6 @@
     u_int32_t	count;
 };
 
-/* structure used by the setprd function */
-struct ata_dmasetprd_args {
-    void	*dmatab;
-    int		nsegs;
-    int		error;
-};
-
 /* structure holding DMA related information */
 struct ata_dma {
     bus_dma_tag_t	dmatag;		/* parent DMA tag */
@@ -426,8 +433,8 @@
 
     void (*alloc)(device_t dev);
     void (*free)(device_t dev);
-    void (*setprd)(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
-    int (*load)(device_t dev, caddr_t data, int32_t count, int dir, void *addr, int *nsegs);
+    void (*setprd)(void *xsc, bus_dma_segment_t *segs, int nsegs);
+    int (*load)(device_t dev, struct ata_request *request, void *addr, dma_load_complete *completion);
     int (*unload)(device_t dev);
     int (*start)(device_t dev);
     int (*stop)(device_t dev);
--- //depot/yahoo/ybsd_6/src/sys/dev/ata/ata-chipset.c	2007/06/01 11:18:21
+++ //depot/jhb/ata_waitok/src/sys/dev/ata/ata-chipset.c	2007/06/01 13:45:25
@@ -64,9 +64,10 @@
 static int ata_ahci_allocate(device_t dev);
 static int ata_ahci_status(device_t dev);
 static int ata_ahci_begin_transaction(struct ata_request *request);
+static void ata_ahci_dma_complete(struct ata_request *request, int nsegs);
 static int ata_ahci_end_transaction(struct ata_request *request);
 static void ata_ahci_reset(device_t dev);
-static void ata_ahci_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
+static void ata_ahci_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs);
 static void ata_ahci_dmainit(device_t dev);
 static int ata_ahci_setup_fis(u_int8_t *fis, struct ata_request *request);
 static int ata_acard_chipinit(device_t dev);
@@ -110,9 +111,10 @@
 static int ata_marvell_allocate(device_t dev);
 static int ata_marvell_status(device_t dev);
 static int ata_marvell_begin_transaction(struct ata_request *request);
+static void ata_marvell_dma_complete(struct ata_request *request, int nsegs);
 static int ata_marvell_end_transaction(struct ata_request *request);
 static void ata_marvell_reset(device_t dev);
-static void ata_marvell_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
+static void ata_marvell_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs);
 static void ata_marvell_dmainit(device_t dev);
 static int ata_national_chipinit(device_t dev);
 static void ata_national_setmode(device_t dev, int mode);
@@ -572,12 +574,10 @@
 static int
 ata_ahci_begin_transaction(struct ata_request *request)
 {
-    struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev));
     struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
     struct ata_ahci_cmd_tab *ctp;
-    struct ata_ahci_cmd_list *clp;
-    int tag = 0, entries = 0;
-    int fis_size;
+    int tag = 0;
+    int error, fis_size;
 
     /* get a piece of the workspace for this request */
     ctp = (struct ata_ahci_cmd_tab *)
@@ -589,26 +589,41 @@
 	request->result = EIO;
 	return ATA_OP_FINISHED;
     }
+    request->ahci_fis_size = fis_size;
+    request->ahci_tag = tag;
 
     /* if request moves data setup and load SG list */
     if (request->flags & (ATA_R_READ | ATA_R_WRITE)) {
-	if (ch->dma->load(ch->dev, request->data, request->bytecount,
-			  request->flags & ATA_R_READ,
-			  ctp->prd_tab, &entries)) {
-	    device_printf(request->dev, "setting up DMA failed\n");
-	    request->result = EIO;
+	error = ch->dma->load(ch->dev, request, ctp->prd_tab,
+	    ata_ahci_dma_complete);
+
+	/* Handle EINPROGRESS */
+	if (error && error != EINPROGRESS)
 	    return ATA_OP_FINISHED;
-	}
-    }
+    } else
+	ata_ahci_dma_complete(request, 0);
+
+    return ATA_OP_CONTINUES;
+}
+
+static void
+ata_ahci_dma_complete(struct ata_request *request, int nsegs)
+{
+    struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev));
+    struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
+    struct ata_ahci_cmd_list *clp;
+    int tag;
+
+    tag = request->ahci_tag;
 
     /* setup the command list entry */
     clp = (struct ata_ahci_cmd_list *)
 	  (ch->dma->work + ATA_AHCI_CL_OFFSET + (ATA_AHCI_CL_SIZE * tag));
 
-    clp->prd_length = entries;
+    clp->prd_length = nsegs;
     clp->cmd_flags = (request->flags & ATA_R_WRITE ? (1<<6) : 0) |
 		     (request->flags & ATA_R_ATAPI ? (1<<5) : 0) |
-		     (fis_size / sizeof(u_int32_t));
+		     (request->ahci_fis_size / sizeof(u_int32_t));
     clp->bytecount = 0;
     clp->cmd_table_phys = htole64(ch->dma->work_bus + ATA_AHCI_CT_OFFSET +
 				  (ATA_AHCI_CT_SIZE * tag));
@@ -622,7 +637,6 @@
     /* start the timeout */
     callout_reset(&request->callout, request->timeout * hz,
 		  (timeout_t*)ata_timeout, request);
-    return ATA_OP_CONTINUES;
 }
 
 /* must be called with ATA channel locked and state_mtx held */
@@ -711,19 +725,15 @@
 }
 
 static void
-ata_ahci_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
+ata_ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs)
 {
-    struct ata_dmasetprd_args *args = xsc;
-    struct ata_ahci_dma_prd *prd = args->dmatab;
+    struct ata_ahci_dma_prd *prd = arg;
     int i;
 
-    if (!(args->error = error)) {
-	for (i = 0; i < nsegs; i++) {
-	    prd[i].dba = htole64(segs[i].ds_addr);
-	    prd[i].dbc = htole32((segs[i].ds_len - 1) & ATA_AHCI_PRD_MASK);
-	}
+    for (i = 0; i < nsegs; i++) {
+	prd[i].dba = htole64(segs[i].ds_addr);
+	prd[i].dbc = htole32((segs[i].ds_len - 1) & ATA_AHCI_PRD_MASK);
     }
-    args->nsegs = nsegs;
 }
 
 static void
@@ -2532,12 +2542,7 @@
 {
     struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev));
     struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
-    u_int32_t req_in;
-    u_int8_t *bytep;
-    u_int16_t *wordp;
-    u_int32_t *quadp;
-    int i, tag = 0x07;
-    int dummy, error, slot;
+    int error;
 
     /* only DMA R/W goes through the EMDA machine */
     if (request->u.ata.command != ATA_READ_DMA &&
@@ -2553,13 +2558,26 @@
     ata_modify_if_48bit(request);
 
     /* check sanity, setup SG list and DMA engine */
-    if ((error = ch->dma->load(ch->dev, request->data, request->bytecount,
-			       request->flags & ATA_R_READ, ch->dma->sg,
-			       &dummy))) {
-	device_printf(request->dev, "setting up DMA failed\n");
-	request->result = error;
+    error = ch->dma->load(ch->dev, request, ch->dma->sg,
+	ata_marvell_dma_complete);
+
+    /* Handle EINPROGRESS */
+    if (error && error != EINPROGRESS)
 	return ATA_OP_FINISHED;
-    }
+
+    return ATA_OP_CONTINUES;
+}
+
+static void
+ata_marvell_dma_complete(struct ata_request *request, int nsegs)
+{
+    struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev));
+    struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
+    u_int32_t req_in;
+    u_int8_t *bytep;
+    u_int16_t *wordp;
+    u_int32_t *quadp;
+    int i, slot, tag = 0x07;
 
     /* get next free request queue slot */
     req_in = ATA_INL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch));
@@ -2614,8 +2632,6 @@
     req_in &= 0xfffffc00;
     req_in += (slot << 5);
     ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), req_in);
-
-    return ATA_OP_CONTINUES;
 }
 
 /* must be called with ATA channel locked and state_mtx held */
@@ -2701,15 +2717,11 @@
 }
 
 static void
-ata_marvell_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
+ata_marvell_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs)
 {
-    struct ata_dmasetprd_args *args = xsc;
-    struct ata_marvell_dma_prdentry *prd = args->dmatab;
+    struct ata_marvell_dma_prdentry *prd = arg;
     int i;
 
-    if ((args->error = error))
-	return;
-
     for (i = 0; i < nsegs; i++) {
 	prd[i].addrlo = htole32(segs[i].ds_addr);
 	prd[i].count = htole32(segs[i].ds_len);
--- //depot/yahoo/ybsd_6/src/sys/dev/ata/ata-dma.c	2007/06/01 11:18:21
+++ //depot/jhb/ata_waitok/src/sys/dev/ata/ata-dma.c	2007/06/01 13:45:25
@@ -31,10 +31,12 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
+#include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -45,12 +47,17 @@
 #include 
 #include 
 
+#include 
+
 /* prototypes */
 static void ata_dmaalloc(device_t);
 static void ata_dmafree(device_t);
-static void ata_dmasetprd(void *, bus_dma_segment_t *, int, int);
-static int ata_dmaload(device_t, caddr_t, int32_t, int, void *, int *);
+static void ata_dmasetprd(void *, bus_dma_segment_t *, int);
+static int ata_dmaload(device_t, struct ata_request *, void *,
+    dma_load_complete *);
 static int ata_dmaunload(device_t);
+static void ata_dma_callback(void *, bus_dma_segment_t *, int, int);
+static void ata_dma_lock(void *, bus_dma_lock_op_t);
 
 /* local vars */
 static MALLOC_DEFINE(M_ATADMA, "ata_dma", "ATA driver DMA");
@@ -115,7 +126,7 @@
 			   ch->dma->max_address, BUS_SPACE_MAXADDR,
 			   NULL, NULL, ch->dma->max_iosize,
 			   ATA_DMA_ENTRIES, ch->dma->segsize,
-			   0, NULL, NULL, &ch->dma->data_tag))
+			   0, ata_dma_lock, ch, &ch->dma->data_tag))
 	goto error;
 
     if (bus_dmamem_alloc(ch->dma->sg_tag, (void **)&ch->dma->sg, 0,
@@ -200,31 +211,91 @@
 }
 
 static void
-ata_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
+ata_dma_lock(void *arg, bus_dma_lock_op_t op)
+{
+    struct ata_channel *ch = arg;
+
+    switch (op) {
+    case BUS_DMA_LOCK:
+	/*
+	 * Channel should be locked already.
+	 */
+	KASSERT(ATA_LOCKING(ch->dev, ATA_LF_WHICH) == ch->unit,
+	    ("%s: channel unlocked", __func__));
+	mtx_lock(&ch->state_mtx);
+	break;
+    case BUS_DMA_UNLOCK:
+	mtx_unlock(&ch->state_mtx);
+	break;
+    default:
+	panic("Unknown operation 0x%x for ata_dma_lock!", op);
+    }
+}
+
+static void
+ata_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs)
 {
-    struct ata_dmasetprd_args *args = xsc;
-    struct ata_dma_prdentry *prd = args->dmatab;
+    struct ata_dma_prdentry *prd = arg;
     int i;
 
-    if ((args->error = error))
-	return;
-
     for (i = 0; i < nsegs; i++) {
 	prd[i].addr = htole32(segs[i].ds_addr);
 	prd[i].count = htole32(segs[i].ds_len);
     }
     prd[i - 1].count |= htole32(ATA_DMA_EOT);
-    args->nsegs = nsegs;
+}
+
+static void
+ata_dma_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+    struct ata_request *request = arg;
+    struct ata_channel *ch;
+
+    ch = device_get_softc(request->parent);
+    if (ch->running == NULL) {
+	/* The request has been aborted, so unload the map. */
+	if (error == 0)
+	    bus_dmamap_unload(ch->dma->data_tag, ch->dma->data_map);
+	request->flags &= ~ATA_R_DMA_DEFERRED;
+	wakeup(&request->dma_sg_addr);
+    } else if (error == 0) {
+	ch->dma->setprd(request->dma_sg_addr, segs, nsegs);
+	bus_dmamap_sync(ch->dma->sg_tag, ch->dma->sg_map,
+	    BUS_DMASYNC_PREWRITE);
+
+	bus_dmamap_sync(ch->dma->data_tag, ch->dma->data_map,
+	    (request->flags & ATA_R_READ) ? BUS_DMASYNC_PREREAD :
+	    BUS_DMASYNC_PREWRITE);
+
+	ch->dma->cur_iosize = request->bytecount;
+	ch->dma->flags = ATA_DMA_LOADED;
+	if (request->flags & ATA_R_READ)
+	    ch->dma->flags |= ATA_DMA_READ;
+	request->flags &= ~ATA_R_DMA_DEFERRED;
+	request->dma_complete(request, nsegs);
+    } else {
+	request->result = error;
+	device_printf(request->dev, "setting up DMA failed\n");
+
+	/* These requests are synchronous and do ata_finish() later. */
+	if (dumping ||
+	    (ch->state & ATA_STALL_QUEUE) || (request->flags & ATA_R_DIRECT))
+	    return;
+	ata_finish(request);
+    }
 }
 
 static int
-ata_dmaload(device_t dev, caddr_t data, int32_t count, int dir,
-	    void *addr, int *entries)
+ata_dmaload(device_t dev, struct ata_request *request, void *addr,
+    dma_load_complete *completion)
 {
     struct ata_channel *ch = device_get_softc(dev);
-    struct ata_dmasetprd_args cba;
-    int error;
+    caddr_t data;
+    int32_t count;
+    int error, flags;
+    data = request->data;
+    count = request->bytecount;
     if (ch->dma->flags & ATA_DMA_LOADED) {
	device_printf(dev, "FAILURE - already active DMA on this device\n");
	return EIO;
     }
@@ -244,23 +315,22 @@
 	return EIO;
     }
 
-    cba.dmatab = addr;
+    request->dma_complete = completion;
+    request->dma_sg_addr = addr;
+    request->flags |= ATA_R_DMA_DEFERRED;
 
-    if ((error = bus_dmamap_load(ch->dma->data_tag, ch->dma->data_map,
-				 data, count, ch->dma->setprd, &cba,
-				 BUS_DMA_NOWAIT)) || (error = cba.error))
-	return error;
+    flags = 0;
 
-    *entries = cba.nsegs;
-
-    bus_dmamap_sync(ch->dma->sg_tag, ch->dma->sg_map, BUS_DMASYNC_PREWRITE);
-
-    bus_dmamap_sync(ch->dma->data_tag, ch->dma->data_map,
-		    dir ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
-
-    ch->dma->cur_iosize = count;
-    ch->dma->flags = dir ? (ATA_DMA_LOADED | ATA_DMA_READ) : ATA_DMA_LOADED;
-    return 0;
+    /* These requests are synchronous. */
+    if (dumping ||
+	(ch->state & ATA_STALL_QUEUE) || (request->flags & ATA_R_DIRECT))
+	flags = BUS_DMA_NOWAIT;
+
+    error = bus_dmamap_load(ch->dma->data_tag, ch->dma->data_map,
+	data, count, ata_dma_callback, request, flags);
+    if (error == 0 && (flags & BUS_DMA_NOWAIT))
+	error = request->result;
+    return (error);
 }
 
 int
--- //depot/yahoo/ybsd_6/src/sys/dev/ata/ata-lowlevel.c	2007/06/01 11:18:21
+++ //depot/jhb/ata_waitok/src/sys/dev/ata/ata-lowlevel.c	2007/06/01 13:45:25
@@ -46,6 +46,7 @@
 #include 
 
 /* prototypes */
+static void ata_dma_complete(struct ata_request *request, int nsegs);
 static int ata_generic_status(device_t dev);
 static int ata_wait(struct ata_channel *ch, struct ata_device *, u_int8_t);
 static void ata_pio_read(struct ata_request *, int);
@@ -71,7 +72,7 @@
 {
     struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
     struct ata_device *atadev = device_get_softc(request->dev);
-    int dummy, error;
+    int error;
 
     ATA_DEBUG_RQ(request, "begin transaction");
 
@@ -129,29 +130,12 @@
     /* ATA DMA data transfer commands */
     case ATA_R_DMA:
 	/* check sanity, setup SG list and DMA engine */
-	if ((error = ch->dma->load(ch->dev, request->data, request->bytecount,
-				   request->flags & ATA_R_READ, ch->dma->sg,
-				   &dummy))) {
-	    device_printf(request->dev, "setting up DMA failed\n");
-	    request->result = error;
-	    goto begin_finished;
-	}
+	error = ch->dma->load(ch->dev, request, ch->dma->sg, ata_dma_complete);
 
-	/* issue command */
-	if (ch->hw.command(request)) {
-	    device_printf(request->dev, "error issuing %s command\n",
-			  ata_cmd2str(request));
-	    request->result = EIO;
+	/* Handle EINPROGRESS */
+	if (error && error != EINPROGRESS)
 	    goto begin_finished;
-	}
-
-	/* start DMA engine */
-	if (ch->dma->start && ch->dma->start(request->dev)) {
-	    device_printf(request->dev, "error starting DMA\n");
-	    request->result = EIO;
-	    goto begin_finished;
-	}
-	goto begin_continue;
+	return ATA_OP_CONTINUES;
 
     /* ATAPI PIO commands */
     case ATA_R_ATAPI:
@@ -184,34 +168,17 @@
 	}
 
 	/* check sanity, setup SG list and DMA engine */
-	if ((error = ch->dma->load(ch->dev, request->data, request->bytecount,
-				   request->flags & ATA_R_READ, ch->dma->sg,
-				   &dummy))) {
-	    device_printf(request->dev, "setting up DMA failed\n");
-	    request->result = error;
-	    goto begin_finished;
-	}
+	error = ch->dma->load(ch->dev, request, ch->dma->sg, ata_dma_complete);
 
-	/* start ATAPI operation */
-	if (ch->hw.command(request)) {
-	    device_printf(request->dev, "error issuing ATA PACKET command\n");
-	    request->result = EIO;
+	/* Handle EINPROGRESS */
+	if (error && error != EINPROGRESS)
 	    goto begin_finished;
-	}
-
-	/* start DMA engine */
-	if (ch->dma->start && ch->dma->start(request->dev)) {
-	    request->result = EIO;
-	    goto begin_finished;
-	}
-	goto begin_continue;
+	return ATA_OP_CONTINUES;
     }
     /* NOT REACHED */
     printf("ata_begin_transaction OOPS!!!\n");
 
 begin_finished:
-    if (ch->dma && ch->dma->flags & ATA_DMA_LOADED)
-	ch->dma->unload(ch->dev);
     return ATA_OP_FINISHED;
 
 begin_continue:
@@ -220,6 +187,41 @@
     return ATA_OP_CONTINUES;
 }
 
+static void
+ata_dma_complete(struct ata_request *request, int nsegs)
+{
+    struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
+
+    /* issue command */
+    if (ch->hw.command(request)) {
+	device_printf(request->dev, "error issuing %s command\n",
+	    request->flags & ATA_R_ATAPI ? "ATA PACKET" : ata_cmd2str(request));
+	request->result = EIO;
+	goto error;
+    }
+
+    /* start DMA engine */
+    if (ch->dma->start && ch->dma->start(request->dev)) {
+	device_printf(request->dev, "error starting DMA\n");
+	request->result = EIO;
+	goto error;
+    }
+
+    callout_reset(&request->callout, request->timeout * hz,
+	(timeout_t*)ata_timeout, request);
+    return;
+
+error:
+    if (ch->dma)
+	ch->dma->unload(ch->dev);
+
+    /* These requests are synchronous and do ata_finish() later. */
+    if (dumping ||
+	(ch->state & ATA_STALL_QUEUE) || (request->flags & ATA_R_DIRECT))
+	return;
+    ata_finish(request);
+}
+
 /* must be called with ATA channel locked and state_mtx held */
 int
 ata_end_transaction(struct ata_request *request)
--- //depot/yahoo/ybsd_6/src/sys/dev/ata/ata-queue.c	2007/04/23 12:03:21
+++ //depot/jhb/ata_waitok/src/sys/dev/ata/ata-queue.c	2007/05/23 14:44:40
@@ -261,6 +265,17 @@
     struct ata_device *atadev = device_get_softc(request->dev);
     struct ata_composite *composite;
 
+    /*
+     * If the request is waiting for bounce pages from bus dma, wait
+     * for it to tell us it is dead.
+     */
+    if (request->flags & ATA_R_DMA_DEFERRED) {
+	mtx_lock(&ch->state_mtx);
+	while (request->flags & ATA_R_DMA_DEFERRED)
+	    msleep(&request->dma_sg_addr, &ch->state_mtx, 0, "atadmadrn", 0);
+	mtx_unlock(&ch->state_mtx);
+    }
+
     if (request->flags & ATA_R_DANGER2) {
 	device_printf(request->dev,
 		      "WARNING - %s freeing taskqueue zombie request\n",
@@ -545,6 +560,18 @@
     /* finish up all requests collected above */
     TAILQ_FOREACH_SAFE(request, &fail_requests, chain, tmp) {
 	TAILQ_REMOVE(&fail_requests, request, chain);
+
+	/*
+	 * If the request is waiting for bounce pages from bus dma, wait
+	 * for it to tell us it is dead.
+	 */
+	if (request->flags & ATA_R_DMA_DEFERRED) {
+	    mtx_lock(&ch->state_mtx);
+	    while (request->flags & ATA_R_DMA_DEFERRED)
+		msleep(&request->dma_sg_addr, &ch->state_mtx, 0, "atadmadrn",
+		    0);
+	    mtx_unlock(&ch->state_mtx);
+	}
 	ata_finish(request);
     }
 }
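
For reference, a minimal sketch (not part of the patch) of the calling convention the new deferred ch->dma->load() interface expects from a controller driver. "xxx" is a hypothetical chipset name used only for illustration; the types and the load() signature follow the ata-all.h hunks above.

/*
 * Illustrative sketch, assuming the dma_load_complete typedef and the
 * new (*load) member from ata-all.h above.  Not part of the change set.
 */
static void xxx_dma_complete(struct ata_request *request, int nsegs);

static int
xxx_begin_transaction(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
    int error;

    /* Queue the SG list load; the completion callback may run later. */
    error = ch->dma->load(ch->dev, request, ch->dma->sg, xxx_dma_complete);

    /*
     * EINPROGRESS means busdma will invoke the callback once bounce
     * pages become available, so the request stays in flight.
     */
    if (error && error != EINPROGRESS)
	return ATA_OP_FINISHED;
    return ATA_OP_CONTINUES;
}

static void
xxx_dma_complete(struct ata_request *request, int nsegs)
{
    /*
     * Invoked either directly from load() or later from the deferred
     * busdma callback.  Program the controller's PRD/SG consumer with
     * the nsegs entries written by setprd(), issue the command and
     * start the request timeout here, mirroring ata_ahci_dma_complete()
     * and ata_dma_complete() in the hunks above.
     */
}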