Index: dev/mly/mlyvar.h =================================================================== --- dev/mly/mlyvar.h (.../head/sys) (revision 244874) +++ dev/mly/mlyvar.h (.../projects/physbio/sys) (revision 244874) @@ -126,6 +126,7 @@ struct mly_command { #define MLY_CMD_MAPPED (1<<3) /* command has had its data mapped */ #define MLY_CMD_DATAIN (1<<4) /* data moves controller->system */ #define MLY_CMD_DATAOUT (1<<5) /* data moves system->controller */ +#define MLY_CMD_CCB (1<<6) /* data is ccb. */ u_int16_t mc_status; /* command completion status */ u_int8_t mc_sense; /* sense data length */ int32_t mc_resid; /* I/O residual count */ Index: dev/mly/mly.c =================================================================== --- dev/mly/mly.c (.../head/sys) (revision 244874) +++ dev/mly/mly.c (.../projects/physbio/sys) (revision 244874) @@ -1864,9 +1864,13 @@ mly_map_command(struct mly_command *mc) /* does the command have a data buffer? */ if (mc->mc_data != NULL) { - bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mc->mc_length, - mly_map_command_sg, mc, 0); - + if (mc->mc_flags & MLY_CMD_CCB) + bus_dmamap_load_ccb(sc->mly_buffer_dmat, mc->mc_datamap, + mc->mc_data, mly_map_command_sg, mc, 0); + else + bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, + mc->mc_data, mc->mc_length, + mly_map_command_sg, mc, 0); if (mc->mc_flags & MLY_CMD_DATAIN) bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD); if (mc->mc_flags & MLY_CMD_DATAOUT) @@ -2220,18 +2224,6 @@ mly_cam_action_io(struct cam_sim *sim, struct ccb_ csio->ccb_h.status = CAM_REQ_CMP_ERR; } - /* if there is data transfer, it must be to/from a virtual address */ - if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if (csio->ccb_h.flags & CAM_DATA_PHYS) { /* we can't map it */ - debug(0, " data pointer is to physical address"); - csio->ccb_h.status = CAM_REQ_CMP_ERR; - } - if (csio->ccb_h.flags & CAM_SCATTER_VALID) { /* we want to do the s/g setup */ - debug(0, " 
data has premature s/g setup"); - csio->ccb_h.status = CAM_REQ_CMP_ERR; - } - } - /* abandon aborted ccbs or those that have failed validation */ if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { debug(2, "abandoning CCB due to abort/validation failure"); @@ -2251,10 +2243,12 @@ mly_cam_action_io(struct cam_sim *sim, struct ccb_ } /* build the command */ - mc->mc_data = csio->data_ptr; + mc->mc_data = csio; mc->mc_length = csio->dxfer_len; mc->mc_complete = mly_cam_complete; mc->mc_private = csio; + mc->mc_flags |= MLY_CMD_CCB; + /* XXX This code doesn't set the data direction in mc_flags. */ /* save the bus number in the ccb for later recovery XXX should be a better way */ csio->ccb_h.sim_priv.entries[0].field = bus; Index: dev/amr/amr_cam.c =================================================================== --- dev/amr/amr_cam.c (.../head/sys) (revision 244874) +++ dev/amr/amr_cam.c (.../projects/physbio/sys) (revision 244874) @@ -274,12 +274,9 @@ amr_cam_action(struct cam_sim *sim, union ccb *ccb * address */ if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if (ccbh->flags & CAM_DATA_PHYS) + if ((ccbh->flags & CAM_DATA_MASK) != CAM_DATA_VADDR) /* we can't map it */ ccbh->status = CAM_REQ_INVALID; - if (ccbh->flags & CAM_SCATTER_VALID) - /* we want to do the s/g setup */ - ccbh->status = CAM_REQ_INVALID; } /* Index: dev/hpt27xx/osm_bsd.c =================================================================== --- dev/hpt27xx/osm_bsd.c (.../head/sys) (revision 244874) +++ dev/hpt27xx/osm_bsd.c (.../projects/physbio/sys) (revision 244874) @@ -473,33 +473,6 @@ static void os_cmddone(PCOMMAND pCmd) static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { - POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; - union ccb *ccb = ext->ccb; - bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; - int idx; - - if(logical) { - if (ccb->ccb_h.flags & CAM_DATA_PHYS) - panic("physical address unsupported"); - - if (ccb->ccb_h.flags & CAM_SCATTER_VALID) 
{ - if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) - panic("physical address unsupported"); - - for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { - os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr); - pSg[idx].size = sgList[idx].ds_len; - pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; - } - } - else { - os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); - pSg->size = ccb->csio.dxfer_len; - pSg->eot = 1; - } - return TRUE; - } - /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; @@ -514,24 +487,28 @@ static void hpt_io_dmamap_callback(void *arg, bus_ HPT_ASSERT(pCmd->flags.physical_sg); - if (error || nsegs == 0) + if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); - for (idx = 0; idx < nsegs; idx++, psg++) { - psg->addr.bus = segs[idx].ds_addr; - psg->size = segs[idx].ds_len; - psg->eot = 0; - } - psg[-1].eot = 1; + if (nsegs != 0) { + for (idx = 0; idx < nsegs; idx++, psg++) { + psg->addr.bus = segs[idx].ds_addr; + psg->size = segs[idx].ds_len; + psg->eot = 0; + } + psg[-1].eot = 1; - if (pCmd->flags.data_in) { - bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); + if (pCmd->flags.data_in) { + bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, + BUS_DMASYNC_PREREAD); + } + else if (pCmd->flags.data_out) { + bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, + BUS_DMASYNC_PREWRITE); + } } - else if (pCmd->flags.data_out) { - bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); - } ext->ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); ldm_queue_cmd(pCmd); @@ -660,6 +637,7 @@ static void hpt_scsi_io(PVBUS_EXT vbus_ext, union case 0x2f: case 0x8f: /* VERIFY_16 */ { + int error; pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); @@ -716,42 +694,20 @@ static void hpt_scsi_io(PVBUS_EXT vbus_ext, union pCmd->target = vd; pCmd->done = os_cmddone; 
pCmd->buildsgl = os_buildsgl; - pCmd->psg = ext->psg; - - if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { - int idx; - bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; - - if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) - pCmd->flags.physical_sg = 1; - - for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { - pCmd->psg[idx].addr.bus = sgList[idx].ds_addr; - pCmd->psg[idx].size = sgList[idx].ds_len; - pCmd->psg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; - } - - ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); - ldm_queue_cmd(pCmd); - } - else { - int error; - pCmd->flags.physical_sg = 1; - error = bus_dmamap_load(vbus_ext->io_dmat, - ext->dma_map, - ccb->csio.data_ptr, ccb->csio.dxfer_len, - hpt_io_dmamap_callback, pCmd, + pCmd->flags.physical_sg = 1; + error = bus_dmamap_load_ccb(vbus_ext->io_dmat, + ext->dma_map, ccb, + hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); - KdPrint(("bus_dmamap_load return %d", error)); - if (error && error!=EINPROGRESS) { - os_printk("bus_dmamap_load error %d", error); - cmdext_put(ext); - ldm_free_cmds(pCmd); - ccb->ccb_h.status = CAM_REQ_CMP_ERR; - xpt_done(ccb); - } + KdPrint(("bus_dmamap_load return %d", error)); + if (error && error!=EINPROGRESS) { + os_printk("bus_dmamap_load error %d", error); + cmdext_put(ext); + ldm_free_cmds(pCmd); + ccb->ccb_h.status = CAM_REQ_CMP_ERR; + xpt_done(ccb); } return; } Index: dev/isci/isci_io_request.c =================================================================== --- dev/isci/isci_io_request.c (.../head/sys) (revision 244874) +++ dev/isci/isci_io_request.c (.../projects/physbio/sys) (revision 244874) @@ -713,7 +713,6 @@ void isci_io_request_execute_scsi_io(union ccb *ccb, struct ISCI_CONTROLLER *controller) { - struct ccb_scsiio *csio = &ccb->csio; target_id_t target_id = ccb->ccb_h.target_id; struct ISCI_REQUEST *request; struct ISCI_IO_REQUEST *io_request; @@ -748,29 +747,21 @@ isci_io_request_execute_scsi_io(union ccb *ccb, io_request->current_sge_index = 
0; io_request->parent.remote_device_handle = device->sci_object; - if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) != 0) - panic("Unexpected CAM_SCATTER_VALID flag! flags = 0x%x\n", + if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) + panic("Unexpected cam data format! flags = 0x%x\n", ccb->ccb_h.flags); - if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) - panic("Unexpected CAM_DATA_PHYS flag! flags = 0x%x\n", - ccb->ccb_h.flags); - - if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - error = bus_dmamap_load(io_request->parent.dma_tag, - io_request->parent.dma_map, csio->data_ptr, csio->dxfer_len, - isci_io_request_construct, io_request, 0x0); - - /* A resource shortage from BUSDMA will be automatically - * continued at a later point, pushing the CCB processing - * forward, which will in turn unfreeze the simq. - */ - if (error == EINPROGRESS) { - xpt_freeze_simq(controller->sim, 1); - ccb->ccb_h.flags |= CAM_RELEASE_SIMQ; - } - } else - isci_io_request_construct(io_request, NULL, 0, 0); + error = bus_dmamap_load_ccb(io_request->parent.dma_tag, + io_request->parent.dma_map, ccb, + isci_io_request_construct, io_request, 0x0); + /* A resource shortage from BUSDMA will be automatically + * continued at a later point, pushing the CCB processing + * forward, which will in turn unfreeze the simq. 
+ */ + if (error == EINPROGRESS) { + xpt_freeze_simq(controller->sim, 1); + ccb->ccb_h.flags |= CAM_RELEASE_SIMQ; + } } void Index: dev/twa/tw_osl_freebsd.c =================================================================== --- dev/twa/tw_osl_freebsd.c (.../head/sys) (revision 244874) +++ dev/twa/tw_osl_freebsd.c (.../projects/physbio/sys) (revision 244874) @@ -1473,6 +1473,10 @@ tw_osli_map_request(struct tw_osli_req_context *re twa_map_load_data_callback, req, BUS_DMA_WAITOK); mtx_unlock_spin(sc->io_lock); + } else if (req->flags & TW_OSLI_REQ_FLAGS_CCB) { + error = bus_dmamap_load_ccb(sc->dma_tag, req->dma_map, + req->data, twa_map_load_data_callback, req, + BUS_DMA_WAITOK); } else { /* * There's only one CAM I/O thread running at a time. Index: dev/twa/tw_osl.h =================================================================== --- dev/twa/tw_osl.h (.../head/sys) (revision 244874) +++ dev/twa/tw_osl.h (.../projects/physbio/sys) (revision 244874) @@ -72,6 +72,7 @@ #define TW_OSLI_REQ_FLAGS_PASSTHRU (1<<5) /* pass through request */ #define TW_OSLI_REQ_FLAGS_SLEEPING (1<<6) /* owner sleeping on this cmd */ #define TW_OSLI_REQ_FLAGS_FAILED (1<<7) /* bus_dmamap_load() failed */ +#define TW_OSLI_REQ_FLAGS_CCB (1<<8) /* req is ccb. */ #ifdef TW_OSL_DEBUG Index: dev/twa/tw_osl_cam.c =================================================================== --- dev/twa/tw_osl_cam.c (.../head/sys) (revision 244874) +++ dev/twa/tw_osl_cam.c (.../projects/physbio/sys) (revision 244874) @@ -261,55 +261,23 @@ tw_osli_execute_scsi(struct tw_osli_req_context *r scsi_req->cdb = csio->cdb_io.cdb_bytes; scsi_req->cdb_len = csio->cdb_len; - if (!(ccb_h->flags & CAM_DATA_PHYS)) { - /* Virtual data addresses. Need to convert them... 
*/ - tw_osli_dbg_dprintf(3, sc, - "XPT_SCSI_IO: Single virtual address!"); - if (!(ccb_h->flags & CAM_SCATTER_VALID)) { - if (csio->dxfer_len > TW_CL_MAX_IO_SIZE) { - tw_osli_printf(sc, "size = %d", - TW_CL_SEVERITY_ERROR_STRING, - TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, - 0x2106, - "I/O size too big", - csio->dxfer_len); - ccb_h->status = CAM_REQ_TOO_BIG; - ccb_h->status &= ~CAM_SIM_QUEUED; - xpt_done(ccb); - return(1); - } - - if ((req->length = csio->dxfer_len)) { - req->data = csio->data_ptr; - scsi_req->sgl_entries = 1; - } - } else { - tw_osli_printf(sc, "", - TW_CL_SEVERITY_ERROR_STRING, - TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, - 0x2107, - "XPT_SCSI_IO: Got SGList"); - ccb_h->status = CAM_REQ_INVALID; - ccb_h->status &= ~CAM_SIM_QUEUED; - xpt_done(ccb); - return(1); - } - } else { - /* Data addresses are physical. */ - tw_osli_printf(sc, "", + if (csio->dxfer_len > TW_CL_MAX_IO_SIZE) { + tw_osli_printf(sc, "size = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, - 0x2108, - "XPT_SCSI_IO: Physical data addresses"); - ccb_h->status = CAM_REQ_INVALID; + 0x2106, + "I/O size too big", + csio->dxfer_len); + ccb_h->status = CAM_REQ_TOO_BIG; ccb_h->status &= ~CAM_SIM_QUEUED; xpt_done(ccb); return(1); } - + req->data = ccb; + req->length = csio->dxfer_len; + req->flags |= TW_OSLI_REQ_FLAGS_CCB; req->deadline = tw_osl_get_local_time() + (ccb_h->timeout / 1000); - /* * twa_map_load_data_callback will fill in the SGL, * and submit the I/O. 
Index: dev/hptiop/hptiop.c =================================================================== --- dev/hptiop/hptiop.c (.../head/sys) (revision 244874) +++ dev/hptiop/hptiop.c (.../projects/physbio/sys) (revision 244874) @@ -2358,6 +2358,7 @@ static void hptiop_action(struct cam_sim *sim, uni { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim); struct hpt_iop_srb * srb; + int error; switch (ccb->ccb_h.func_code) { @@ -2380,54 +2381,24 @@ static void hptiop_action(struct cam_sim *sim, uni } srb->ccb = ccb; + error = bus_dmamap_load_ccb(hba->io_dmat, + srb->dma_map, + ccb, + hptiop_post_scsi_command, + srb, + 0); - if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) - hptiop_post_scsi_command(srb, NULL, 0, 0); - else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { - if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { - int error; - - error = bus_dmamap_load(hba->io_dmat, - srb->dma_map, - ccb->csio.data_ptr, - ccb->csio.dxfer_len, - hptiop_post_scsi_command, - srb, 0); - - if (error && error != EINPROGRESS) { - device_printf(hba->pcidev, - "%d bus_dmamap_load error %d", - hba->pciunit, error); - xpt_freeze_simq(hba->sim, 1); - ccb->ccb_h.status = CAM_REQ_CMP_ERR; -invalid: - hptiop_free_srb(hba, srb); - xpt_done(ccb); - goto scsi_done; - } - } - else { - device_printf(hba->pcidev, - "CAM_DATA_PHYS not supported"); - ccb->ccb_h.status = CAM_REQ_CMP_ERR; - goto invalid; - } + if (error && error != EINPROGRESS) { + device_printf(hba->pcidev, + "%d bus_dmamap_load error %d", + hba->pciunit, error); + xpt_freeze_simq(hba->sim, 1); + ccb->ccb_h.status = CAM_REQ_CMP_ERR; + hptiop_free_srb(hba, srb); + xpt_done(ccb); + goto scsi_done; } - else { - struct bus_dma_segment *segs; - if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 || - (ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { - device_printf(hba->pcidev, "SCSI cmd failed"); - ccb->ccb_h.status=CAM_PROVIDE_FAIL; - goto invalid; - } - - segs = (struct bus_dma_segment *)ccb->csio.data_ptr; - 
hptiop_post_scsi_command(srb, segs, - ccb->csio.sglist_cnt, 0); - } - scsi_done: hptiop_unlock_adapter(hba); return; Index: dev/buslogic/bt.c =================================================================== --- dev/buslogic/bt.c (.../head/sys) (revision 244874) +++ dev/buslogic/bt.c (.../projects/physbio/sys) (revision 244874) @@ -1158,6 +1158,7 @@ btaction(struct cam_sim *sim, union ccb *ccb) if (ccb->ccb_h.func_code == XPT_SCSI_IO) { struct ccb_scsiio *csio; struct ccb_hdr *ccbh; + int error; csio = &ccb->csio; ccbh = &csio->ccb_h; @@ -1205,67 +1206,21 @@ btaction(struct cam_sim *sim, union ccb *ccb) * If we have any data to send with this command, * map it into bus space. */ - /* Only use S/G if there is a transfer */ - if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { - /* - * We've been given a pointer - * to a single buffer. - */ - if ((ccbh->flags & CAM_DATA_PHYS)==0) { - int error; - - error = bus_dmamap_load( - bt->buffer_dmat, - bccb->dmamap, - csio->data_ptr, - csio->dxfer_len, - btexecuteccb, - bccb, - /*flags*/0); - if (error == EINPROGRESS) { - /* - * So as to maintain - * ordering, freeze the - * controller queue - * until our mapping is - * returned. 
- */ - xpt_freeze_simq(bt->sim, - 1); - csio->ccb_h.status |= - CAM_RELEASE_SIMQ; - } - } else { - struct bus_dma_segment seg; - - /* Pointer to physical buffer */ - seg.ds_addr = - (bus_addr_t)csio->data_ptr; - seg.ds_len = csio->dxfer_len; - btexecuteccb(bccb, &seg, 1, 0); - } - } else { - struct bus_dma_segment *segs; - - if ((ccbh->flags & CAM_DATA_PHYS) != 0) - panic("btaction - Physical " - "segment pointers " - "unsupported"); - - if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) - panic("btaction - Virtual " - "segment addresses " - "unsupported"); - - /* Just use the segments provided */ - segs = (struct bus_dma_segment *) - csio->data_ptr; - btexecuteccb(bccb, segs, - csio->sglist_cnt, 0); - } - } else { - btexecuteccb(bccb, NULL, 0, 0); + error = bus_dmamap_load_ccb( + bt->buffer_dmat, + bccb->dmamap, + ccb, + btexecuteccb, + bccb, + /*flags*/0); + if (error == EINPROGRESS) { + /* + * So as to maintain ordering, freeze the + * controller queue until our mapping is + * returned. + */ + xpt_freeze_simq(bt->sim, 1); + csio->ccb_h.status |= CAM_RELEASE_SIMQ; } } else { hccb->opcode = INITIATOR_BUS_DEV_RESET; Index: dev/siis/siis.c =================================================================== --- dev/siis/siis.c (.../head/sys) (revision 244874) +++ dev/siis/siis.c (.../projects/physbio/sys) (revision 244874) @@ -994,23 +994,9 @@ siis_begin_transaction(device_t dev, union ccb *cc (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) ch->aslots |= (1 << slot->slot); slot->dma.nsegs = 0; - /* If request moves data, setup and load SG list */ - if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - void *buf; - bus_size_t size; - - slot->state = SIIS_SLOT_LOADING; - if (ccb->ccb_h.func_code == XPT_ATA_IO) { - buf = ccb->ataio.data_ptr; - size = ccb->ataio.dxfer_len; - } else { - buf = ccb->csio.data_ptr; - size = ccb->csio.dxfer_len; - } - bus_dmamap_load(ch->dma.data_tag, slot->dma.data_map, - buf, size, siis_dmasetprd, slot, 0); - } else - 
siis_execute_transaction(slot); + slot->state = SIIS_SLOT_LOADING; + bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, + ccb, siis_dmasetprd, slot, 0); } /* Locked by busdma engine. */ @@ -1032,24 +1018,26 @@ siis_dmasetprd(void *arg, bus_dma_segment_t *segs, return; } KASSERT(nsegs <= SIIS_SG_ENTRIES, ("too many DMA segment entries\n")); - /* Get a piece of the workspace for this request */ - ctp = (struct siis_cmd *) - (ch->dma.work + SIIS_CT_OFFSET + (SIIS_CT_SIZE * slot->slot)); - /* Fill S/G table */ - if (slot->ccb->ccb_h.func_code == XPT_ATA_IO) - prd = &ctp->u.ata.prd[0]; - else - prd = &ctp->u.atapi.prd[0]; - for (i = 0; i < nsegs; i++) { - prd[i].dba = htole64(segs[i].ds_addr); - prd[i].dbc = htole32(segs[i].ds_len); - prd[i].control = 0; - } - prd[nsegs - 1].control = htole32(SIIS_PRD_TRM); slot->dma.nsegs = nsegs; - bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, - ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ? - BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); + if (nsegs != 0) { + /* Get a piece of the workspace for this request */ + ctp = (struct siis_cmd *)(ch->dma.work + SIIS_CT_OFFSET + + (SIIS_CT_SIZE * slot->slot)); + /* Fill S/G table */ + if (slot->ccb->ccb_h.func_code == XPT_ATA_IO) + prd = &ctp->u.ata.prd[0]; + else + prd = &ctp->u.atapi.prd[0]; + for (i = 0; i < nsegs; i++) { + prd[i].dba = htole64(segs[i].ds_addr); + prd[i].dbc = htole32(segs[i].ds_len); + prd[i].control = 0; + } + prd[nsegs - 1].control = htole32(SIIS_PRD_TRM); + bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, + ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ? 
+ BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); + } siis_execute_transaction(slot); } Index: dev/sym/sym_hipd.c =================================================================== --- dev/sym/sym_hipd.c (.../head/sys) (revision 244874) +++ dev/sym/sym_hipd.c (.../projects/physbio/sys) (revision 244874) @@ -7877,51 +7877,15 @@ sym_setup_data_and_start(hcb_p np, struct ccb_scsi return; } - if (!(ccb_h->flags & CAM_SCATTER_VALID)) { - /* Single buffer */ - if (!(ccb_h->flags & CAM_DATA_PHYS)) { - /* Buffer is virtual */ - cp->dmamapped = (dir == CAM_DIR_IN) ? - SYM_DMA_READ : SYM_DMA_WRITE; - retv = bus_dmamap_load(np->data_dmat, cp->dmamap, - csio->data_ptr, csio->dxfer_len, - sym_execute_ccb, cp, 0); - if (retv == EINPROGRESS) { - cp->host_status = HS_WAIT; - xpt_freeze_simq(np->sim, 1); - csio->ccb_h.status |= CAM_RELEASE_SIMQ; - } - } else { - /* Buffer is physical */ - struct bus_dma_segment seg; - - seg.ds_addr = (bus_addr_t) csio->data_ptr; - sym_execute_ccb(cp, &seg, 1, 0); - } - } else { - /* Scatter/gather list */ - struct bus_dma_segment *segs; - - if ((ccb_h->flags & CAM_SG_LIST_PHYS) != 0) { - /* The SG list pointer is physical */ - sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID); - goto out_abort; - } - - if (!(ccb_h->flags & CAM_DATA_PHYS)) { - /* SG buffer pointers are virtual */ - sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID); - goto out_abort; - } - - /* SG buffer pointers are physical */ - segs = (struct bus_dma_segment *)csio->data_ptr; - sym_execute_ccb(cp, segs, csio->sglist_cnt, 0); + cp->dmamapped = (dir == CAM_DIR_IN) ? 
SYM_DMA_READ : SYM_DMA_WRITE; + retv = bus_dmamap_load_ccb(np->data_dmat, cp->dmamap, + (union ccb *)csio, sym_execute_ccb, cp, 0); + if (retv == EINPROGRESS) { + cp->host_status = HS_WAIT; + xpt_freeze_simq(np->sim, 1); + csio->ccb_h.status |= CAM_RELEASE_SIMQ; } return; -out_abort: - sym_xpt_done(np, (union ccb *) csio, cp); - sym_free_ccb(np, cp); } /* Index: dev/mfi/mfivar.h =================================================================== --- dev/mfi/mfivar.h (.../head/sys) (revision 244874) +++ dev/mfi/mfivar.h (.../projects/physbio/sys) (revision 244874) @@ -107,6 +107,7 @@ struct mfi_command { #define MFI_ON_MFIQ_BUSY (1<<7) #define MFI_ON_MFIQ_MASK ((1<<5)|(1<<6)|(1<<7)) #define MFI_CMD_SCSI (1<<8) +#define MFI_CMD_CCB (1<<9) uint8_t retry_for_fw_reset; void (* cm_complete)(struct mfi_command *cm); void *cm_private; Index: dev/mfi/mfi.c =================================================================== --- dev/mfi/mfi.c (.../head/sys) (revision 244874) +++ dev/mfi/mfi.c (.../projects/physbio/sys) (revision 244874) @@ -2267,8 +2267,14 @@ mfi_mapcmd(struct mfi_softc *sc, struct mfi_comman if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) { polled = (cm->cm_flags & MFI_CMD_POLLED) ? 
BUS_DMA_NOWAIT : 0; - error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap, - cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled); + if (cm->cm_flags & MFI_CMD_CCB) + error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat, + cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm, + polled); + else + error = bus_dmamap_load(sc->mfi_buffer_dmat, + cm->cm_dmamap, cm->cm_data, cm->cm_len, + mfi_data_cb, cm, polled); if (error == EINPROGRESS) { sc->mfi_flags |= MFI_FLAGS_QFRZN; return (0); Index: dev/mfi/mfi_cam.c =================================================================== --- dev/mfi/mfi_cam.c (.../head/sys) (revision 244874) +++ dev/mfi/mfi_cam.c (.../projects/physbio/sys) (revision 244874) @@ -264,17 +264,6 @@ mfip_cam_action(struct cam_sim *sim, union ccb *cc ccbh->status = CAM_REQ_INVALID; break; } - if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if (ccbh->flags & CAM_DATA_PHYS) { - ccbh->status = CAM_REQ_INVALID; - break; - } - if (ccbh->flags & CAM_SCATTER_VALID) { - ccbh->status = CAM_REQ_INVALID; - break; - } - } - ccbh->ccb_mfip_ptr = sc; TAILQ_INSERT_TAIL(&mfisc->mfi_cam_ccbq, ccbh, sim_links.tqe); mfi_startio(mfisc); @@ -379,14 +368,14 @@ mfip_start(void *data) cm->cm_private = ccb; cm->cm_sg = &pt->sgl; cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; - cm->cm_data = csio->data_ptr; + cm->cm_data = ccb; cm->cm_len = csio->dxfer_len; switch (ccbh->flags & CAM_DIR_MASK) { case CAM_DIR_IN: - cm->cm_flags = MFI_CMD_DATAIN; + cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_CCB; break; case CAM_DIR_OUT: - cm->cm_flags = MFI_CMD_DATAOUT; + cm->cm_flags = MFI_CMD_DATAOUT | MFI_CMD_CCB; break; case CAM_DIR_NONE: default: Index: dev/firewire/sbp.c =================================================================== --- dev/firewire/sbp.c (.../head/sys) (revision 244874) +++ dev/firewire/sbp.c (.../projects/physbio/sys) (revision 244874) @@ -2478,11 +2478,6 @@ END_DEBUG ocb->orb[4] |= htonl(ORB_CMD_IN); } - if (csio->ccb_h.flags & CAM_SCATTER_VALID) - printf("sbp: 
CAM_SCATTER_VALID\n"); - if (csio->ccb_h.flags & CAM_DATA_PHYS) - printf("sbp: CAM_DATA_PHYS\n"); - if (csio->ccb_h.flags & CAM_CDB_POINTER) cdb = (void *)csio->cdb_io.cdb_ptr; else @@ -2496,10 +2491,9 @@ printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb int s, error; s = splsoftvm(); - error = bus_dmamap_load(/*dma tag*/sbp->dmat, + error = bus_dmamap_load_ccb(/*dma tag*/sbp->dmat, /*dma map*/ocb->dmamap, - ccb->csio.data_ptr, - ccb->csio.dxfer_len, + ccb, sbp_execute_ocb, ocb, /*flags*/0); Index: dev/tws/tws_cam.c =================================================================== --- dev/tws/tws_cam.c (.../head/sys) (revision 244874) +++ dev/tws/tws_cam.c (.../projects/physbio/sys) (revision 244874) @@ -739,39 +739,8 @@ tws_execute_scsi(struct tws_softc *sc, union ccb * else bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len); - if (!(ccb_h->flags & CAM_DATA_PHYS)) { - /* Virtual data addresses. Need to convert them... */ - if (!(ccb_h->flags & CAM_SCATTER_VALID)) { - if (csio->dxfer_len > TWS_MAX_IO_SIZE) { - TWS_TRACE(sc, "I/O is big", csio->dxfer_len, 0); - tws_release_request(req); - ccb_h->status = CAM_REQ_TOO_BIG; - xpt_done(ccb); - return(0); - } - - req->length = csio->dxfer_len; - if (req->length) { - req->data = csio->data_ptr; - /* there is 1 sgl_entrie */ - /* cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= 1; */ - } - } else { - TWS_TRACE_DEBUG(sc, "got sglist", ccb_h->target_id, ccb_h->target_lun); - tws_release_request(req); - ccb_h->status = CAM_REQ_INVALID; - xpt_done(ccb); - return(0); - } - } else { - /* Data addresses are physical. 
*/ - TWS_TRACE_DEBUG(sc, "Phy data addr", ccb_h->target_id, ccb_h->target_lun); - tws_release_request(req); - ccb_h->status = CAM_REQ_INVALID; - ccb_h->status &= ~CAM_SIM_QUEUED; - xpt_done(ccb); - return(0); - } + req->data = ccb; + req->flags |= TWS_DATA_CCB; /* save ccb ptr */ req->ccb_ptr = ccb; /* @@ -961,10 +930,16 @@ tws_map_request(struct tws_softc *sc, struct tws_r * Map the data buffer into bus space and build the SG list. */ mtx_lock(&sc->io_lock); - error = bus_dmamap_load(sc->data_tag, req->dma_map, - req->data, req->length, - tws_dmamap_data_load_cbfn, req, - my_flags); + if (req->flags & TWS_DATA_CCB) + error = bus_dmamap_load_ccb(sc->data_tag, req->dma_map, + req->data, + tws_dmamap_data_load_cbfn, req, + my_flags); + else + error = bus_dmamap_load(sc->data_tag, req->dma_map, + req->data, req->length, + tws_dmamap_data_load_cbfn, req, + my_flags); mtx_unlock(&sc->io_lock); if (error == EINPROGRESS) { Index: dev/tws/tws.h =================================================================== --- dev/tws/tws.h (.../head/sys) (revision 244874) +++ dev/tws/tws.h (.../projects/physbio/sys) (revision 244874) @@ -137,6 +137,7 @@ enum tws_req_flags { TWS_DIR_IN = 0x2, TWS_DIR_OUT = 0x4, TWS_DIR_NONE = 0x8, + TWS_DATA_CCB = 0x16, }; enum tws_intrs { Index: dev/dpt/dpt_scsi.c =================================================================== --- dev/dpt/dpt_scsi.c (.../head/sys) (revision 244874) +++ dev/dpt/dpt_scsi.c (.../projects/physbio/sys) (revision 244874) @@ -910,56 +910,22 @@ dpt_action(struct cam_sim *sim, union ccb *ccb) */ /* Only use S/G if there is a transfer */ if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { + int error; + + error = bus_dmamap_load_ccb(dpt->buffer_dmat, + dccb->dmamap, + ccb, + dptexecuteccb, + dccb, /*flags*/0); + if (error == EINPROGRESS) { /* - * We've been given a pointer - * to a single buffer. 
+ * So as to maintain ordering, + * freeze the controller queue + * until our mapping is + * returned. */ - if ((ccbh->flags & CAM_DATA_PHYS) == 0) { - int error; - - error = - bus_dmamap_load(dpt->buffer_dmat, - dccb->dmamap, - csio->data_ptr, - csio->dxfer_len, - dptexecuteccb, - dccb, /*flags*/0); - if (error == EINPROGRESS) { - /* - * So as to maintain ordering, - * freeze the controller queue - * until our mapping is - * returned. - */ - xpt_freeze_simq(sim, 1); - dccb->state |= CAM_RELEASE_SIMQ; - } - } else { - struct bus_dma_segment seg; - - /* Pointer to physical buffer */ - seg.ds_addr = - (bus_addr_t)csio->data_ptr; - seg.ds_len = csio->dxfer_len; - dptexecuteccb(dccb, &seg, 1, 0); - } - } else { - struct bus_dma_segment *segs; - - if ((ccbh->flags & CAM_DATA_PHYS) != 0) - panic("dpt_action - Physical " - "segment pointers " - "unsupported"); - - if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) - panic("dpt_action - Virtual " - "segment addresses " - "unsupported"); - - /* Just use the segments provided */ - segs = (struct bus_dma_segment *)csio->data_ptr; - dptexecuteccb(dccb, segs, csio->sglist_cnt, 0); + xpt_freeze_simq(sim, 1); + dccb->state |= CAM_RELEASE_SIMQ; } } else { /* Index: dev/aic7xxx/aic7xxx_osm.c =================================================================== --- dev/aic7xxx/aic7xxx_osm.c (.../head/sys) (revision 244874) +++ dev/aic7xxx/aic7xxx_osm.c (.../projects/physbio/sys) (revision 244874) @@ -1138,6 +1138,7 @@ ahc_setup_data(struct ahc_softc *ahc, struct cam_s { struct hardware_scb *hscb; struct ccb_hdr *ccb_h; + int error; hscb = scb->hscb; ccb_h = &csio->ccb_h; @@ -1179,64 +1180,21 @@ ahc_setup_data(struct ahc_softc *ahc, struct cam_s } } - /* Only use S/G if there is a transfer */ - if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) { - /* We've been given a pointer to a single buffer */ - if ((ccb_h->flags & CAM_DATA_PHYS) == 0) { - int s; - int error; - - s = splsoftvm(); - error = 
bus_dmamap_load(ahc->buffer_dmat, - scb->dmamap, - csio->data_ptr, - csio->dxfer_len, - ahc_execute_scb, - scb, /*flags*/0); - if (error == EINPROGRESS) { - /* - * So as to maintain ordering, - * freeze the controller queue - * until our mapping is - * returned. - */ - xpt_freeze_simq(sim, - /*count*/1); - scb->io_ctx->ccb_h.status |= - CAM_RELEASE_SIMQ; - } - splx(s); - } else { - struct bus_dma_segment seg; - - /* Pointer to physical buffer */ - if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE) - panic("ahc_setup_data - Transfer size " - "larger than can device max"); - - seg.ds_addr = - (bus_addr_t)(vm_offset_t)csio->data_ptr; - seg.ds_len = csio->dxfer_len; - ahc_execute_scb(scb, &seg, 1, 0); - } - } else { - struct bus_dma_segment *segs; - - if ((ccb_h->flags & CAM_DATA_PHYS) != 0) - panic("ahc_setup_data - Physical segment " - "pointers unsupported"); - - if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) - panic("ahc_setup_data - Virtual segment " - "addresses unsupported"); - - /* Just use the segments provided */ - segs = (struct bus_dma_segment *)csio->data_ptr; - ahc_execute_scb(scb, segs, csio->sglist_cnt, 0); - } - } else { - ahc_execute_scb(scb, NULL, 0, 0); + error = bus_dmamap_load_ccb(ahc->buffer_dmat, + scb->dmamap, + (union ccb *)csio, + ahc_execute_scb, + scb, + 0); + if (error == EINPROGRESS) { + /* + * So as to maintain ordering, + * freeze the controller queue + * until our mapping is + * returned. 
+ */ + xpt_freeze_simq(sim, /*count*/1); + scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ; } } Index: dev/aic7xxx/aic79xx_osm.c =================================================================== --- dev/aic7xxx/aic79xx_osm.c (.../head/sys) (revision 244874) +++ dev/aic7xxx/aic79xx_osm.c (.../projects/physbio/sys) (revision 244874) @@ -1071,6 +1071,7 @@ ahd_setup_data(struct ahd_softc *ahd, struct cam_s { struct hardware_scb *hscb; struct ccb_hdr *ccb_h; + int error; hscb = scb->hscb; ccb_h = &csio->ccb_h; @@ -1120,64 +1121,18 @@ ahd_setup_data(struct ahd_softc *ahd, struct cam_s } } - /* Only use S/G if there is a transfer */ - if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) { - /* We've been given a pointer to a single buffer */ - if ((ccb_h->flags & CAM_DATA_PHYS) == 0) { - int s; - int error; - - s = splsoftvm(); - error = bus_dmamap_load(ahd->buffer_dmat, - scb->dmamap, - csio->data_ptr, - csio->dxfer_len, - ahd_execute_scb, - scb, /*flags*/0); - if (error == EINPROGRESS) { - /* - * So as to maintain ordering, - * freeze the controller queue - * until our mapping is - * returned. 
- */ - xpt_freeze_simq(sim, - /*count*/1); - scb->io_ctx->ccb_h.status |= - CAM_RELEASE_SIMQ; - } - splx(s); - } else { - struct bus_dma_segment seg; - - /* Pointer to physical buffer */ - if (csio->dxfer_len > AHD_MAXTRANSFER_SIZE) - panic("ahd_setup_data - Transfer size " - "larger than can device max"); - - seg.ds_addr = - (bus_addr_t)(vm_offset_t)csio->data_ptr; - seg.ds_len = csio->dxfer_len; - ahd_execute_scb(scb, &seg, 1, 0); - } - } else { - struct bus_dma_segment *segs; - - if ((ccb_h->flags & CAM_DATA_PHYS) != 0) - panic("ahd_setup_data - Physical segment " - "pointers unsupported"); - - if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) - panic("ahd_setup_data - Virtual segment " - "addresses unsupported"); - - /* Just use the segments provided */ - segs = (struct bus_dma_segment *)csio->data_ptr; - ahd_execute_scb(scb, segs, csio->sglist_cnt, 0); - } - } else { - ahd_execute_scb(scb, NULL, 0, 0); + error = bus_dmamap_load_ccb(ahd->buffer_dmat, + scb->dmamap, + (union ccb *)csio, + ahd_execute_scb, + scb, /*flags*/0); + if (error == EINPROGRESS) { + /* + * So as to maintain ordering, freeze the controller queue + * until our mapping is returned. 
+ */ + xpt_freeze_simq(sim, /*count*/1); + scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ; } } Index: dev/ahci/ahci.c =================================================================== --- dev/ahci/ahci.c (.../head/sys) (revision 244874) +++ dev/ahci/ahci.c (.../projects/physbio/sys) (revision 244874) @@ -1672,21 +1672,10 @@ ahci_begin_transaction(device_t dev, union ccb *cc (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) ch->aslots |= (1 << slot->slot); slot->dma.nsegs = 0; - /* If request moves data, setup and load SG list */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - void *buf; - bus_size_t size; - slot->state = AHCI_SLOT_LOADING; - if (ccb->ccb_h.func_code == XPT_ATA_IO) { - buf = ccb->ataio.data_ptr; - size = ccb->ataio.dxfer_len; - } else { - buf = ccb->csio.data_ptr; - size = ccb->csio.dxfer_len; - } - bus_dmamap_load(ch->dma.data_tag, slot->dma.data_map, - buf, size, ahci_dmasetprd, slot, 0); + bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb, + ahci_dmasetprd, slot, 0); } else ahci_execute_transaction(slot); } Index: dev/mvs/mvs.c =================================================================== --- dev/mvs/mvs.c (.../head/sys) (revision 244874) +++ dev/mvs/mvs.c (.../projects/physbio/sys) (revision 244874) @@ -1260,19 +1260,9 @@ mvs_begin_transaction(device_t dev, union ccb *ccb mvs_set_edma_mode(dev, MVS_EDMA_OFF); } if (ch->numpslots == 0 || ch->basic_dma) { - void *buf; - bus_size_t size; - slot->state = MVS_SLOT_LOADING; - if (ccb->ccb_h.func_code == XPT_ATA_IO) { - buf = ccb->ataio.data_ptr; - size = ccb->ataio.dxfer_len; - } else { - buf = ccb->csio.data_ptr; - size = ccb->csio.dxfer_len; - } - bus_dmamap_load(ch->dma.data_tag, slot->dma.data_map, - buf, size, mvs_dmasetprd, slot, 0); + bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, + ccb, mvs_dmasetprd, slot, 0); } else mvs_legacy_execute_transaction(slot); } Index: dev/aha/aha.c 
=================================================================== --- dev/aha/aha.c (.../head/sys) (revision 244874) +++ dev/aha/aha.c (.../projects/physbio/sys) (revision 244874) @@ -778,6 +778,7 @@ ahaaction(struct cam_sim *sim, union ccb *ccb) if (ccb->ccb_h.func_code == XPT_SCSI_IO) { struct ccb_scsiio *csio; struct ccb_hdr *ccbh; + int error; csio = &ccb->csio; ccbh = &csio->ccb_h; @@ -811,67 +812,22 @@ ahaaction(struct cam_sim *sim, union ccb *ccb) * If we have any data to send with this command, * map it into bus space. */ - /* Only use S/G if there is a transfer */ - if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { - /* - * We've been given a pointer - * to a single buffer. - */ - if ((ccbh->flags & CAM_DATA_PHYS)==0) { - int error; - error = bus_dmamap_load( - aha->buffer_dmat, - accb->dmamap, - csio->data_ptr, - csio->dxfer_len, - ahaexecuteccb, - accb, - /*flags*/0); - if (error == EINPROGRESS) { - /* - * So as to maintain - * ordering, freeze the - * controller queue - * until our mapping is - * returned. 
- */ - xpt_freeze_simq(aha->sim, - 1); - csio->ccb_h.status |= - CAM_RELEASE_SIMQ; - } - } else { - struct bus_dma_segment seg; - - /* Pointer to physical buffer */ - seg.ds_addr = - (bus_addr_t)csio->data_ptr; - seg.ds_len = csio->dxfer_len; - ahaexecuteccb(accb, &seg, 1, 0); - } - } else { - struct bus_dma_segment *segs; - - if ((ccbh->flags & CAM_DATA_PHYS) != 0) - panic("ahaaction - Physical " - "segment pointers " - "unsupported"); - - if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) - panic("ahaaction - Virtual " - "segment addresses " - "unsupported"); - - /* Just use the segments provided */ - segs = (struct bus_dma_segment *) - csio->data_ptr; - ahaexecuteccb(accb, segs, - csio->sglist_cnt, 0); - } - } else { - ahaexecuteccb(accb, NULL, 0, 0); + error = bus_dmamap_load_ccb( + aha->buffer_dmat, + accb->dmamap, + ccb, + ahaexecuteccb, + accb, + /*flags*/0); + if (error == EINPROGRESS) { + /* + * So as to maintain ordering, freeze the + * controller queue until our mapping is + * returned. + */ + xpt_freeze_simq(aha->sim, 1); + csio->ccb_h.status |= CAM_RELEASE_SIMQ; } } else { hccb->opcode = INITIATOR_BUS_DEV_RESET; Index: dev/ahb/ahb.c =================================================================== --- dev/ahb/ahb.c (.../head/sys) (revision 244874) +++ dev/ahb/ahb.c (.../projects/physbio/sys) (revision 244874) @@ -1006,6 +1006,7 @@ ahbaction(struct cam_sim *sim, union ccb *ccb) { struct ecb *ecb; struct hardware_ecb *hecb; + int error; /* * get an ecb to use. @@ -1056,65 +1057,19 @@ ahbaction(struct cam_sim *sim, union ccb *ccb) hecb->cdb, hecb->cdb_len); } - /* - * If we have any data to send with this command, - * map it into bus space. - */ - if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { - /* - * We've been given a pointer - * to a single buffer. 
- */ - if ((ccb->ccb_h.flags & CAM_DATA_PHYS)==0) { - int error; - - error = bus_dmamap_load( - ahb->buffer_dmat, - ecb->dmamap, - ccb->csio.data_ptr, - ccb->csio.dxfer_len, - ahbexecuteecb, - ecb, /*flags*/0); - if (error == EINPROGRESS) { - /* - * So as to maintain ordering, - * freeze the controller queue - * until our mapping is - * returned. - */ - xpt_freeze_simq(ahb->sim, 1); - ccb->ccb_h.status |= - CAM_RELEASE_SIMQ; - } - } else { - struct bus_dma_segment seg; - - /* Pointer to physical buffer */ - seg.ds_addr = - (bus_addr_t)ccb->csio.data_ptr; - seg.ds_len = ccb->csio.dxfer_len; - ahbexecuteecb(ecb, &seg, 1, 0); - } - } else { - struct bus_dma_segment *segs; - - if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) - panic("ahbaction - Physical segment " - "pointers unsupported"); - - if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) - panic("btaction - Virtual segment " - "addresses unsupported"); - - /* Just use the segments provided */ - segs = (struct bus_dma_segment *) - ccb->csio.data_ptr; - ahbexecuteecb(ecb, segs, ccb->csio.sglist_cnt, - 0); - } - } else { - ahbexecuteecb(ecb, NULL, 0, 0); + error = bus_dmamap_load_ccb( + ahb->buffer_dmat, + ecb->dmamap, + ccb, + ahbexecuteecb, + ecb, /*flags*/0); + if (error == EINPROGRESS) { + /* + * So as to maintain ordering, freeze the controller + * queue until our mapping is returned. 
+ */ + xpt_freeze_simq(ahb->sim, 1); + ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } break; } Index: dev/hptrr/hptrr_osm_bsd.c =================================================================== --- dev/hptrr/hptrr_osm_bsd.c (.../head/sys) (revision 244874) +++ dev/hptrr/hptrr_osm_bsd.c (.../projects/physbio/sys) (revision 244874) @@ -481,33 +481,7 @@ static void os_cmddone(PCOMMAND pCmd) static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { - POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; - union ccb *ccb = ext->ccb; - bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; - int idx; - if(logical) { - if (ccb->ccb_h.flags & CAM_DATA_PHYS) - panic("physical address unsupported"); - - if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { - if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) - panic("physical address unsupported"); - - for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { - os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr); - pSg[idx].size = sgList[idx].ds_len; - pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 
1 : 0; - } - } - else { - os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); - pSg->size = ccb->csio.dxfer_len; - pSg->eot = 1; - } - return TRUE; - } - /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; @@ -522,25 +496,28 @@ static void hpt_io_dmamap_callback(void *arg, bus_ HPT_ASSERT(pCmd->flags.physical_sg); - if (error || nsegs == 0) + if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); - for (idx = 0; idx < nsegs; idx++, psg++) { - psg->addr.bus = segs[idx].ds_addr; - psg->size = segs[idx].ds_len; - psg->eot = 0; + if (nsegs != 0) { + for (idx = 0; idx < nsegs; idx++, psg++) { + psg->addr.bus = segs[idx].ds_addr; + psg->size = segs[idx].ds_len; + psg->eot = 0; + } + psg[-1].eot = 1; + + if (pCmd->flags.data_in) { + bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, + BUS_DMASYNC_PREREAD); + } + else if (pCmd->flags.data_out) { + bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, + BUS_DMASYNC_PREWRITE); + } } - psg[-1].eot = 1; - - if (pCmd->flags.data_in) { - bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); - } - else if (pCmd->flags.data_out) { - bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); - } - ext->ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); ldm_queue_cmd(pCmd); } @@ -667,6 +644,8 @@ static void hpt_scsi_io(PVBUS_EXT vbus_ext, union case 0x13: case 0x2f: { + int error; + pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); @@ -722,42 +701,21 @@ static void hpt_scsi_io(PVBUS_EXT vbus_ext, union pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; - pCmd->psg = ext->psg; - - if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { - int idx; - bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; - - if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) - pCmd->flags.physical_sg = 1; - - for (idx = 0; idx < 
ccb->csio.sglist_cnt; idx++) { - pCmd->psg[idx].addr.bus = sgList[idx].ds_addr; - pCmd->psg[idx].size = sgList[idx].ds_len; - pCmd->psg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; - } - - ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); - ldm_queue_cmd(pCmd); - } - else { - int error; - pCmd->flags.physical_sg = 1; - error = bus_dmamap_load(vbus_ext->io_dmat, - ext->dma_map, - ccb->csio.data_ptr, ccb->csio.dxfer_len, - hpt_io_dmamap_callback, pCmd, - BUS_DMA_WAITOK + pCmd->flags.physical_sg = 1; + error = bus_dmamap_load_ccb(vbus_ext->io_dmat, + ext->dma_map, + ccb, + hpt_io_dmamap_callback, pCmd, + BUS_DMA_WAITOK ); - KdPrint(("bus_dmamap_load return %d", error)); - if (error && error!=EINPROGRESS) { - os_printk("bus_dmamap_load error %d", error); - cmdext_put(ext); - ldm_free_cmds(pCmd); - ccb->ccb_h.status = CAM_REQ_CMP_ERR; - xpt_done(ccb); - } + KdPrint(("bus_dmamap_load return %d", error)); + if (error && error!=EINPROGRESS) { + os_printk("bus_dmamap_load error %d", error); + cmdext_put(ext); + ldm_free_cmds(pCmd); + ccb->ccb_h.status = CAM_REQ_CMP_ERR; + xpt_done(ccb); } return; } Index: dev/ciss/ciss.c =================================================================== --- dev/ciss/ciss.c (.../head/sys) (revision 244874) +++ dev/ciss/ciss.c (.../projects/physbio/sys) (revision 244874) @@ -2676,9 +2676,14 @@ ciss_map_request(struct ciss_request *cr) BUS_DMASYNC_PREWRITE); if (cr->cr_data != NULL) { - error = bus_dmamap_load(sc->ciss_buffer_dmat, cr->cr_datamap, - cr->cr_data, cr->cr_length, - ciss_request_map_helper, cr, 0); + if (cr->cr_flags & CISS_REQ_CCB) + error = bus_dmamap_load_ccb(sc->ciss_buffer_dmat, + cr->cr_datamap, cr->cr_data, + ciss_request_map_helper, cr, 0); + else + error = bus_dmamap_load(sc->ciss_buffer_dmat, cr->cr_datamap, + cr->cr_data, cr->cr_length, + ciss_request_map_helper, cr, 0); if (error != 0) return (error); } else { @@ -3044,18 +3049,6 @@ ciss_cam_action_io(struct cam_sim *sim, struct ccb 
csio->ccb_h.status = CAM_REQ_CMP_ERR; } - /* if there is data transfer, it must be to/from a virtual address */ - if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if (csio->ccb_h.flags & CAM_DATA_PHYS) { /* we can't map it */ - debug(3, " data pointer is to physical address"); - csio->ccb_h.status = CAM_REQ_CMP_ERR; - } - if (csio->ccb_h.flags & CAM_SCATTER_VALID) { /* we want to do the s/g setup */ - debug(3, " data has premature s/g setup"); - csio->ccb_h.status = CAM_REQ_CMP_ERR; - } - } - /* abandon aborted ccbs or those that have failed validation */ if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { debug(3, "abandoning CCB due to abort/validation failure"); @@ -3082,7 +3075,7 @@ ciss_cam_action_io(struct cam_sim *sim, struct ccb * Build the command. */ cc = cr->cr_cc; - cr->cr_data = csio->data_ptr; + cr->cr_data = csio; cr->cr_length = csio->dxfer_len; cr->cr_complete = ciss_cam_complete; cr->cr_private = csio; @@ -3100,12 +3093,13 @@ ciss_cam_action_io(struct cam_sim *sim, struct ccb cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; /* XXX ordered tags? 
*/ if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { - cr->cr_flags = CISS_REQ_DATAOUT; + cr->cr_flags = CISS_REQ_DATAOUT | CISS_REQ_CCB; cc->cdb.direction = CISS_CDB_DIRECTION_WRITE; } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { - cr->cr_flags = CISS_REQ_DATAIN; + cr->cr_flags = CISS_REQ_DATAIN | CISS_REQ_CCB; cc->cdb.direction = CISS_CDB_DIRECTION_READ; } else { + cr->cr_data = NULL; cr->cr_flags = 0; cc->cdb.direction = CISS_CDB_DIRECTION_NONE; } Index: dev/ciss/cissvar.h =================================================================== --- dev/ciss/cissvar.h (.../head/sys) (revision 244874) +++ dev/ciss/cissvar.h (.../projects/physbio/sys) (revision 244874) @@ -113,6 +113,7 @@ struct ciss_request #define CISS_REQ_DATAOUT (1<<3) /* data host->adapter */ #define CISS_REQ_DATAIN (1<<4) /* data adapter->host */ #define CISS_REQ_BUSY (1<<5) /* controller has req */ +#define CISS_REQ_CCB (1<<6) /* data is ccb */ void (* cr_complete)(struct ciss_request *); void *cr_private; Index: dev/aac/aac_cam.c =================================================================== --- dev/aac/aac_cam.c (.../head/sys) (revision 244874) +++ dev/aac/aac_cam.c (.../projects/physbio/sys) (revision 244874) @@ -448,26 +448,28 @@ aac_cam_action(struct cam_sim *sim, union ccb *ccb /* Map the s/g list. XXX 32bit addresses only! 
*/ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { + switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) { + case CAM_DATA_VADDR: srb->data_len = csio->dxfer_len; - if (ccb->ccb_h.flags & CAM_DATA_PHYS) { - /* Send a 32bit command */ - fib->Header.Command = ScsiPortCommand; - srb->sg_map.SgCount = 1; - srb->sg_map.SgEntry[0].SgAddress = - (uint32_t)(uintptr_t)csio->data_ptr; - srb->sg_map.SgEntry[0].SgByteCount = - csio->dxfer_len; - } else { - /* - * Arrange things so that the S/G - * map will get set up automagically - */ - cm->cm_data = (void *)csio->data_ptr; - cm->cm_datalen = csio->dxfer_len; - cm->cm_sgtable = &srb->sg_map; - } - } else { + /* + * Arrange things so that the S/G + * map will get set up automagically + */ + cm->cm_data = (void *)csio->data_ptr; + cm->cm_datalen = csio->dxfer_len; + cm->cm_sgtable = &srb->sg_map; + break; + case CAM_DATA_PADDR: + /* Send a 32bit command */ + fib->Header.Command = ScsiPortCommand; + srb->sg_map.SgCount = 1; + srb->sg_map.SgEntry[0].SgAddress = + (uint32_t)(uintptr_t)csio->data_ptr; + srb->sg_map.SgEntry[0].SgByteCount = + csio->dxfer_len; + srb->data_len = csio->dxfer_len; + break; + default: /* XXX Need to handle multiple s/g elements */ panic("aac_cam: multiple s/g elements"); } Index: dev/aic/aic.c =================================================================== --- dev/aic/aic.c (.../head/sys) (revision 244874) +++ dev/aic/aic.c (.../projects/physbio/sys) (revision 244874) @@ -146,8 +146,8 @@ aic_action(struct cam_sim *sim, union ccb *ccb) scb->cmd_ptr = ccb->csio.cdb_io.cdb_bytes; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) || - (ccb->ccb_h.flags & CAM_DATA_PHYS)) { + if ((ccb->ccb_h.flags & CAM_DATA_MASK) != + CAM_DATA_VADDR) { ccb->ccb_h.status = CAM_REQ_INVALID; aic_free_scb(aic, scb); xpt_done(ccb); Index: dev/wds/wd7000.c 
=================================================================== --- dev/wds/wd7000.c (.../head/sys) (revision 244874) +++ dev/wds/wd7000.c (.../projects/physbio/sys) (revision 244874) @@ -1066,7 +1066,7 @@ wds_scsi_io(struct cam_sim * sim, struct ccb_scsii xpt_done((union ccb *) csio); return; } - if (ccb_h->flags & (CAM_CDB_PHYS | CAM_SCATTER_VALID | CAM_DATA_PHYS)) { + if ((ccb_h->flags & CAM_DATA_MASK) != CAM_DATA_VADDR) { /* don't support these */ ccb_h->status = CAM_REQ_INVALID; xpt_done((union ccb *) csio); Index: dev/mps/mpsvar.h =================================================================== --- dev/mps/mpsvar.h (.../head/sys) (revision 244874) +++ dev/mps/mpsvar.h (.../projects/physbio/sys) (revision 244874) @@ -231,6 +231,7 @@ struct mps_command { #define MPS_CM_FLAGS_SMP_PASS (1 << 8) #define MPS_CM_FLAGS_CHAIN_FAILED (1 << 9) #define MPS_CM_FLAGS_ERROR_MASK MPS_CM_FLAGS_CHAIN_FAILED +#define MPS_CM_FLAGS_USE_CCB (1 << 10) u_int cm_state; #define MPS_CM_STATE_FREE 0 #define MPS_CM_STATE_BUSY 1 Index: dev/mps/mps.c =================================================================== --- dev/mps/mps.c (.../head/sys) (revision 244874) +++ dev/mps/mps.c (.../projects/physbio/sys) (revision 244874) @@ -2278,6 +2278,9 @@ mps_map_command(struct mps_softc *sc, struct mps_c if (cm->cm_flags & MPS_CM_FLAGS_USE_UIO) { error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap, &cm->cm_uio, mps_data_cb2, cm, 0); + } else if (cm->cm_flags & MPS_CM_FLAGS_USE_CCB) { + error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap, + cm->cm_data, mps_data_cb, cm, 0); } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) { error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap, cm->cm_data, cm->cm_length, mps_data_cb, cm, 0); Index: dev/mps/mps_sas.c =================================================================== --- dev/mps/mps_sas.c (.../head/sys) (revision 244874) +++ dev/mps/mps_sas.c (.../projects/physbio/sys) (revision 244874) @@ -1755,8 +1755,13 @@ 
mpssas_action_scsiio(struct mpssas_softc *sassc, u } } - cm->cm_data = csio->data_ptr; cm->cm_length = csio->dxfer_len; + if (cm->cm_length != 0) { + cm->cm_data = ccb; + cm->cm_flags |= MPS_CM_FLAGS_USE_CCB; + } else { + cm->cm_data = NULL; + } cm->cm_sge = &req->SGL; cm->cm_sglsize = (32 - 24) * 4; cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; @@ -2691,19 +2696,15 @@ mpssas_send_smpcmd(struct mpssas_softc *sassc, uni /* * XXX We don't yet support physical addresses here. */ - if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) { + switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) { + case CAM_DATA_PADDR: + case CAM_DATA_SG_PADDR: mps_printf(sc, "%s: physical addresses not supported\n", __func__); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; - } - - /* - * If the user wants to send an S/G list, check to make sure they - * have single buffers. - */ - if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { + case CAM_DATA_SG: /* * The chip does not support more than one buffer for the * request or response. 
@@ -2741,9 +2742,15 @@ mpssas_send_smpcmd(struct mpssas_softc *sassc, uni response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr; } else response = ccb->smpio.smp_response; - } else { + break; + case CAM_DATA_VADDR: request = ccb->smpio.smp_request; response = ccb->smpio.smp_response; + break; + default: + ccb->ccb_h.status = CAM_REQ_INVALID; + xpt_done(ccb); + return; } cm = mps_alloc_command(sc); Index: dev/mpt/mpt_cam.c =================================================================== --- dev/mpt/mpt_cam.c (.../head/sys) (revision 244874) +++ dev/mpt/mpt_cam.c (.../projects/physbio/sys) (revision 244874) @@ -1382,7 +1382,7 @@ bad: } } - if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { + if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { @@ -1623,7 +1623,7 @@ out: mpt_prt(mpt, "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n", ccb->ccb_h.status & CAM_STATUS_MASK); - if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { + if (nseg) { bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; @@ -1785,7 +1785,7 @@ bad: } } - if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { + if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if (istgt) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { @@ -2010,7 +2010,7 @@ out: mpt_prt(mpt, "mpt_execute_req: I/O cancelled (status 0x%x)\n", ccb->ccb_h.status & CAM_STATUS_MASK); - if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { + if (nseg) { bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; @@ -2062,6 +2062,7 @@ mpt_start(struct cam_sim *sim, union ccb *ccb) bus_dmamap_callback_t *cb; target_id_t tgt; int raid_passthru; + int error; /* Get the pointer for the physical addapter */ mpt = ccb->ccb_h.ccb_mpt_ptr; @@ -2206,64 +2207,15 @@ mpt_start(struct cam_sim *sim, union ccb *ccb) 
ccb->ccb_h.target_lun, req, req->serno); } - /* - * If we have any data to send with this command map it into bus space. - */ - if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { - /* - * We've been given a pointer to a single buffer. - */ - if ((ccbh->flags & CAM_DATA_PHYS) == 0) { - /* - * Virtual address that needs to translated into - * one or more physical address ranges. - */ - int error; - int s = splsoftvm(); - error = bus_dmamap_load(mpt->buffer_dmat, - req->dmap, csio->data_ptr, csio->dxfer_len, - cb, req, 0); - splx(s); - if (error == EINPROGRESS) { - /* - * So as to maintain ordering, - * freeze the controller queue - * until our mapping is - * returned. - */ - xpt_freeze_simq(mpt->sim, 1); - ccbh->status |= CAM_RELEASE_SIMQ; - } - } else { - /* - * We have been given a pointer to single - * physical buffer. - */ - struct bus_dma_segment seg; - seg.ds_addr = - (bus_addr_t)(vm_offset_t)csio->data_ptr; - seg.ds_len = csio->dxfer_len; - (*cb)(req, &seg, 1, 0); - } - } else { - /* - * We have been given a list of addresses. - * This case could be easily supported but they are not - * currently generated by the CAM subsystem so there - * is no point in wasting the time right now. - */ - struct bus_dma_segment *segs; - if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) { - (*cb)(req, NULL, 0, EFAULT); - } else { - /* Just use the segments provided */ - segs = (struct bus_dma_segment *)csio->data_ptr; - (*cb)(req, segs, csio->sglist_cnt, 0); - } - } - } else { - (*cb)(req, NULL, 0, 0); + error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb, + req, 0); + if (error == EINPROGRESS) { + /* + * So as to maintain ordering, freeze the controller queue + * until our mapping is returned. 
+ */ + xpt_freeze_simq(mpt->sim, 1); + ccbh->status |= CAM_RELEASE_SIMQ; } } @@ -4458,6 +4410,7 @@ mpt_target_start_io(struct mpt_softc *mpt, union c bus_dmamap_callback_t *cb; PTR_MSG_TARGET_ASSIST_REQUEST ta; request_t *req; + int error; KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, ("dxfer_len %u but direction is NONE", csio->dxfer_len)); @@ -4544,44 +4497,11 @@ mpt_target_start_io(struct mpt_softc *mpt, union c "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); - if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { - if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { - int error; - int s = splsoftvm(); - error = bus_dmamap_load(mpt->buffer_dmat, - req->dmap, csio->data_ptr, csio->dxfer_len, - cb, req, 0); - splx(s); - if (error == EINPROGRESS) { - xpt_freeze_simq(mpt->sim, 1); - ccb->ccb_h.status |= CAM_RELEASE_SIMQ; - } - } else { - /* - * We have been given a pointer to single - * physical buffer. - */ - struct bus_dma_segment seg; - seg.ds_addr = (bus_addr_t) - (vm_offset_t)csio->data_ptr; - seg.ds_len = csio->dxfer_len; - (*cb)(req, &seg, 1, 0); - } - } else { - /* - * We have been given a list of addresses. - * This case could be easily supported but they are not - * currently generated by the CAM subsystem so there - * is no point in wasting the time right now. 
- */ - struct bus_dma_segment *sgs; - if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { - (*cb)(req, NULL, 0, EFAULT); - } else { - /* Just use the segments provided */ - sgs = (struct bus_dma_segment *)csio->data_ptr; - (*cb)(req, sgs, csio->sglist_cnt, 0); - } + error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, + cb, req, 0); + if (error == EINPROGRESS) { + xpt_freeze_simq(mpt->sim, 1); + ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } } else { uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; Index: dev/trm/trm.c =================================================================== --- dev/trm/trm.c (.../head/sys) (revision 244874) +++ dev/trm/trm.c (.../projects/physbio/sys) (revision 244874) @@ -559,6 +559,7 @@ trm_action(struct cam_sim *psim, union ccb *pccb) PDCB pDCB = NULL; PSRB pSRB; struct ccb_scsiio *pcsio; + int error; pcsio = &pccb->csio; TRM_DPRINTF(" XPT_SCSI_IO \n"); @@ -614,71 +615,18 @@ trm_action(struct cam_sim *psim, union ccb *pccb) } else bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len); - if ((pccb->ccb_h.flags & CAM_DIR_MASK) - != CAM_DIR_NONE) { - if ((pccb->ccb_h.flags & - CAM_SCATTER_VALID) == 0) { - if ((pccb->ccb_h.flags - & CAM_DATA_PHYS) == 0) { - int vmflags; - int error; - - vmflags = splsoftvm(); - error = bus_dmamap_load( - pACB->buffer_dmat, + error = bus_dmamap_load_ccb(pACB->buffer_dmat, pSRB->dmamap, - pcsio->data_ptr, - pcsio->dxfer_len, + pccb, trm_ExecuteSRB, pSRB, 0); - if (error == EINPROGRESS) { - xpt_freeze_simq( - pACB->psim, - 1); - pccb->ccb_h.status |= - CAM_RELEASE_SIMQ; - } - splx(vmflags); - } else { - struct bus_dma_segment seg; - - /* Pointer to physical buffer */ - seg.ds_addr = - (bus_addr_t)pcsio->data_ptr; - seg.ds_len = pcsio->dxfer_len; - trm_ExecuteSRB(pSRB, &seg, 1, - 0); - } - } else { - /* CAM_SCATTER_VALID */ - struct bus_dma_segment *segs; - - if ((pccb->ccb_h.flags & - CAM_SG_LIST_PHYS) == 0 || - (pccb->ccb_h.flags - & CAM_DATA_PHYS) != 0) { - pSRB->pNextSRB = pACB->pFreeSRB; - 
pACB->pFreeSRB = pSRB; - pccb->ccb_h.status = - CAM_PROVIDE_FAIL; - xpt_done(pccb); - splx(actionflags); - return; - } - - /* cam SG list is physical, - * cam data is virtual - */ - segs = (struct bus_dma_segment *) - pcsio->data_ptr; - trm_ExecuteSRB(pSRB, segs, - pcsio->sglist_cnt, 1); - } /* CAM_SCATTER_VALID */ - } else - trm_ExecuteSRB(pSRB, NULL, 0, 0); - } + if (error == EINPROGRESS) { + xpt_freeze_simq(pACB->psim, 1); + pccb->ccb_h.status |= CAM_RELEASE_SIMQ; + } break; + } case XPT_GDEV_TYPE: TRM_DPRINTF(" XPT_GDEV_TYPE \n"); pccb->ccb_h.status = CAM_REQ_INVALID; Index: dev/iir/iir.c =================================================================== --- dev/iir/iir.c (.../head/sys) (revision 244874) +++ dev/iir/iir.c (.../projects/physbio/sys) (revision 244874) @@ -794,6 +794,7 @@ gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb, { struct gdt_ccb *gccb; struct cam_sim *sim; + int error; GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_cmd(%p, %p)\n", gdt, ccb)); @@ -844,51 +845,14 @@ gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb, gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA, gccb->gc_scratch_busbase); - /* - * If we have any data to send with this command, - * map it into bus space. - */ - /* Only use S/G if there is a transfer */ - if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { - if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { - int s; - int error; - - /* vorher unlock von splcam() ??? 
*/ - s = splsoftvm(); - error = - bus_dmamap_load(gdt->sc_buffer_dmat, - gccb->gc_dmamap, - ccb->csio.data_ptr, - ccb->csio.dxfer_len, - gdtexecuteccb, - gccb, /*flags*/0); - if (error == EINPROGRESS) { - xpt_freeze_simq(sim, 1); - gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ; - } - splx(s); - } else { - panic("iir: CAM_DATA_PHYS not supported"); - } - } else { - struct bus_dma_segment *segs; - - if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) - panic("iir%d: iir_action - Physical " - "segment pointers unsupported", gdt->sc_hanum); - - if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0) - panic("iir%d: iir_action - Virtual " - "segment addresses unsupported", gdt->sc_hanum); - - /* Just use the segments provided */ - segs = (struct bus_dma_segment *)ccb->csio.data_ptr; - gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0); - } - } else { - gdtexecuteccb(gccb, NULL, 0, 0); + error = bus_dmamap_load_ccb(gdt->sc_buffer_dmat, + gccb->gc_dmamap, + ccb, + gdtexecuteccb, + gccb, /*flags*/0); + if (error == EINPROGRESS) { + xpt_freeze_simq(sim, 1); + gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } *lock = splcam(); @@ -903,6 +867,7 @@ gdt_cache_cmd(struct gdt_softc *gdt, union ccb *cc u_int8_t *cmdp; u_int16_t opcode; u_int32_t blockno, blockcnt; + int error; GDT_DPRINTF(GDT_D_CMD, ("gdt_cache_cmd(%p, %p)\n", gdt, ccb)); @@ -953,49 +918,15 @@ gdt_cache_cmd(struct gdt_softc *gdt, union ccb *cc gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT, blockcnt); - /* - * If we have any data to send with this command, - * map it into bus space. - */ - /* Only use S/G if there is a transfer */ - if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { - if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { - int s; - int error; - - /* vorher unlock von splcam() ??? 
*/ - s = splsoftvm(); - error = - bus_dmamap_load(gdt->sc_buffer_dmat, + error = bus_dmamap_load_ccb(gdt->sc_buffer_dmat, gccb->gc_dmamap, - ccb->csio.data_ptr, - ccb->csio.dxfer_len, + ccb, gdtexecuteccb, gccb, /*flags*/0); - if (error == EINPROGRESS) { - xpt_freeze_simq(sim, 1); - gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ; - } - splx(s); - } else { - panic("iir: CAM_DATA_PHYS not supported"); - } - } else { - struct bus_dma_segment *segs; - - if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) - panic("iir%d: iir_action - Physical " - "segment pointers unsupported", gdt->sc_hanum); - - if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0) - panic("iir%d: iir_action - Virtual " - "segment addresses unsupported", gdt->sc_hanum); - - /* Just use the segments provided */ - segs = (struct bus_dma_segment *)ccb->csio.data_ptr; - gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0); + if (error == EINPROGRESS) { + xpt_freeze_simq(sim, 1); + gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } - *lock = splcam(); return (gccb); } Index: dev/advansys/adwcam.c =================================================================== --- dev/advansys/adwcam.c (.../head/sys) (revision 244874) +++ dev/advansys/adwcam.c (.../projects/physbio/sys) (revision 244874) @@ -353,6 +353,7 @@ adw_action(struct cam_sim *sim, union ccb *ccb) struct ccb_scsiio *csio; struct ccb_hdr *ccbh; struct acb *acb; + int error; csio = &ccb->csio; ccbh = &ccb->ccb_h; @@ -427,66 +428,18 @@ adw_action(struct cam_sim *sim, union ccb *ccb) acb->queue.cdb, csio->cdb_len); } - /* - * If we have any data to send with this command, - * map it into bus space. - */ - if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { - /* - * We've been given a pointer - * to a single buffer. 
- */ - if ((ccbh->flags & CAM_DATA_PHYS) == 0) { - int error; - - error = - bus_dmamap_load(adw->buffer_dmat, - acb->dmamap, - csio->data_ptr, - csio->dxfer_len, - adwexecuteacb, - acb, /*flags*/0); - if (error == EINPROGRESS) { - /* - * So as to maintain ordering, - * freeze the controller queue - * until our mapping is - * returned. - */ - xpt_freeze_simq(sim, 1); - acb->state |= CAM_RELEASE_SIMQ; - } - } else { - struct bus_dma_segment seg; - - /* Pointer to physical buffer */ - seg.ds_addr = - (bus_addr_t)csio->data_ptr; - seg.ds_len = csio->dxfer_len; - adwexecuteacb(acb, &seg, 1, 0); - } - } else { - struct bus_dma_segment *segs; - - if ((ccbh->flags & CAM_DATA_PHYS) != 0) - panic("adw_action - Physical " - "segment pointers " - "unsupported"); - - if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) - panic("adw_action - Virtual " - "segment addresses " - "unsupported"); - - /* Just use the segments provided */ - segs = (struct bus_dma_segment *)csio->data_ptr; - adwexecuteacb(acb, segs, csio->sglist_cnt, - (csio->sglist_cnt < ADW_SGSIZE) - ? 0 : EFBIG); - } - } else { - adwexecuteacb(acb, NULL, 0, 0); + error = bus_dmamap_load_ccb(adw->buffer_dmat, + acb->dmamap, + ccb, + adwexecuteacb, + acb, /*flags*/0); + if (error == EINPROGRESS) { + /* + * So as to maintain ordering, freeze the controller + * queue until our mapping is returned. 
+ */ + xpt_freeze_simq(sim, 1); + acb->state |= CAM_RELEASE_SIMQ; } break; } Index: dev/advansys/advansys.c =================================================================== --- dev/advansys/advansys.c (.../head/sys) (revision 244874) +++ dev/advansys/advansys.c (.../projects/physbio/sys) (revision 244874) @@ -207,6 +207,7 @@ adv_action(struct cam_sim *sim, union ccb *ccb) struct ccb_hdr *ccb_h; struct ccb_scsiio *csio; struct adv_ccb_info *cinfo; + int error; ccb_h = &ccb->ccb_h; csio = &ccb->csio; @@ -217,58 +218,17 @@ adv_action(struct cam_sim *sim, union ccb *ccb) ccb_h->ccb_cinfo_ptr = cinfo; cinfo->ccb = ccb; - /* Only use S/G if there is a transfer */ - if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) { - /* - * We've been given a pointer - * to a single buffer - */ - if ((ccb_h->flags & CAM_DATA_PHYS) == 0) { - int error; - - error = - bus_dmamap_load(adv->buffer_dmat, - cinfo->dmamap, - csio->data_ptr, - csio->dxfer_len, - adv_execute_ccb, - csio, /*flags*/0); - if (error == EINPROGRESS) { - /* - * So as to maintain ordering, - * freeze the controller queue - * until our mapping is - * returned. 
- */ - adv_set_state(adv, - ADV_BUSDMA_BLOCK); - } - } else { - struct bus_dma_segment seg; - - /* Pointer to physical buffer */ - seg.ds_addr = - (bus_addr_t)csio->data_ptr; - seg.ds_len = csio->dxfer_len; - adv_execute_ccb(csio, &seg, 1, 0); - } - } else { - struct bus_dma_segment *segs; - if ((ccb_h->flags & CAM_DATA_PHYS) != 0) - panic("adv_setup_data - Physical " - "segment pointers unsupported"); - - if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) - panic("adv_setup_data - Virtual " - "segment addresses unsupported"); - - /* Just use the segments provided */ - segs = (struct bus_dma_segment *)csio->data_ptr; - adv_execute_ccb(ccb, segs, csio->sglist_cnt, 0); - } - } else { - adv_execute_ccb(ccb, NULL, 0, 0); + error = bus_dmamap_load_ccb(adv->buffer_dmat, + cinfo->dmamap, + ccb, + adv_execute_ccb, + csio, /*flags*/0); + if (error == EINPROGRESS) { + /* + * So as to maintain ordering, freeze the controller + * queue until our mapping is returned. + */ + adv_set_state(adv, ADV_BUSDMA_BLOCK); } break; } Index: dev/virtio/scsi/virtio_scsi.c =================================================================== --- dev/virtio/scsi/virtio_scsi.c (.../head/sys) (revision 244874) +++ dev/virtio/scsi/virtio_scsi.c (.../projects/physbio/sys) (revision 244874) @@ -961,28 +961,31 @@ vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, ccbh = &csio->ccb_h; error = 0; - if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { - - if ((ccbh->flags & CAM_DATA_PHYS) == 0) + switch ((ccbh->flags & CAM_DATA_MASK)) { + case CAM_DATA_VADDR: + error = sglist_append(sg, csio->data_ptr, csio->dxfer_len); + break; + case CAM_DATA_PADDR: + error = sglist_append_phys(sg, + (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len); + break; + case CAM_DATA_SG: + for (i = 0; i < csio->sglist_cnt && error == 0; i++) { + dseg = &((struct bus_dma_segment *)csio->data_ptr)[i]; error = sglist_append(sg, - csio->data_ptr, csio->dxfer_len); - else - error = sglist_append_phys(sg, - (vm_paddr_t)(vm_offset_t) 
csio->data_ptr, - csio->dxfer_len); - } else { - + (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len); + } + break; + case CAM_DATA_SG_PADDR: for (i = 0; i < csio->sglist_cnt && error == 0; i++) { dseg = &((struct bus_dma_segment *)csio->data_ptr)[i]; - - if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) - error = sglist_append(sg, - (void *)(vm_offset_t) dseg->ds_addr, - dseg->ds_len); - else - error = sglist_append_phys(sg, - (vm_paddr_t) dseg->ds_addr, dseg->ds_len); + error = sglist_append_phys(sg, + (vm_paddr_t) dseg->ds_addr, dseg->ds_len); } + break; + default: + error = EINVAL; + break; } return (error); Index: dev/hptmv/entry.c =================================================================== --- dev/hptmv/entry.c (.../head/sys) (revision 244874) +++ dev/hptmv/entry.c (.../projects/physbio/sys) (revision 244874) @@ -2620,32 +2620,7 @@ launch_worker_thread(void) int HPTLIBAPI fOsBuildSgl(_VBUS_ARG PCommand pCmd, FPSCAT_GATH pSg, int logical) { - union ccb *ccb = (union ccb *)pCmd->pOrgCommand; - bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; - int idx; - if(logical) { - if (ccb->ccb_h.flags & CAM_DATA_PHYS) - panic("physical address unsupported"); - - if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { - if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) - panic("physical address unsupported"); - - for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { - pSg[idx].dSgAddress = (ULONG_PTR)(UCHAR *)sgList[idx].ds_addr; - pSg[idx].wSgSize = sgList[idx].ds_len; - pSg[idx].wSgFlag = (idx==ccb->csio.sglist_cnt-1)? 
SG_FLAG_EOT : 0; - } - } - else { - pSg->dSgAddress = (ULONG_PTR)(UCHAR *)ccb->csio.data_ptr; - pSg->wSgSize = ccb->csio.dxfer_len; - pSg->wSgFlag = SG_FLAG_EOT; - } - return TRUE; - } - /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; @@ -2757,25 +2732,29 @@ hpt_io_dmamap_callback(void *arg, bus_dma_segment_ HPT_ASSERT(pCmd->cf_physical_sg); - if (error || nsegs == 0) + if (error) panic("busdma error"); HPT_ASSERT(nsegs<= MAX_SG_DESCRIPTORS); - for (idx = 0; idx < nsegs; idx++, psg++) { - psg->dSgAddress = (ULONG_PTR)(UCHAR *)segs[idx].ds_addr; - psg->wSgSize = segs[idx].ds_len; - psg->wSgFlag = (idx == nsegs-1)? SG_FLAG_EOT: 0; -/* KdPrint(("psg[%d]:add=%p,size=%x,flag=%x\n", idx, psg->dSgAddress,psg->wSgSize,psg->wSgFlag)); */ + if (nsegs != 0) { + for (idx = 0; idx < nsegs; idx++, psg++) { + psg->dSgAddress = (ULONG_PTR)(UCHAR *)segs[idx].ds_addr; + psg->wSgSize = segs[idx].ds_len; + psg->wSgFlag = (idx == nsegs-1)? SG_FLAG_EOT: 0; + /* KdPrint(("psg[%d]:add=%p,size=%x,flag=%x\n", idx, psg->dSgAddress,psg->wSgSize,psg->wSgFlag)); */ + } + /* psg[-1].wSgFlag = SG_FLAG_EOT; */ + + if (pCmd->cf_data_in) { + bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, + BUS_DMASYNC_PREREAD); + } + else if (pCmd->cf_data_out) { + bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, + BUS_DMASYNC_PREWRITE); + } } -/* psg[-1].wSgFlag = SG_FLAG_EOT; */ - - if (pCmd->cf_data_in) { - bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_PREREAD); - } - else if (pCmd->cf_data_out) { - bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_PREWRITE); - } ccb->ccb_h.timeout_ch = timeout(hpt_timeout, (caddr_t)ccb, 20*hz); pVDev->pfnSendCommand(_VBUS_P pCmd); @@ -2883,6 +2862,7 @@ OsSendCommand(_VBUS_ARG union ccb *ccb) UCHAR CdbLength; _VBUS_INST(pVDev->pVBus) PCommand pCmd = AllocateCommand(_VBUS_P0); + int error; HPT_ASSERT(pCmd); CdbLength = csio->cdb_len; @@ -2960,40 +2940,21 @@ 
OsSendCommand(_VBUS_ARG union ccb *ccb) break; } /*///////////////////////// */ - if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { - int idx; - bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; - - if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) - pCmd->cf_physical_sg = 1; - - for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { - pCmd->pSgTable[idx].dSgAddress = (ULONG_PTR)(UCHAR *)sgList[idx].ds_addr; - pCmd->pSgTable[idx].wSgSize = sgList[idx].ds_len; - pCmd->pSgTable[idx].wSgFlag= (idx==ccb->csio.sglist_cnt-1)?SG_FLAG_EOT: 0; - } - - ccb->ccb_h.timeout_ch = timeout(hpt_timeout, (caddr_t)ccb, 20*hz); - pVDev->pfnSendCommand(_VBUS_P pCmd); - } - else { - int error; - pCmd->cf_physical_sg = 1; - error = bus_dmamap_load(pAdapter->io_dma_parent, - pmap->dma_map, - ccb->csio.data_ptr, ccb->csio.dxfer_len, - hpt_io_dmamap_callback, pCmd, - BUS_DMA_WAITOK - ); - KdPrint(("bus_dmamap_load return %d\n", error)); - if (error && error!=EINPROGRESS) { - hpt_printk(("bus_dmamap_load error %d\n", error)); - FreeCommand(_VBUS_P pCmd); - ccb->ccb_h.status = CAM_REQ_CMP_ERR; - dmamap_put(pmap); - pAdapter->outstandingCommands--; - xpt_done(ccb); - } + pCmd->cf_physical_sg = 1; + error = bus_dmamap_load_ccb(pAdapter->io_dma_parent, + pmap->dma_map, + ccb, + hpt_io_dmamap_callback, + pCmd, BUS_DMA_WAITOK + ); + KdPrint(("bus_dmamap_load return %d\n", error)); + if (error && error!=EINPROGRESS) { + hpt_printk(("bus_dmamap_load error %d\n", error)); + FreeCommand(_VBUS_P pCmd); + ccb->ccb_h.status = CAM_REQ_CMP_ERR; + dmamap_put(pmap); + pAdapter->outstandingCommands--; + xpt_done(ccb); } goto Command_Complished; } Index: dev/arcmsr/arcmsr.c =================================================================== --- dev/arcmsr/arcmsr.c (.../head/sys) (revision 244874) +++ dev/arcmsr/arcmsr.c (.../projects/physbio/sys) (revision 244874) @@ -2341,7 +2341,7 @@ static int arcmsr_iop_message_xfer(struct AdapterC (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 | (u_int32_t ) 
pccb->csio.cdb_io.cdb_bytes[8]; /* 4 bytes: Areca io control code */ - if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { + if ((pccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) { buffer = pccb->csio.data_ptr; transfer_len = pccb->csio.dxfer_len; } else { @@ -2731,6 +2731,7 @@ static void arcmsr_action(struct cam_sim *psim, un case XPT_SCSI_IO: { struct CommandControlBlock *srb; int target = pccb->ccb_h.target_id; + int error; if(target == 16) { /* virtual device for iop message transfer */ @@ -2745,52 +2746,13 @@ static void arcmsr_action(struct cam_sim *psim, un pccb->ccb_h.arcmsr_ccbsrb_ptr = srb; pccb->ccb_h.arcmsr_ccbacb_ptr = acb; srb->pccb = pccb; - if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) { - /* Single buffer */ - if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) { - /* Buffer is virtual */ - u_int32_t error, s; - - s = splsoftvm(); - error = bus_dmamap_load(acb->dm_segs_dmat - , srb->dm_segs_dmamap - , pccb->csio.data_ptr - , pccb->csio.dxfer_len - , arcmsr_execute_srb, srb, /*flags*/0); - if(error == EINPROGRESS) { - xpt_freeze_simq(acb->psim, 1); - pccb->ccb_h.status |= CAM_RELEASE_SIMQ; - } - splx(s); - } - else { /* Buffer is physical */ -#ifdef PAE - panic("arcmsr: CAM_DATA_PHYS not supported"); -#else - struct bus_dma_segment seg; - - seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr; - seg.ds_len = pccb->csio.dxfer_len; - arcmsr_execute_srb(srb, &seg, 1, 0); -#endif - } - } else { - /* Scatter/gather list */ - struct bus_dma_segment *segs; - - if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 - || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { - pccb->ccb_h.status |= CAM_PROVIDE_FAIL; - xpt_done(pccb); - free(srb, M_DEVBUF); - return; - } - segs = (struct bus_dma_segment *)pccb->csio.data_ptr; - arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0); - } - } else { - arcmsr_execute_srb(srb, NULL, 0, 0); + error = bus_dmamap_load_ccb(acb->dm_segs_dmat + , srb->dm_segs_dmamap + , pccb + , 
arcmsr_execute_srb, srb, /*flags*/0); + if(error == EINPROGRESS) { + xpt_freeze_simq(acb->psim, 1); + pccb->ccb_h.status |= CAM_RELEASE_SIMQ; } break; } Index: dev/isp/isp_pci.c =================================================================== --- dev/isp/isp_pci.c (.../head/sys) (revision 244874) +++ dev/isp/isp_pci.c (.../projects/physbio/sys) (revision 244874) @@ -1922,6 +1922,7 @@ isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsii mush_t mush, *mp; void (*eptr)(void *, bus_dma_segment_t *, int, int); void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int); + int error; mp = &mush; mp->isp = isp; @@ -1942,70 +1943,17 @@ isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsii } - if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || (csio->dxfer_len == 0)) { - (*eptr)(mp, NULL, 0, 0); - } else if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { - if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { - int error; - error = bus_dmamap_load(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, csio->data_ptr, csio->dxfer_len, eptr, mp, 0); -#if 0 - xpt_print(csio->ccb_h.path, "%s: bus_dmamap_load " "ptr %p len %d returned %d\n", __func__, csio->data_ptr, csio->dxfer_len, error); -#endif - - if (error == EINPROGRESS) { - bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); - mp->error = EINVAL; - isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported"); - } else if (error && mp->error == 0) { + error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, + (union ccb *)csio, eptr, mp, 0); + if (error == EINPROGRESS) { + bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); + mp->error = EINVAL; + isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported"); + } else if (error && mp->error == 0) { #ifdef DIAGNOSTIC - isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error); + isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error); #endif - mp->error = error; - } - } else { - /* Pointer to physical buffer */ - 
struct bus_dma_segment seg; - seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; - seg.ds_len = csio->dxfer_len; - (*eptr)(mp, &seg, 1, 0); - } - } else { - struct bus_dma_segment *segs; - - if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { - isp_prt(isp, ISP_LOGERR, "Physical segment pointers unsupported"); - mp->error = EINVAL; - } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { - struct uio sguio; - int error; - - /* - * We're taking advantage of the fact that - * the pointer/length sizes and layout of the iovec - * structure are the same as the bus_dma_segment - * structure. This might be a little dangerous, - * but only if they change the structures, which - * seems unlikely. - */ - KASSERT((sizeof (sguio.uio_iov) == sizeof (csio->data_ptr) && - sizeof (sguio.uio_iovcnt) >= sizeof (csio->sglist_cnt) && - sizeof (sguio.uio_resid) >= sizeof (csio->dxfer_len)), ("Ken's assumption failed")); - sguio.uio_iov = (struct iovec *)csio->data_ptr; - sguio.uio_iovcnt = csio->sglist_cnt; - sguio.uio_resid = csio->dxfer_len; - sguio.uio_segflg = UIO_SYSSPACE; - - error = bus_dmamap_load_uio(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, &sguio, eptr2, mp, 0); - - if (error != 0 && mp->error == 0) { - isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error); - mp->error = error; - } - } else { - /* Just use the segments provided */ - segs = (struct bus_dma_segment *) csio->data_ptr; - (*eptr)(mp, segs, csio->sglist_cnt, 0); - } + mp->error = error; } if (mp->error) { int retval = CMD_COMPLETE; Index: dev/isp/isp_sbus.c =================================================================== --- dev/isp/isp_sbus.c (.../head/sys) (revision 244874) +++ dev/isp/isp_sbus.c (.../projects/physbio/sys) (revision 244874) @@ -635,6 +635,7 @@ isp_sbus_dmasetup(ispsoftc_t *isp, struct ccb_scsi { mush_t mush, *mp; void (*eptr)(void *, bus_dma_segment_t *, int, int); + int error; mp = &mush; mp->isp = isp; @@ -645,47 +646,18 @@ isp_sbus_dmasetup(ispsoftc_t *isp, struct ccb_scsi 
eptr = dma2; - if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || (csio->dxfer_len == 0)) { - (*eptr)(mp, NULL, 0, 0); - } else if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { - if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { - int error; - error = bus_dmamap_load(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, csio->data_ptr, csio->dxfer_len, eptr, mp, 0); -#if 0 - xpt_print(csio->ccb_h.path, "%s: bus_dmamap_load " "ptr %p len %d returned %d\n", __func__, csio->data_ptr, csio->dxfer_len, error); -#endif - - if (error == EINPROGRESS) { - bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); - mp->error = EINVAL; - isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported"); - } else if (error && mp->error == 0) { + error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, + PISP_PCMD(csio)->dmap, (union ccb *)csio, eptr, mp, 0); + if (error == EINPROGRESS) { + bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); + mp->error = EINVAL; + isp_prt(isp, ISP_LOGERR, + "deferred dma allocation not supported"); + } else if (error && mp->error == 0) { #ifdef DIAGNOSTIC - isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error); + isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error); #endif - mp->error = error; - } - } else { - /* Pointer to physical buffer */ - struct bus_dma_segment seg; - seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; - seg.ds_len = csio->dxfer_len; - (*eptr)(mp, &seg, 1, 0); - } - } else { - struct bus_dma_segment *segs; - - if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { - isp_prt(isp, ISP_LOGERR, "Physical segment pointers unsupported"); - mp->error = EINVAL; - } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { - isp_prt(isp, ISP_LOGERR, "Physical SG/LIST Phys segment pointers unsupported"); - mp->error = EINVAL; - } else { - /* Just use the segments provided */ - segs = (struct bus_dma_segment *) csio->data_ptr; - (*eptr)(mp, segs, csio->sglist_cnt, 0); - } + mp->error = error; } if 
(mp->error) { int retval = CMD_COMPLETE; Index: dev/ata/atapi-cam.c =================================================================== --- dev/ata/atapi-cam.c (.../head/sys) (revision 244874) +++ dev/ata/atapi-cam.c (.../projects/physbio/sys) (revision 244874) @@ -514,12 +514,6 @@ atapi_action(struct cam_sim *sim, union ccb *ccb) ("CAM CCB too long for ATAPI")); goto action_invalid; } - if ((ccb_h->flags & CAM_SCATTER_VALID)) { - /* scatter-gather not supported */ - xpt_print_path(ccb_h->path); - printf("ATAPI/CAM does not support scatter-gather yet!\n"); - goto action_invalid; - } switch (ccb_h->flags & CAM_DIR_MASK) { case CAM_DIR_IN: Index: dev/ata/ata-dma.c =================================================================== --- dev/ata/ata-dma.c (.../head/sys) (revision 244874) +++ dev/ata/ata-dma.c (.../projects/physbio/sys) (revision 244874) @@ -304,10 +304,17 @@ ata_dmaload(struct ata_request *request, void *add else dspa.dmatab = request->dma->sg; - if ((error = bus_dmamap_load(request->dma->data_tag, request->dma->data_map, - request->data, request->bytecount, - ch->dma.setprd, &dspa, BUS_DMA_NOWAIT)) || - (error = dspa.error)) { +#ifdef ATA_CAM + if (request->ccb) + error = bus_dmamap_load_ccb(request->dma->data_tag, + request->dma->data_map, request->ccb, + ch->dma.setprd, &dspa, BUS_DMA_NOWAIT); + else +#endif + error = bus_dmamap_load(request->dma->data_tag, request->dma->data_map, + request->data, request->bytecount, + ch->dma.setprd, &dspa, BUS_DMA_NOWAIT); + if (error || (error = dspa.error)) { device_printf(request->parent, "FAILURE - load data\n"); goto error; } Index: x86/x86/busdma_machdep.c =================================================================== --- x86/x86/busdma_machdep.c (.../head/sys) (revision 244874) +++ x86/x86/busdma_machdep.c (.../projects/physbio/sys) (revision 244874) @@ -37,9 +37,8 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include -#include #include +#include #include #include @@ -86,6 +85,7 @@ struct 
bounce_page { vm_offset_t vaddr; /* kva of bounce buffer */ bus_addr_t busaddr; /* Physical address */ vm_offset_t datavaddr; /* kva of client data */ + bus_addr_t dataaddr; /* client physical address */ bus_size_t datacount; /* client data count */ STAILQ_ENTRY(bounce_page) links; }; @@ -124,8 +124,7 @@ struct bus_dmamap { int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; - void *buf; /* unmapped buffer pointer */ - bus_size_t buflen; /* unmapped buffer length */ + bus_dma_memory_t mem; bus_dmamap_callback_t *callback; void *callback_arg; STAILQ_ENTRY(bus_dmamap) links; @@ -141,11 +140,18 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit); static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, - vm_offset_t vaddr, bus_size_t size); + vm_offset_t vaddr, bus_addr_t addr, + bus_size_t size); static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); -int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, - void *buf, bus_size_t buflen, int flags); +static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, + pmap_t pmap, void *buf, bus_size_t buflen, + int flags); +static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, + vm_paddr_t buf, bus_size_t buflen, + int flags); +static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, + int flags); #ifdef XEN #undef pmap_kextract @@ -579,7 +585,33 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, b CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); } -int +static void +_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags) +{ + bus_addr_t curaddr; + bus_size_t sgsize; + + if ((map != &nobounce_dmamap && map->pagesneeded == 0)) { + /* + * Count the number of bounce pages + * needed in order to complete 
this transfer + */ + curaddr = buf; + while (buflen != 0) { + sgsize = MIN(buflen, dmat->maxsegsz); + if (run_filter(dmat, curaddr)) { + sgsize = MIN(sgsize, PAGE_SIZE); + map->pagesneeded++; + } + curaddr += sgsize; + buflen -= sgsize; + } + CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); + } +} + +static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, void *buf, bus_size_t buflen, int flags) { @@ -604,12 +636,11 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dm bus_size_t sg_len; sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK); - if (pmap) - paddr = pmap_extract(pmap, vaddr); - else + if (pmap == kernel_pmap) paddr = pmap_kextract(vaddr); - if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && - run_filter(dmat, paddr) != 0) { + else + paddr = pmap_extract(pmap, vaddr); + if (run_filter(dmat, paddr) != 0) { sg_len = roundup2(sg_len, dmat->alignment); map->pagesneeded++; } @@ -617,78 +648,177 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dm } CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); } +} +static int +_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) +{ + /* Reserve Necessary Bounce Pages */ - if (map->pagesneeded != 0) { - mtx_lock(&bounce_lock); - if (flags & BUS_DMA_NOWAIT) { - if (reserve_bounce_pages(dmat, map, 0) != 0) { - mtx_unlock(&bounce_lock); - return (ENOMEM); - } - } else { - if (reserve_bounce_pages(dmat, map, 1) != 0) { - /* Queue us for resources */ - map->dmat = dmat; - map->buf = buf; - map->buflen = buflen; - STAILQ_INSERT_TAIL(&bounce_map_waitinglist, - map, links); - mtx_unlock(&bounce_lock); - return (EINPROGRESS); - } + mtx_lock(&bounce_lock); + if (flags & BUS_DMA_NOWAIT) { + if (reserve_bounce_pages(dmat, map, 0) != 0) { + mtx_unlock(&bounce_lock); + return (ENOMEM); } - mtx_unlock(&bounce_lock); + } else { + if (reserve_bounce_pages(dmat, map, 1) != 0) { + /* Queue us for resources */ + STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); + 
mtx_unlock(&bounce_lock); + return (EINPROGRESS); + } } + mtx_unlock(&bounce_lock); return (0); } /* - * Utility function to load a linear buffer. lastaddrp holds state - * between invocations (for multiple-buffer loads). segp contains + * Add a single contiguous physical range to the segment list. + */ +static int +_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, + bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) +{ + bus_addr_t baddr, bmask; + int seg; + + /* + * Make sure we don't cross any boundaries. + */ + bmask = ~(dmat->boundary - 1); + if (dmat->boundary > 0) { + baddr = (curaddr + dmat->boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } + + /* + * Insert chunk into a segment, coalescing with + * previous segment if possible. + */ + seg = *segp; + if (seg == -1) { + seg = 0; + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } else { + if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && + (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && + (dmat->boundary == 0 || + (segs[seg].ds_addr & bmask) == (curaddr & bmask))) + segs[seg].ds_len += sgsize; + else { + if (++seg >= dmat->nsegments) + return (0); + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } + } + *segp = seg; + return (sgsize); +} + +/* + * Utility function to load a physical buffer. segp contains * the starting segment on entrace, and the ending segment on exit. - * first indicates if this is the first invocation of this function. 
*/ -static __inline int +int +_bus_dmamap_load_phys(bus_dma_tag_t dmat, + bus_dmamap_t map, + vm_paddr_t buf, bus_size_t buflen, + int flags, + bus_dma_segment_t *segs, + int *segp) +{ + bus_size_t sgsize; + bus_addr_t curaddr; + int error; + + if (map == NULL || map == &contig_dmamap) + map = &nobounce_dmamap; + + if (segs == NULL) + segs = dmat->segments; + + if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { + _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); + } + } + + while (buflen > 0) { + curaddr = buf; + sgsize = MIN(buflen, dmat->maxsegsz); + if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && + map->pagesneeded != 0 && run_filter(dmat, curaddr)) { + sgsize = MIN(sgsize, PAGE_SIZE); + curaddr = add_bounce_page(dmat, map, 0, curaddr, + sgsize); + } + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; + buf += sgsize; + buflen -= sgsize; + } + + /* + * Did we fit? + */ + return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ +} + +/* + * Utility function to load a linear buffer. segp contains + * the starting segment on entrace, and the ending segment on exit. 
+ */ +int _bus_dmamap_load_buffer(bus_dma_tag_t dmat, - bus_dmamap_t map, + bus_dmamap_t map, void *buf, bus_size_t buflen, pmap_t pmap, int flags, - bus_addr_t *lastaddrp, bus_dma_segment_t *segs, - int *segp, - int first) + int *segp) { bus_size_t sgsize; - bus_addr_t curaddr, lastaddr, baddr, bmask; + bus_addr_t curaddr; vm_offset_t vaddr; - int seg, error; + int error; if (map == NULL || map == &contig_dmamap) map = &nobounce_dmamap; + if (segs == NULL) + segs = dmat->segments; + if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { - error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); - if (error) - return (error); + _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); + } } vaddr = (vm_offset_t)buf; - lastaddr = *lastaddrp; - bmask = ~(dmat->boundary - 1); - for (seg = *segp; buflen > 0 ; ) { + while (buflen > 0) { bus_size_t max_sgsize; /* * Get the physical address for this segment. */ - if (pmap) - curaddr = pmap_extract(pmap, vaddr); - else + if (pmap == kernel_pmap) curaddr = pmap_kextract(vaddr); + else + curaddr = pmap_extract(pmap, vaddr); /* * Compute the segment size, and adjust counts. @@ -699,231 +829,49 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, map->pagesneeded != 0 && run_filter(dmat, curaddr)) { sgsize = roundup2(sgsize, dmat->alignment); sgsize = MIN(sgsize, max_sgsize); - curaddr = add_bounce_page(dmat, map, vaddr, sgsize); + curaddr = add_bounce_page(dmat, map, vaddr, curaddr, + sgsize); } else { sgsize = MIN(sgsize, max_sgsize); } - - /* - * Make sure we don't cross any boundaries. - */ - if (dmat->boundary > 0) { - baddr = (curaddr + dmat->boundary) & bmask; - if (sgsize > (baddr - curaddr)) - sgsize = (baddr - curaddr); - } - - /* - * Insert chunk into a segment, coalescing with - * previous segment if possible. 
- */ - if (first) { - segs[seg].ds_addr = curaddr; - segs[seg].ds_len = sgsize; - first = 0; - } else { - if (curaddr == lastaddr && - (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && - (dmat->boundary == 0 || - (segs[seg].ds_addr & bmask) == (curaddr & bmask))) - segs[seg].ds_len += sgsize; - else { - if (++seg >= dmat->nsegments) - break; - segs[seg].ds_addr = curaddr; - segs[seg].ds_len = sgsize; - } - } - - lastaddr = curaddr + sgsize; + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; vaddr += sgsize; buflen -= sgsize; } - *segp = seg; - *lastaddrp = lastaddr; - /* * Did we fit? */ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } -/* - * Map the buffer buf into bus space using the dmamap map. - */ -int -bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, - bus_size_t buflen, bus_dmamap_callback_t *callback, - void *callback_arg, int flags) +void +__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_memory_t mem, bus_dmamap_callback_t *callback, + void *callback_arg) { - bus_addr_t lastaddr = 0; - int error, nsegs = 0; - if (map != NULL) { - flags |= BUS_DMA_WAITOK; + map->mem = mem; + map->dmat = dmat; map->callback = callback; map->callback_arg = callback_arg; } - - error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags, - &lastaddr, dmat->segments, &nsegs, 1); - - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, nsegs + 1); - - if (error == EINPROGRESS) { - return (error); - } - - if (error) - (*callback)(callback_arg, dmat->segments, 0, error); - else - (*callback)(callback_arg, dmat->segments, nsegs + 1, 0); - - /* - * Return ENOMEM to the caller so that it can pass it up the stack. - * This error only happens when NOWAIT is set, so deferal is disabled. - */ - if (error == ENOMEM) - return (error); - - return (0); } - -/* - * Like _bus_dmamap_load(), but for mbufs. 
- */ -static __inline int -_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, - struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, - int flags) +bus_dma_segment_t * +_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_segment_t *segs, int nsegs, int error) { - int error; - M_ASSERTPKTHDR(m0); - - flags |= BUS_DMA_NOWAIT; - *nsegs = 0; - error = 0; - if (m0->m_pkthdr.len <= dmat->maxsize) { - int first = 1; - bus_addr_t lastaddr = 0; - struct mbuf *m; - - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len > 0) { - error = _bus_dmamap_load_buffer(dmat, map, - m->m_data, m->m_len, - NULL, flags, &lastaddr, - segs, nsegs, first); - first = 0; - } - } - } else { - error = EINVAL; - } - - /* XXX FIXME: Having to increment nsegs is really annoying */ - ++*nsegs; - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, *nsegs); - return (error); + if (segs == NULL) + segs = dmat->segments; + return (segs); } -int -bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, - struct mbuf *m0, - bus_dmamap_callback2_t *callback, void *callback_arg, - int flags) -{ - int nsegs, error; - - error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs, - flags); - - if (error) { - /* force "no valid mappings" in callback */ - (*callback)(callback_arg, dmat->segments, 0, 0, error); - } else { - (*callback)(callback_arg, dmat->segments, - nsegs, m0->m_pkthdr.len, error); - } - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, nsegs); - return (error); -} - -int -bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, - struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, - int flags) -{ - return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags)); -} - /* - * Like _bus_dmamap_load(), but for uios. 
- */ -int -bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, - struct uio *uio, - bus_dmamap_callback2_t *callback, void *callback_arg, - int flags) -{ - bus_addr_t lastaddr = 0; - int nsegs, error, first, i; - bus_size_t resid; - struct iovec *iov; - pmap_t pmap; - - flags |= BUS_DMA_NOWAIT; - resid = uio->uio_resid; - iov = uio->uio_iov; - - if (uio->uio_segflg == UIO_USERSPACE) { - KASSERT(uio->uio_td != NULL, - ("bus_dmamap_load_uio: USERSPACE but no proc")); - pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); - } else - pmap = NULL; - - nsegs = 0; - error = 0; - first = 1; - for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { - /* - * Now at the first iovec to load. Load each iovec - * until we have exhausted the residual count. - */ - bus_size_t minlen = - resid < iov[i].iov_len ? resid : iov[i].iov_len; - caddr_t addr = (caddr_t) iov[i].iov_base; - - if (minlen > 0) { - error = _bus_dmamap_load_buffer(dmat, map, - addr, minlen, pmap, flags, &lastaddr, - dmat->segments, &nsegs, first); - first = 0; - - resid -= minlen; - } - } - - if (error) { - /* force "no valid mappings" in callback */ - (*callback)(callback_arg, dmat->segments, 0, 0, error); - } else { - (*callback)(callback_arg, dmat->segments, - nsegs+1, uio->uio_resid, error); - } - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, nsegs + 1); - return (error); -} - -/* * Release the mapping held by map. 
*/ void @@ -953,9 +901,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t if (op & BUS_DMASYNC_PREWRITE) { while (bpage != NULL) { - bcopy((void *)bpage->datavaddr, - (void *)bpage->vaddr, - bpage->datacount); + if (bpage->datavaddr != 0) + bcopy((void *)bpage->datavaddr, + (void *)bpage->vaddr, + bpage->datacount); + else + physcopyout(bpage->dataaddr, + (void *)bpage->vaddr, + bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; @@ -963,9 +916,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t if (op & BUS_DMASYNC_POSTREAD) { while (bpage != NULL) { - bcopy((void *)bpage->vaddr, - (void *)bpage->datavaddr, - bpage->datacount); + if (bpage->datavaddr != 0) + bcopy((void *)bpage->vaddr, + (void *)bpage->datavaddr, + bpage->datacount); + else + physcopyin((void *)bpage->vaddr, + bpage->dataaddr, + bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; @@ -1137,7 +1095,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmama static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, - bus_size_t size) + bus_addr_t addr, bus_size_t size) { struct bounce_zone *bz; struct bounce_page *bpage; @@ -1171,6 +1129,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t m bpage->busaddr |= vaddr & PAGE_MASK; } bpage->datavaddr = vaddr; + bpage->dataaddr = addr; bpage->datacount = size; STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); return (bpage->busaddr); @@ -1224,8 +1183,9 @@ busdma_swi(void) mtx_unlock(&bounce_lock); dmat = map->dmat; (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); - bus_dmamap_load(map->dmat, map, map->buf, map->buflen, - map->callback, map->callback_arg, /*flags*/0); + bus_dmamap_load_mem(map->dmat, map, &map->mem, + map->callback, map->callback_arg, + BUS_DMA_WAITOK); (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); mtx_lock(&bounce_lock); } Property changes on: i386/conf/XENHVM 
___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/i386/conf/XENHVM:r243873-244873 Property changes on: contrib/libfdt ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/libfdt:r243873-244873 Property changes on: contrib/octeon-sdk ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/octeon-sdk:r243873-244873 Property changes on: contrib/x86emu ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/x86emu:r243873-244873 Property changes on: contrib/dev/acpica/common ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/common:r243873-244873 Property changes on: contrib/dev/acpica/compiler ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/compiler:r243873-244873 Property changes on: contrib/dev/acpica/include ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/include:r243873-244873 Property changes on: contrib/dev/acpica/components/resources ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/components/resources:r243873-244873 Property changes on: contrib/dev/acpica/components/tables ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/components/tables:r243873-244873 Property changes on: contrib/dev/acpica/components/utilities ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/components/utilities:r243873-244873 Property changes on: 
contrib/dev/acpica/components/namespace ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/components/namespace:r243873-244873 Property changes on: contrib/dev/acpica/components/parser ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/components/parser:r243873-244873 Property changes on: contrib/dev/acpica/components/disassembler ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/components/disassembler:r243873-244873 Property changes on: contrib/dev/acpica/components/hardware ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/components/hardware:r243873-244873 Property changes on: contrib/dev/acpica/components/debugger ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/components/debugger:r243873-244873 Property changes on: contrib/dev/acpica/components/events ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/components/events:r243873-244873 Property changes on: contrib/dev/acpica/components/executer ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/components/executer:r243873-244873 Property changes on: contrib/dev/acpica/components/dispatcher ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/components/dispatcher:r243873-244873 Property changes on: contrib/dev/acpica/changes.txt ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/changes.txt:r243873-244873 Property changes on: 
contrib/dev/acpica/os_specific ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica/os_specific:r243873-244873 Property changes on: contrib/dev/acpica ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/contrib/dev/acpica:r243873-244873 Property changes on: cddl/contrib/opensolaris ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/cddl/contrib/opensolaris:r243873-244873 Index: security/audit/bsm_fcntl.c =================================================================== --- security/audit/bsm_fcntl.c (.../head/sys) (revision 244874) +++ security/audit/bsm_fcntl.c (.../projects/physbio/sys) (revision 244874) @@ -1,292 +0,0 @@ -/*- - * Copyright (c) 2008-2009 Apple Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Inc. ("Apple") nor the names of - * its contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING - * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * P4: //depot/projects/trustedbsd/openbsm/libbsm/bsm_fcntl.c#2 - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include - -#include - -#include -#include - -struct bsm_fcntl_cmd { - u_short bfc_bsm_fcntl_cmd; - int bfc_local_fcntl_cmd; -}; -typedef struct bsm_fcntl_cmd bsm_fcntl_cmd_t; - -static const bsm_fcntl_cmd_t bsm_fcntl_cmdtab[] = { - { BSM_F_DUPFD, F_DUPFD }, - { BSM_F_GETFD, F_GETFD }, - { BSM_F_SETFD, F_SETFD }, - { BSM_F_GETFL, F_GETFL }, - { BSM_F_SETFL, F_SETFL }, -#ifdef F_O_GETLK - { BSM_F_O_GETLK, F_O_GETLK }, -#endif - { BSM_F_SETLK, F_SETLK }, - { BSM_F_SETLKW, F_SETLK }, -#ifdef F_CHFL - { BSM_F_CHKFL, F_CHKFL }, -#endif -#ifdef F_DUP2FD - { BSM_F_DUP2FD, F_DUP2FD }, -#endif -#ifdef F_ALLOCSP - { BSM_F_ALLOCSP, F_ALLOCSP }, -#endif -#ifdef F_FREESP - { BSM_F_FREESP, F_FREESP }, -#endif -#ifdef F_ISSTREAM - { BSM_F_ISSTREAM, F_ISSTREAM}, -#endif - { BSM_F_GETLK, F_GETLK }, -#ifdef F_PRIV - { BSM_F_PRIV, F_PRIV }, -#endif -#ifdef F_NPRIV - { BSM_F_NPRIV, F_NPRIV }, -#endif -#ifdef F_QUOTACTL - { BSM_F_QUOTACTL, F_QUOTACTL }, -#endif -#ifdef F_BLOCKS - { BSM_F_BLOCKS, F_BLOCKS }, -#endif -#ifdef F_BLKSIZE - { BSM_F_BLKSIZE, F_BLKSIZE }, -#endif - { BSM_F_GETOWN, F_GETOWN }, - { BSM_F_SETOWN, F_SETOWN }, -#ifdef F_REVOKE - { BSM_F_REVOKE, F_REVOKE }, -#endif -#ifdef F_HASREMOTEBLOCKS - { BSM_F_HASREMOTEBLOCKS, - F_HASREMOTEBLOCKS }, -#endif -#ifdef F_FREESP - { BSM_F_FREESP, F_FREESP }, -#endif -#ifdef F_ALLOCSP - { 
BSM_F_ALLOCSP, F_ALLOCSP }, -#endif -#ifdef F_FREESP64 - { BSM_F_FREESP64, F_FREESP64 }, -#endif -#ifdef F_ALLOCSP64 - { BSM_F_ALLOCSP64, F_ALLOCSP64 }, -#endif -#ifdef F_GETLK64 - { BSM_F_GETLK64, F_GETLK64 }, -#endif -#ifdef F_SETLK64 - { BSM_F_SETLK64, F_SETLK64 }, -#endif -#ifdef F_SETLKW64 - { BSM_F_SETLKW64, F_SETLKW64 }, -#endif -#ifdef F_SHARE - { BSM_F_SHARE, F_SHARE }, -#endif -#ifdef F_UNSHARE - { BSM_F_UNSHARE, F_UNSHARE }, -#endif -#ifdef F_SETLK_NBMAND - { BSM_F_SETLK_NBMAND, F_SETLK_NBMAND }, -#endif -#ifdef F_SHARE_NBMAND - { BSM_F_SHARE_NBMAND, F_SHARE_NBMAND }, -#endif -#ifdef F_SETLK64_NBMAND - { BSM_F_SETLK64_NBMAND, F_SETLK64_NBMAND }, -#endif -#ifdef F_GETXFL - { BSM_F_GETXFL, F_GETXFL }, -#endif -#ifdef F_BADFD - { BSM_F_BADFD, F_BADFD }, -#endif -#ifdef F_OGETLK - { BSM_F_OGETLK, F_OGETLK }, -#endif -#ifdef F_OSETLK - { BSM_F_OSETLK, F_OSETLK }, -#endif -#ifdef F_OSETLKW - { BSM_F_OSETLKW, F_OSETLKW }, -#endif -#ifdef F_SETLK_REMOTE - { BSM_F_SETLK_REMOTE, F_SETLK_REMOTE }, -#endif - -#ifdef F_SETSIG - { BSM_F_SETSIG, F_SETSIG }, -#endif -#ifdef F_GETSIG - { BSM_F_GETSIG, F_GETSIG }, -#endif - -#ifdef F_CHKCLEAN - { BSM_F_CHKCLEAN, F_CHKCLEAN }, -#endif -#ifdef F_PREALLOCATE - { BSM_F_PREALLOCATE, F_PREALLOCATE }, -#endif -#ifdef F_SETSIZE - { BSM_F_SETSIZE, F_SETSIZE }, -#endif -#ifdef F_RDADVISE - { BSM_F_RDADVISE, F_RDADVISE }, -#endif -#ifdef F_RDAHEAD - { BSM_F_RDAHEAD, F_RDAHEAD }, -#endif -#ifdef F_READBOOTSTRAP - { BSM_F_READBOOTSTRAP, F_READBOOTSTRAP }, -#endif -#ifdef F_WRITEBOOTSTRAP - { BSM_F_WRITEBOOTSTRAP, F_WRITEBOOTSTRAP }, -#endif -#ifdef F_NOCACHE - { BSM_F_NOCACHE, F_NOCACHE }, -#endif -#ifdef F_LOG2PHYS - { BSM_F_LOG2PHYS, F_LOG2PHYS }, -#endif -#ifdef F_GETPATH - { BSM_F_GETPATH, F_GETPATH }, -#endif -#ifdef F_FULLFSYNC - { BSM_F_FULLFSYNC, F_FULLFSYNC }, -#endif -#ifdef F_PATHPKG_CHECK - { BSM_F_PATHPKG_CHECK, F_PATHPKG_CHECK }, -#endif -#ifdef F_FREEZE_FS - { BSM_F_FREEZE_FS, F_FREEZE_FS }, -#endif -#ifdef F_THAW_FS - 
{ BSM_F_THAW_FS, F_THAW_FS }, -#endif -#ifdef F_GLOBAL_NOCACHE - { BSM_F_GLOBAL_NOCACHE, F_GLOBAL_NOCACHE }, -#endif -#ifdef F_OPENFROM - { BSM_F_OPENFROM, F_OPENFROM }, -#endif -#ifdef F_UNLINKFROM - { BSM_F_UNLINKFROM, F_UNLINKFROM }, -#endif -#ifdef F_CHECK_OPENEVT - { BSM_F_CHECK_OPENEVT, F_CHECK_OPENEVT }, -#endif -#ifdef F_ADDSIGS - { BSM_F_ADDSIGS, F_ADDSIGS }, -#endif -#ifdef F_MARKDEPENDENCY - { BSM_F_MARKDEPENDENCY, F_MARKDEPENDENCY }, -#endif - -#ifdef FCNTL_FS_SPECIFIC_BASE - { BSM_F_FS_SPECIFIC_0, FCNTL_FS_SPECIFIC_BASE}, - { BSM_F_FS_SPECIFIC_1, FCNTL_FS_SPECIFIC_BASE + 1}, - { BSM_F_FS_SPECIFIC_2, FCNTL_FS_SPECIFIC_BASE + 2}, - { BSM_F_FS_SPECIFIC_3, FCNTL_FS_SPECIFIC_BASE + 3}, - { BSM_F_FS_SPECIFIC_4, FCNTL_FS_SPECIFIC_BASE + 4}, - { BSM_F_FS_SPECIFIC_5, FCNTL_FS_SPECIFIC_BASE + 5}, - { BSM_F_FS_SPECIFIC_6, FCNTL_FS_SPECIFIC_BASE + 6}, - { BSM_F_FS_SPECIFIC_7, FCNTL_FS_SPECIFIC_BASE + 7}, - { BSM_F_FS_SPECIFIC_8, FCNTL_FS_SPECIFIC_BASE + 8}, - { BSM_F_FS_SPECIFIC_9, FCNTL_FS_SPECIFIC_BASE + 9}, - { BSM_F_FS_SPECIFIC_10, FCNTL_FS_SPECIFIC_BASE + 10}, - { BSM_F_FS_SPECIFIC_11, FCNTL_FS_SPECIFIC_BASE + 11}, - { BSM_F_FS_SPECIFIC_12, FCNTL_FS_SPECIFIC_BASE + 12}, - { BSM_F_FS_SPECIFIC_13, FCNTL_FS_SPECIFIC_BASE + 13}, - { BSM_F_FS_SPECIFIC_14, FCNTL_FS_SPECIFIC_BASE + 14}, - { BSM_F_FS_SPECIFIC_15, FCNTL_FS_SPECIFIC_BASE + 15}, -#endif /* FCNTL_FS_SPECIFIC_BASE */ -}; -static const int bsm_fcntl_cmd_count = sizeof(bsm_fcntl_cmdtab) / - sizeof(bsm_fcntl_cmdtab[0]); - -static const bsm_fcntl_cmd_t * -bsm_lookup_local_fcntl_cmd(int local_fcntl_cmd) -{ - int i; - - for (i = 0; i < bsm_fcntl_cmd_count; i++) { - if (bsm_fcntl_cmdtab[i].bfc_local_fcntl_cmd == - local_fcntl_cmd) - return (&bsm_fcntl_cmdtab[i]); - } - return (NULL); -} - -u_short -au_fcntl_cmd_to_bsm(int local_fcntl_cmd) -{ - const bsm_fcntl_cmd_t *bfcp; - - bfcp = bsm_lookup_local_fcntl_cmd(local_fcntl_cmd); - if (bfcp == NULL) - return (BSM_F_UNKNOWN); - return (bfcp->bfc_bsm_fcntl_cmd); -} - 
-static const bsm_fcntl_cmd_t * -bsm_lookup_bsm_fcntl_cmd(u_short bsm_fcntl_cmd) -{ - int i; - - for (i = 0; i < bsm_fcntl_cmd_count; i++) { - if (bsm_fcntl_cmdtab[i].bfc_bsm_fcntl_cmd == - bsm_fcntl_cmd) - return (&bsm_fcntl_cmdtab[i]); - } - return (NULL); -} - -int -au_bsm_to_fcntl_cmd(u_short bsm_fcntl_cmd, int *local_fcntl_cmdp) -{ - const bsm_fcntl_cmd_t *bfcp; - - bfcp = bsm_lookup_bsm_fcntl_cmd(bsm_fcntl_cmd); - if (bfcp == NULL || bfcp->bfc_local_fcntl_cmd) - return (-1); - *local_fcntl_cmdp = bfcp->bfc_local_fcntl_cmd; - return (0); -} Index: security/audit/bsm_domain.c =================================================================== --- security/audit/bsm_domain.c (.../head/sys) (revision 244874) +++ security/audit/bsm_domain.c (.../projects/physbio/sys) (revision 244874) @@ -1,495 +0,0 @@ -/*- - * Copyright (c) 2008 Apple Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Inc. ("Apple") nor the names of - * its contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING - * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * P4: //depot/projects/trustedbsd/openbsm/libbsm/bsm_domain.c#3 - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include - -#include - -#include -#include - -struct bsm_domain { - u_short bd_bsm_domain; - int bd_local_domain; -}; - -#define PF_NO_LOCAL_MAPPING -600 - -static const struct bsm_domain bsm_domains[] = { - { BSM_PF_UNSPEC, PF_UNSPEC }, - { BSM_PF_LOCAL, PF_LOCAL }, - { BSM_PF_INET, PF_INET }, - { BSM_PF_IMPLINK, -#ifdef PF_IMPLINK - PF_IMPLINK -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_PUP, -#ifdef PF_PUP - PF_PUP -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_CHAOS, -#ifdef PF_CHAOS - PF_CHAOS -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_NS, -#ifdef PF_NS - PF_NS -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_NBS, -#ifdef PF_NBS - PF_NBS -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_ECMA, -#ifdef PF_ECMA - PF_ECMA -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_DATAKIT, -#ifdef PF_DATAKIT - PF_DATAKIT -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_CCITT, -#ifdef PF_CCITT - PF_CCITT -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_SNA, PF_SNA }, - { BSM_PF_DECnet, PF_DECnet }, - { BSM_PF_DLI, -#ifdef PF_DLI - PF_DLI -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_LAT, -#ifdef PF_LAT - PF_LAT -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_HYLINK, -#ifdef PF_HYLINK - PF_HYLINK -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_APPLETALK, PF_APPLETALK }, - { 
BSM_PF_NIT, -#ifdef PF_NIT - PF_NIT -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_802, -#ifdef PF_802 - PF_802 -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_OSI, -#ifdef PF_OSI - PF_OSI -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_X25, -#ifdef PF_X25 - PF_X25 -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_OSINET, -#ifdef PF_OSINET - PF_OSINET -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_GOSIP, -#ifdef PF_GOSIP - PF_GOSIP -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_IPX, PF_IPX }, - { BSM_PF_ROUTE, PF_ROUTE }, - { BSM_PF_LINK, -#ifdef PF_LINK - PF_LINK -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_INET6, PF_INET6 }, - { BSM_PF_KEY, PF_KEY }, - { BSM_PF_NCA, -#ifdef PF_NCA - PF_NCA -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_POLICY, -#ifdef PF_POLICY - PF_POLICY -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_INET_OFFLOAD, -#ifdef PF_INET_OFFLOAD - PF_INET_OFFLOAD -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_NETBIOS, -#ifdef PF_NETBIOS - PF_NETBIOS -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_ISO, -#ifdef PF_ISO - PF_ISO -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_XTP, -#ifdef PF_XTP - PF_XTP -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_COIP, -#ifdef PF_COIP - PF_COIP -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_CNT, -#ifdef PF_CNT - PF_CNT -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_RTIP, -#ifdef PF_RTIP - PF_RTIP -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_SIP, -#ifdef PF_SIP - PF_SIP -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_PIP, -#ifdef PF_PIP - PF_PIP -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_ISDN, -#ifdef PF_ISDN - PF_ISDN -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_E164, -#ifdef PF_E164 - PF_E164 -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_NATM, -#ifdef PF_NATM - PF_NATM -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_ATM, -#ifdef PF_ATM - PF_ATM -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { 
BSM_PF_NETGRAPH, -#ifdef PF_NETGRAPH - PF_NETGRAPH -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_SLOW, -#ifdef PF_SLOW - PF_SLOW -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_SCLUSTER, -#ifdef PF_SCLUSTER - PF_SCLUSTER -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_ARP, -#ifdef PF_ARP - PF_ARP -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_BLUETOOTH, -#ifdef PF_BLUETOOTH - PF_BLUETOOTH -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_AX25, -#ifdef PF_AX25 - PF_AX25 -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_ROSE, -#ifdef PF_ROSE - PF_ROSE -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_NETBEUI, -#ifdef PF_NETBEUI - PF_NETBEUI -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_SECURITY, -#ifdef PF_SECURITY - PF_SECURITY -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_PACKET, -#ifdef PF_PACKET - PF_PACKET -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_ASH, -#ifdef PF_ASH - PF_ASH -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_ECONET, -#ifdef PF_ECONET - PF_ECONET -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_ATMSVC, -#ifdef PF_ATMSVC - PF_ATMSVC -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_IRDA, -#ifdef PF_IRDA - PF_IRDA -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_PPPOX, -#ifdef PF_PPPOX - PF_PPPOX -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_WANPIPE, -#ifdef PF_WANPIPE - PF_WANPIPE -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_LLC, -#ifdef PF_LLC - PF_LLC -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_CAN, -#ifdef PF_CAN - PF_CAN -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_TIPC, -#ifdef PF_TIPC - PF_TIPC -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_IUCV, -#ifdef PF_IUCV - PF_IUCV -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_RXRPC, -#ifdef PF_RXRPC - PF_RXRPC -#else - PF_NO_LOCAL_MAPPING -#endif - }, - { BSM_PF_PHONET, -#ifdef PF_PHONET - PF_PHONET -#else - PF_NO_LOCAL_MAPPING -#endif - }, -}; -static const int bsm_domains_count = 
sizeof(bsm_domains) / - sizeof(bsm_domains[0]); - -static const struct bsm_domain * -bsm_lookup_local_domain(int local_domain) -{ - int i; - - for (i = 0; i < bsm_domains_count; i++) { - if (bsm_domains[i].bd_local_domain == local_domain) - return (&bsm_domains[i]); - } - return (NULL); -} - -u_short -au_domain_to_bsm(int local_domain) -{ - const struct bsm_domain *bstp; - - bstp = bsm_lookup_local_domain(local_domain); - if (bstp == NULL) - return (BSM_PF_UNKNOWN); - return (bstp->bd_bsm_domain); -} - -static const struct bsm_domain * -bsm_lookup_bsm_domain(u_short bsm_domain) -{ - int i; - - for (i = 0; i < bsm_domains_count; i++) { - if (bsm_domains[i].bd_bsm_domain == bsm_domain) - return (&bsm_domains[i]); - } - return (NULL); -} - -int -au_bsm_to_domain(u_short bsm_domain, int *local_domainp) -{ - const struct bsm_domain *bstp; - - bstp = bsm_lookup_bsm_domain(bsm_domain); - if (bstp == NULL || bstp->bd_local_domain) - return (-1); - *local_domainp = bstp->bd_local_domain; - return (0); -} Index: security/audit/bsm_token.c =================================================================== --- security/audit/bsm_token.c (.../head/sys) (revision 244874) +++ security/audit/bsm_token.c (.../projects/physbio/sys) (revision 244874) @@ -1,1597 +0,0 @@ -/*- - * Copyright (c) 2004-2009 Apple Inc. - * Copyright (c) 2005 SPARTA, Inc. - * All rights reserved. - * - * This code was developed in part by Robert N. M. Watson, Senior Principal - * Scientist, SPARTA, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * 3. Neither the name of Apple Inc. ("Apple") nor the names of - * its contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING - * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * P4: //depot/projects/trustedbsd/openbsm/libbsm/bsm_token.c#99 - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include -#include - - -#include -#include -#include -#include -#include - -#define GET_TOKEN_AREA(t, dptr, length) do { \ - t = malloc(sizeof(token_t), M_AUDITBSM, M_WAITOK); \ - t->t_data = malloc(length, M_AUDITBSM, M_WAITOK | M_ZERO); \ - t->len = length; \ - dptr = t->t_data; \ -} while (0) - -/* - * token ID 1 byte - * success/failure 1 byte - * privstrlen 2 bytes - * privstr N bytes + 1 (\0 byte) - */ -token_t * -au_to_upriv(char sorf, char *priv) -{ - u_int16_t textlen; - u_char *dptr; - token_t *t; - - textlen = strlen(priv) + 1; - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_char) + - sizeof(u_int16_t) + textlen); - - ADD_U_CHAR(dptr, AUT_UPRIV); - ADD_U_CHAR(dptr, sorf); - ADD_U_INT16(dptr, textlen); - ADD_STRING(dptr, priv, textlen); - return (t); -} - -/* - * 
token ID 1 byte - * privtstrlen 2 bytes - * privtstr N bytes + 1 - * privstrlen 2 bytes - * privstr N bytes + 1 - */ -token_t * -au_to_privset(char *privtypestr, char *privstr) -{ - u_int16_t type_len, priv_len; - u_char *dptr; - token_t *t; - - type_len = strlen(privtypestr) + 1; - priv_len = strlen(privstr) + 1; - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int16_t) + - sizeof(u_int16_t) + type_len + priv_len); - - ADD_U_CHAR(dptr, AUT_PRIV); - ADD_U_INT16(dptr, type_len); - ADD_STRING(dptr, privtypestr, type_len); - ADD_U_INT16(dptr, priv_len); - ADD_STRING(dptr, privstr, priv_len); - return (t); -} - -/* - * token ID 1 byte - * argument # 1 byte - * argument value 4 bytes/8 bytes (32-bit/64-bit value) - * text length 2 bytes - * text N bytes + 1 terminating NULL byte - */ -token_t * -au_to_arg32(char n, const char *text, u_int32_t v) -{ - token_t *t; - u_char *dptr = NULL; - u_int16_t textlen; - - textlen = strlen(text); - textlen += 1; - - GET_TOKEN_AREA(t, dptr, 2 * sizeof(u_char) + sizeof(u_int32_t) + - sizeof(u_int16_t) + textlen); - - ADD_U_CHAR(dptr, AUT_ARG32); - ADD_U_CHAR(dptr, n); - ADD_U_INT32(dptr, v); - ADD_U_INT16(dptr, textlen); - ADD_STRING(dptr, text, textlen); - - return (t); -} - -token_t * -au_to_arg64(char n, const char *text, u_int64_t v) -{ - token_t *t; - u_char *dptr = NULL; - u_int16_t textlen; - - textlen = strlen(text); - textlen += 1; - - GET_TOKEN_AREA(t, dptr, 2 * sizeof(u_char) + sizeof(u_int64_t) + - sizeof(u_int16_t) + textlen); - - ADD_U_CHAR(dptr, AUT_ARG64); - ADD_U_CHAR(dptr, n); - ADD_U_INT64(dptr, v); - ADD_U_INT16(dptr, textlen); - ADD_STRING(dptr, text, textlen); - - return (t); -} - -token_t * -au_to_arg(char n, const char *text, u_int32_t v) -{ - - return (au_to_arg32(n, text, v)); -} - -#if defined(_KERNEL) || defined(KERNEL) -/* - * token ID 1 byte - * file access mode 4 bytes - * owner user ID 4 bytes - * owner group ID 4 bytes - * file system ID 4 bytes - * node ID 8 bytes - * device 4 bytes/8 bytes 
(32-bit/64-bit) - */ -token_t * -au_to_attr32(struct vnode_au_info *vni) -{ - token_t *t; - u_char *dptr = NULL; - u_int16_t pad0_16 = 0; - u_int32_t pad0_32 = 0; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 2 * sizeof(u_int16_t) + - 3 * sizeof(u_int32_t) + sizeof(u_int64_t) + sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_ATTR32); - - /* - * BSD defines the size for the file mode as 2 bytes; BSM defines 4 - * so pad with 0. - * - * XXXRW: Possibly should be conditionally compiled. - * - * XXXRW: Should any conversions take place on the mode? - */ - ADD_U_INT16(dptr, pad0_16); - ADD_U_INT16(dptr, vni->vn_mode); - - ADD_U_INT32(dptr, vni->vn_uid); - ADD_U_INT32(dptr, vni->vn_gid); - ADD_U_INT32(dptr, vni->vn_fsid); - - /* - * Some systems use 32-bit file ID's, others use 64-bit file IDs. - * Attempt to handle both, and let the compiler sort it out. If we - * could pick this out at compile-time, it would be better, so as to - * avoid the else case below. - */ - if (sizeof(vni->vn_fileid) == sizeof(uint32_t)) { - ADD_U_INT32(dptr, pad0_32); - ADD_U_INT32(dptr, vni->vn_fileid); - } else if (sizeof(vni->vn_fileid) == sizeof(uint64_t)) - ADD_U_INT64(dptr, vni->vn_fileid); - else - ADD_U_INT64(dptr, 0LL); - - ADD_U_INT32(dptr, vni->vn_dev); - - return (t); -} - -token_t * -au_to_attr64(struct vnode_au_info *vni) -{ - token_t *t; - u_char *dptr = NULL; - u_int16_t pad0_16 = 0; - u_int32_t pad0_32 = 0; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 2 * sizeof(u_int16_t) + - 3 * sizeof(u_int32_t) + sizeof(u_int64_t) * 2); - - ADD_U_CHAR(dptr, AUT_ATTR64); - - /* - * BSD defines the size for the file mode as 2 bytes; BSM defines 4 - * so pad with 0. - * - * XXXRW: Possibly should be conditionally compiled. - * - * XXXRW: Should any conversions take place on the mode? 
- */ - ADD_U_INT16(dptr, pad0_16); - ADD_U_INT16(dptr, vni->vn_mode); - - ADD_U_INT32(dptr, vni->vn_uid); - ADD_U_INT32(dptr, vni->vn_gid); - ADD_U_INT32(dptr, vni->vn_fsid); - - /* - * Some systems use 32-bit file ID's, other's use 64-bit file IDs. - * Attempt to handle both, and let the compiler sort it out. If we - * could pick this out at compile-time, it would be better, so as to - * avoid the else case below. - */ - if (sizeof(vni->vn_fileid) == sizeof(uint32_t)) { - ADD_U_INT32(dptr, pad0_32); - ADD_U_INT32(dptr, vni->vn_fileid); - } else if (sizeof(vni->vn_fileid) == sizeof(uint64_t)) - ADD_U_INT64(dptr, vni->vn_fileid); - else - ADD_U_INT64(dptr, 0LL); - - ADD_U_INT64(dptr, vni->vn_dev); - - return (t); -} - -token_t * -au_to_attr(struct vnode_au_info *vni) -{ - - return (au_to_attr32(vni)); -} -#endif /* !(defined(_KERNEL) || defined(KERNEL) */ - -/* - * token ID 1 byte - * how to print 1 byte - * basic unit 1 byte - * unit count 1 byte - * data items (depends on basic unit) - */ -token_t * -au_to_data(char unit_print, char unit_type, char unit_count, const char *p) -{ - token_t *t; - u_char *dptr = NULL; - size_t datasize, totdata; - - /* Determine the size of the basic unit. */ - switch (unit_type) { - case AUR_BYTE: - /* case AUR_CHAR: */ - datasize = AUR_BYTE_SIZE; - break; - - case AUR_SHORT: - datasize = AUR_SHORT_SIZE; - break; - - case AUR_INT32: - /* case AUR_INT: */ - datasize = AUR_INT32_SIZE; - break; - - case AUR_INT64: - datasize = AUR_INT64_SIZE; - break; - - default: - return (NULL); - } - - totdata = datasize * unit_count; - - GET_TOKEN_AREA(t, dptr, 4 * sizeof(u_char) + totdata); - - /* - * XXXRW: We should be byte-swapping each data item for multi-byte - * types. 
- */ - ADD_U_CHAR(dptr, AUT_DATA); - ADD_U_CHAR(dptr, unit_print); - ADD_U_CHAR(dptr, unit_type); - ADD_U_CHAR(dptr, unit_count); - ADD_MEM(dptr, p, totdata); - - return (t); -} - - -/* - * token ID 1 byte - * status 4 bytes - * return value 4 bytes - */ -token_t * -au_to_exit(int retval, int err) -{ - token_t *t; - u_char *dptr = NULL; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 2 * sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_EXIT); - ADD_U_INT32(dptr, err); - ADD_U_INT32(dptr, retval); - - return (t); -} - -/* - */ -token_t * -au_to_groups(int *groups) -{ - - return (au_to_newgroups(AUDIT_MAX_GROUPS, (gid_t *)groups)); -} - -/* - * token ID 1 byte - * number groups 2 bytes - * group list count * 4 bytes - */ -token_t * -au_to_newgroups(u_int16_t n, gid_t *groups) -{ - token_t *t; - u_char *dptr = NULL; - int i; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int16_t) + - n * sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_NEWGROUPS); - ADD_U_INT16(dptr, n); - for (i = 0; i < n; i++) - ADD_U_INT32(dptr, groups[i]); - - return (t); -} - -/* - * token ID 1 byte - * internet address 4 bytes - */ -token_t * -au_to_in_addr(struct in_addr *internet_addr) -{ - token_t *t; - u_char *dptr = NULL; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(uint32_t)); - - ADD_U_CHAR(dptr, AUT_IN_ADDR); - ADD_MEM(dptr, &internet_addr->s_addr, sizeof(uint32_t)); - - return (t); -} - -/* - * token ID 1 byte - * address type/length 4 bytes - * address 16 bytes - */ -token_t * -au_to_in_addr_ex(struct in6_addr *internet_addr) -{ - token_t *t; - u_char *dptr = NULL; - u_int32_t type = AU_IPv6; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 5 * sizeof(uint32_t)); - - ADD_U_CHAR(dptr, AUT_IN_ADDR_EX); - ADD_U_INT32(dptr, type); - ADD_MEM(dptr, internet_addr, 4 * sizeof(uint32_t)); - - return (t); -} - -/* - * token ID 1 byte - * ip header 20 bytes - * - * The IP header should be submitted in network byte order. 
- */ -token_t * -au_to_ip(struct ip *ip) -{ - token_t *t; - u_char *dptr = NULL; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(struct ip)); - - ADD_U_CHAR(dptr, AUT_IP); - ADD_MEM(dptr, ip, sizeof(struct ip)); - - return (t); -} - -/* - * token ID 1 byte - * object ID type 1 byte - * object ID 4 bytes - */ -token_t * -au_to_ipc(char type, int id) -{ - token_t *t; - u_char *dptr = NULL; - - GET_TOKEN_AREA(t, dptr, 2 * sizeof(u_char) + sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_IPC); - ADD_U_CHAR(dptr, type); - ADD_U_INT32(dptr, id); - - return (t); -} - -/* - * token ID 1 byte - * owner user ID 4 bytes - * owner group ID 4 bytes - * creator user ID 4 bytes - * creator group ID 4 bytes - * access mode 4 bytes - * slot sequence # 4 bytes - * key 4 bytes - */ -token_t * -au_to_ipc_perm(struct ipc_perm *perm) -{ - token_t *t; - u_char *dptr = NULL; - u_int16_t pad0 = 0; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 12 * sizeof(u_int16_t) + - sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_IPC_PERM); - - /* - * Systems vary significantly in what types they use in struct - * ipc_perm; at least a few still use 16-bit uid's and gid's, so - * allow for that, as BSM define 32-bit values here. - * Some systems define the sizes for ipc_perm members as 2 bytes; - * BSM defines 4 so pad with 0. - * - * XXXRW: Possibly shoulid be conditionally compiled, and more cases - * need to be handled. 
- */ - if (sizeof(perm->uid) != sizeof(u_int32_t)) { - ADD_U_INT16(dptr, pad0); - ADD_U_INT16(dptr, perm->uid); - ADD_U_INT16(dptr, pad0); - ADD_U_INT16(dptr, perm->gid); - ADD_U_INT16(dptr, pad0); - ADD_U_INT16(dptr, perm->cuid); - ADD_U_INT16(dptr, pad0); - ADD_U_INT16(dptr, perm->cgid); - } else { - ADD_U_INT32(dptr, perm->uid); - ADD_U_INT32(dptr, perm->gid); - ADD_U_INT32(dptr, perm->cuid); - ADD_U_INT32(dptr, perm->cgid); - } - - ADD_U_INT16(dptr, pad0); - ADD_U_INT16(dptr, perm->mode); - - ADD_U_INT16(dptr, pad0); - - ADD_U_INT16(dptr, perm->seq); - - ADD_U_INT32(dptr, perm->key); - - return (t); -} - -/* - * token ID 1 byte - * port IP address 2 bytes - */ -token_t * -au_to_iport(u_int16_t iport) -{ - token_t *t; - u_char *dptr = NULL; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int16_t)); - - ADD_U_CHAR(dptr, AUT_IPORT); - ADD_U_INT16(dptr, iport); - - return (t); -} - -/* - * token ID 1 byte - * size 2 bytes - * data size bytes - */ -token_t * -au_to_opaque(const char *data, u_int16_t bytes) -{ - token_t *t; - u_char *dptr = NULL; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int16_t) + bytes); - - ADD_U_CHAR(dptr, AUT_OPAQUE); - ADD_U_INT16(dptr, bytes); - ADD_MEM(dptr, data, bytes); - - return (t); -} - -/* - * token ID 1 byte - * seconds of time 4 bytes - * milliseconds of time 4 bytes - * file name len 2 bytes - * file pathname N bytes + 1 terminating NULL byte - */ -token_t * -au_to_file(const char *file, struct timeval tm) -{ - token_t *t; - u_char *dptr = NULL; - u_int16_t filelen; - u_int32_t timems; - - filelen = strlen(file); - filelen += 1; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 2 * sizeof(u_int32_t) + - sizeof(u_int16_t) + filelen); - - timems = tm.tv_usec/1000; - - ADD_U_CHAR(dptr, AUT_OTHER_FILE32); - ADD_U_INT32(dptr, tm.tv_sec); - ADD_U_INT32(dptr, timems); /* We need time in ms. 
*/ - ADD_U_INT16(dptr, filelen); - ADD_STRING(dptr, file, filelen); - - return (t); -} - -/* - * token ID 1 byte - * text length 2 bytes - * text N bytes + 1 terminating NULL byte - */ -token_t * -au_to_text(const char *text) -{ - token_t *t; - u_char *dptr = NULL; - u_int16_t textlen; - - textlen = strlen(text); - textlen += 1; - - /* XXXRW: Should validate length against token size limit. */ - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int16_t) + textlen); - - ADD_U_CHAR(dptr, AUT_TEXT); - ADD_U_INT16(dptr, textlen); - ADD_STRING(dptr, text, textlen); - - return (t); -} - -/* - * token ID 1 byte - * path length 2 bytes - * path N bytes + 1 terminating NULL byte - */ -token_t * -au_to_path(const char *text) -{ - token_t *t; - u_char *dptr = NULL; - u_int16_t textlen; - - textlen = strlen(text); - textlen += 1; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int16_t) + textlen); - - ADD_U_CHAR(dptr, AUT_PATH); - ADD_U_INT16(dptr, textlen); - ADD_STRING(dptr, text, textlen); - - return (t); -} - -/* - * token ID 1 byte - * audit ID 4 bytes - * effective user ID 4 bytes - * effective group ID 4 bytes - * real user ID 4 bytes - * real group ID 4 bytes - * process ID 4 bytes - * session ID 4 bytes - * terminal ID - * port ID 4 bytes/8 bytes (32-bit/64-bit value) - * machine address 4 bytes - */ -token_t * -au_to_process32(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, gid_t rgid, - pid_t pid, au_asid_t sid, au_tid_t *tid) -{ - token_t *t; - u_char *dptr = NULL; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 9 * sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_PROCESS32); - ADD_U_INT32(dptr, auid); - ADD_U_INT32(dptr, euid); - ADD_U_INT32(dptr, egid); - ADD_U_INT32(dptr, ruid); - ADD_U_INT32(dptr, rgid); - ADD_U_INT32(dptr, pid); - ADD_U_INT32(dptr, sid); - ADD_U_INT32(dptr, tid->port); - - /* - * Note: Solaris will write out IPv6 addresses here as a 32-bit - * address type and 16 bytes of address, but for IPv4 addresses it - * simply writes the 4-byte 
address directly. We support only IPv4 - * addresses for process32 tokens. - */ - ADD_MEM(dptr, &tid->machine, sizeof(u_int32_t)); - - return (t); -} - -token_t * -au_to_process64(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, gid_t rgid, - pid_t pid, au_asid_t sid, au_tid_t *tid) -{ - token_t *t; - u_char *dptr = NULL; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 8 * sizeof(u_int32_t) + - sizeof(u_int64_t)); - - ADD_U_CHAR(dptr, AUT_PROCESS64); - ADD_U_INT32(dptr, auid); - ADD_U_INT32(dptr, euid); - ADD_U_INT32(dptr, egid); - ADD_U_INT32(dptr, ruid); - ADD_U_INT32(dptr, rgid); - ADD_U_INT32(dptr, pid); - ADD_U_INT32(dptr, sid); - ADD_U_INT64(dptr, tid->port); - - /* - * Note: Solaris will write out IPv6 addresses here as a 32-bit - * address type and 16 bytes of address, but for IPv4 addresses it - * simply writes the 4-byte address directly. We support only IPv4 - * addresses for process64 tokens. - */ - ADD_MEM(dptr, &tid->machine, sizeof(u_int32_t)); - - return (t); -} - -token_t * -au_to_process(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, gid_t rgid, - pid_t pid, au_asid_t sid, au_tid_t *tid) -{ - - return (au_to_process32(auid, euid, egid, ruid, rgid, pid, sid, - tid)); -} - -/* - * token ID 1 byte - * audit ID 4 bytes - * effective user ID 4 bytes - * effective group ID 4 bytes - * real user ID 4 bytes - * real group ID 4 bytes - * process ID 4 bytes - * session ID 4 bytes - * terminal ID - * port ID 4 bytes/8 bytes (32-bit/64-bit value) - * address type-len 4 bytes - * machine address 16 bytes - */ -token_t * -au_to_process32_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid) -{ - token_t *t; - u_char *dptr = NULL; - - KASSERT((tid->at_type == AU_IPv4) || (tid->at_type == AU_IPv6), - ("au_to_process32_ex: type %u", (unsigned int)tid->at_type)); - if (tid->at_type == AU_IPv4) - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + - 10 * sizeof(u_int32_t)); - else - GET_TOKEN_AREA(t, dptr, 
sizeof(u_char) + - 13 * sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_PROCESS32_EX); - ADD_U_INT32(dptr, auid); - ADD_U_INT32(dptr, euid); - ADD_U_INT32(dptr, egid); - ADD_U_INT32(dptr, ruid); - ADD_U_INT32(dptr, rgid); - ADD_U_INT32(dptr, pid); - ADD_U_INT32(dptr, sid); - ADD_U_INT32(dptr, tid->at_port); - ADD_U_INT32(dptr, tid->at_type); - ADD_MEM(dptr, &tid->at_addr[0], sizeof(u_int32_t)); - if (tid->at_type == AU_IPv6) { - ADD_MEM(dptr, &tid->at_addr[1], sizeof(u_int32_t)); - ADD_MEM(dptr, &tid->at_addr[2], sizeof(u_int32_t)); - ADD_MEM(dptr, &tid->at_addr[3], sizeof(u_int32_t)); - } - - return (t); -} - -token_t * -au_to_process64_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid) -{ - token_t *t; - u_char *dptr = NULL; - - if (tid->at_type == AU_IPv4) - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + - 7 * sizeof(u_int32_t) + sizeof(u_int64_t) + - 2 * sizeof(u_int32_t)); - else if (tid->at_type == AU_IPv6) - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + - 7 * sizeof(u_int32_t) + sizeof(u_int64_t) + - 5 * sizeof(u_int32_t)); - else - panic("au_to_process64_ex: invalidate at_type (%d)", - tid->at_type); - - ADD_U_CHAR(dptr, AUT_PROCESS64_EX); - ADD_U_INT32(dptr, auid); - ADD_U_INT32(dptr, euid); - ADD_U_INT32(dptr, egid); - ADD_U_INT32(dptr, ruid); - ADD_U_INT32(dptr, rgid); - ADD_U_INT32(dptr, pid); - ADD_U_INT32(dptr, sid); - ADD_U_INT64(dptr, tid->at_port); - ADD_U_INT32(dptr, tid->at_type); - ADD_MEM(dptr, &tid->at_addr[0], sizeof(u_int32_t)); - if (tid->at_type == AU_IPv6) { - ADD_MEM(dptr, &tid->at_addr[1], sizeof(u_int32_t)); - ADD_MEM(dptr, &tid->at_addr[2], sizeof(u_int32_t)); - ADD_MEM(dptr, &tid->at_addr[3], sizeof(u_int32_t)); - } - - return (t); -} - -token_t * -au_to_process_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid) -{ - - return (au_to_process32_ex(auid, euid, egid, ruid, rgid, pid, sid, - tid)); -} - -/* - * token ID 1 byte - * 
error status 1 byte - * return value 4 bytes/8 bytes (32-bit/64-bit value) - */ -token_t * -au_to_return32(char status, u_int32_t ret) -{ - token_t *t; - u_char *dptr = NULL; - - GET_TOKEN_AREA(t, dptr, 2 * sizeof(u_char) + sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_RETURN32); - ADD_U_CHAR(dptr, status); - ADD_U_INT32(dptr, ret); - - return (t); -} - -token_t * -au_to_return64(char status, u_int64_t ret) -{ - token_t *t; - u_char *dptr = NULL; - - GET_TOKEN_AREA(t, dptr, 2 * sizeof(u_char) + sizeof(u_int64_t)); - - ADD_U_CHAR(dptr, AUT_RETURN64); - ADD_U_CHAR(dptr, status); - ADD_U_INT64(dptr, ret); - - return (t); -} - -token_t * -au_to_return(char status, u_int32_t ret) -{ - - return (au_to_return32(status, ret)); -} - -/* - * token ID 1 byte - * sequence number 4 bytes - */ -token_t * -au_to_seq(long audit_count) -{ - token_t *t; - u_char *dptr = NULL; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_SEQ); - ADD_U_INT32(dptr, audit_count); - - return (t); -} - -/* - * token ID 1 byte - * socket domain 2 bytes - * socket type 2 bytes - * address type 2 byte - * local port 2 bytes - * local address 4 bytes/16 bytes (IPv4/IPv6 address) - * remote port 2 bytes - * remote address 4 bytes/16 bytes (IPv4/IPv6 address) - * - * Domain and type arguments to this routine are assumed to already have been - * converted to the BSM constant space, so we don't do that here. 
- */ -token_t * -au_to_socket_ex(u_short so_domain, u_short so_type, - struct sockaddr *sa_local, struct sockaddr *sa_remote) -{ - token_t *t; - u_char *dptr = NULL; - struct sockaddr_in *sin; - struct sockaddr_in6 *sin6; - - if (so_domain == AF_INET) - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + - 5 * sizeof(u_int16_t) + 2 * sizeof(u_int32_t)); - else if (so_domain == AF_INET6) - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + - 5 * sizeof(u_int16_t) + 8 * sizeof(u_int32_t)); - else - return (NULL); - - ADD_U_CHAR(dptr, AUT_SOCKET_EX); - ADD_U_INT16(dptr, au_domain_to_bsm(so_domain)); - ADD_U_INT16(dptr, au_socket_type_to_bsm(so_type)); - if (so_domain == AF_INET) { - ADD_U_INT16(dptr, AU_IPv4); - sin = (struct sockaddr_in *)sa_local; - ADD_MEM(dptr, &sin->sin_port, sizeof(uint16_t)); - ADD_MEM(dptr, &sin->sin_addr.s_addr, sizeof(uint32_t)); - sin = (struct sockaddr_in *)sa_remote; - ADD_MEM(dptr, &sin->sin_port, sizeof(uint16_t)); - ADD_MEM(dptr, &sin->sin_addr.s_addr, sizeof(uint32_t)); - } else { - ADD_U_INT16(dptr, AU_IPv6); - sin6 = (struct sockaddr_in6 *)sa_local; - ADD_MEM(dptr, &sin6->sin6_port, sizeof(uint16_t)); - ADD_MEM(dptr, &sin6->sin6_addr, 4 * sizeof(uint32_t)); - sin6 = (struct sockaddr_in6 *)sa_remote; - ADD_MEM(dptr, &sin6->sin6_port, sizeof(uint16_t)); - ADD_MEM(dptr, &sin6->sin6_addr, 4 * sizeof(uint32_t)); - } - - return (t); -} - -/* - * Kernel-specific version of the above function. - * - * XXXRW: Should now use au_to_socket_ex() here. 
- */ -#ifdef _KERNEL -token_t * -kau_to_socket(struct socket_au_info *soi) -{ - token_t *t; - u_char *dptr; - u_int16_t so_type; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 2 * sizeof(u_int16_t) + - sizeof(u_int32_t) + sizeof(u_int16_t) + sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_SOCKET); - /* Coerce the socket type into a short value */ - so_type = soi->so_type; - ADD_U_INT16(dptr, so_type); - ADD_U_INT16(dptr, soi->so_lport); - ADD_U_INT32(dptr, soi->so_laddr); - ADD_U_INT16(dptr, soi->so_rport); - ADD_U_INT32(dptr, soi->so_raddr); - - return (t); -} -#endif - -/* - * token ID 1 byte - * socket family 2 bytes - * path (up to) 104 bytes + NULL (NULL terminated string) - */ -token_t * -au_to_sock_unix(struct sockaddr_un *so) -{ - token_t *t; - u_char *dptr; - - GET_TOKEN_AREA(t, dptr, 3 * sizeof(u_char) + strlen(so->sun_path) + 1); - - ADD_U_CHAR(dptr, AUT_SOCKUNIX); - /* BSM token has two bytes for family */ - ADD_U_CHAR(dptr, 0); - ADD_U_CHAR(dptr, so->sun_family); - ADD_STRING(dptr, so->sun_path, strlen(so->sun_path) + 1); - - return (t); -} - -/* - * token ID 1 byte - * socket family 2 bytes - * local port 2 bytes - * socket address 4 bytes - */ -token_t * -au_to_sock_inet32(struct sockaddr_in *so) -{ - token_t *t; - u_char *dptr = NULL; - uint16_t family; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 2 * sizeof(uint16_t) + - sizeof(uint32_t)); - - ADD_U_CHAR(dptr, AUT_SOCKINET32); - /* - * BSM defines the family field as 16 bits, but many operating - * systems have an 8-bit sin_family field. Extend to 16 bits before - * writing into the token. Assume that both the port and the address - * in the sockaddr_in are already in network byte order, but family - * is in local byte order. - * - * XXXRW: Should a name space conversion be taking place on the value - * of sin_family? 
- */ - family = so->sin_family; - ADD_U_INT16(dptr, family); - ADD_MEM(dptr, &so->sin_port, sizeof(uint16_t)); - ADD_MEM(dptr, &so->sin_addr.s_addr, sizeof(uint32_t)); - - return (t); -} - -token_t * -au_to_sock_inet128(struct sockaddr_in6 *so) -{ - token_t *t; - u_char *dptr = NULL; - - GET_TOKEN_AREA(t, dptr, 3 * sizeof(u_char) + sizeof(u_int16_t) + - 4 * sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_SOCKINET128); - /* - * In BSD, sin6_family is one octet, but BSM defines the token to - * store two. So we copy in a 0 first. XXXRW: Possibly should be - * conditionally compiled. - */ - ADD_U_CHAR(dptr, 0); - ADD_U_CHAR(dptr, so->sin6_family); - - ADD_U_INT16(dptr, so->sin6_port); - ADD_MEM(dptr, &so->sin6_addr, 4 * sizeof(uint32_t)); - - return (t); -} - -token_t * -au_to_sock_inet(struct sockaddr_in *so) -{ - - return (au_to_sock_inet32(so)); -} - -/* - * token ID 1 byte - * audit ID 4 bytes - * effective user ID 4 bytes - * effective group ID 4 bytes - * real user ID 4 bytes - * real group ID 4 bytes - * process ID 4 bytes - * session ID 4 bytes - * terminal ID - * port ID 4 bytes/8 bytes (32-bit/64-bit value) - * machine address 4 bytes - */ -token_t * -au_to_subject32(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, gid_t rgid, - pid_t pid, au_asid_t sid, au_tid_t *tid) -{ - token_t *t; - u_char *dptr = NULL; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 9 * sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_SUBJECT32); - ADD_U_INT32(dptr, auid); - ADD_U_INT32(dptr, euid); - ADD_U_INT32(dptr, egid); - ADD_U_INT32(dptr, ruid); - ADD_U_INT32(dptr, rgid); - ADD_U_INT32(dptr, pid); - ADD_U_INT32(dptr, sid); - ADD_U_INT32(dptr, tid->port); - ADD_MEM(dptr, &tid->machine, sizeof(u_int32_t)); - - return (t); -} - -token_t * -au_to_subject64(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, gid_t rgid, - pid_t pid, au_asid_t sid, au_tid_t *tid) -{ - token_t *t; - u_char *dptr = NULL; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 7 * sizeof(u_int32_t) + - sizeof(u_int64_t) + 
sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_SUBJECT64); - ADD_U_INT32(dptr, auid); - ADD_U_INT32(dptr, euid); - ADD_U_INT32(dptr, egid); - ADD_U_INT32(dptr, ruid); - ADD_U_INT32(dptr, rgid); - ADD_U_INT32(dptr, pid); - ADD_U_INT32(dptr, sid); - ADD_U_INT64(dptr, tid->port); - ADD_MEM(dptr, &tid->machine, sizeof(u_int32_t)); - - return (t); -} - -token_t * -au_to_subject(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, gid_t rgid, - pid_t pid, au_asid_t sid, au_tid_t *tid) -{ - - return (au_to_subject32(auid, euid, egid, ruid, rgid, pid, sid, - tid)); -} - -/* - * token ID 1 byte - * audit ID 4 bytes - * effective user ID 4 bytes - * effective group ID 4 bytes - * real user ID 4 bytes - * real group ID 4 bytes - * process ID 4 bytes - * session ID 4 bytes - * terminal ID - * port ID 4 bytes/8 bytes (32-bit/64-bit value) - * address type/length 4 bytes - * machine address 16 bytes - */ -token_t * -au_to_subject32_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid) -{ - token_t *t; - u_char *dptr = NULL; - - KASSERT((tid->at_type == AU_IPv4) || (tid->at_type == AU_IPv6), - ("au_to_subject32_ex: type %u", (unsigned int)tid->at_type)); - - if (tid->at_type == AU_IPv4) - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 10 * - sizeof(u_int32_t)); - else - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 13 * - sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_SUBJECT32_EX); - ADD_U_INT32(dptr, auid); - ADD_U_INT32(dptr, euid); - ADD_U_INT32(dptr, egid); - ADD_U_INT32(dptr, ruid); - ADD_U_INT32(dptr, rgid); - ADD_U_INT32(dptr, pid); - ADD_U_INT32(dptr, sid); - ADD_U_INT32(dptr, tid->at_port); - ADD_U_INT32(dptr, tid->at_type); - if (tid->at_type == AU_IPv6) - ADD_MEM(dptr, &tid->at_addr[0], 4 * sizeof(u_int32_t)); - else - ADD_MEM(dptr, &tid->at_addr[0], sizeof(u_int32_t)); - - return (t); -} - -token_t * -au_to_subject64_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t 
*tid) -{ - token_t *t; - u_char *dptr = NULL; - - KASSERT((tid->at_type == AU_IPv4) || (tid->at_type == AU_IPv6), - ("au_to_subject64_ex: type %u", (unsigned int)tid->at_type)); - - if (tid->at_type == AU_IPv4) - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + - 7 * sizeof(u_int32_t) + sizeof(u_int64_t) + - 2 * sizeof(u_int32_t)); - else - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + - 7 * sizeof(u_int32_t) + sizeof(u_int64_t) + - 5 * sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_SUBJECT64_EX); - ADD_U_INT32(dptr, auid); - ADD_U_INT32(dptr, euid); - ADD_U_INT32(dptr, egid); - ADD_U_INT32(dptr, ruid); - ADD_U_INT32(dptr, rgid); - ADD_U_INT32(dptr, pid); - ADD_U_INT32(dptr, sid); - ADD_U_INT64(dptr, tid->at_port); - ADD_U_INT32(dptr, tid->at_type); - if (tid->at_type == AU_IPv6) - ADD_MEM(dptr, &tid->at_addr[0], 4 * sizeof(u_int32_t)); - else - ADD_MEM(dptr, &tid->at_addr[0], sizeof(u_int32_t)); - - return (t); -} - -token_t * -au_to_subject_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid) -{ - - return (au_to_subject32_ex(auid, euid, egid, ruid, rgid, pid, sid, - tid)); -} - -#if !defined(_KERNEL) && !defined(KERNEL) && defined(HAVE_AUDIT_SYSCALLS) -/* - * Collects audit information for the current process and creates a subject - * token from it. - */ -token_t * -au_to_me(void) -{ - auditinfo_t auinfo; - auditinfo_addr_t aia; - - /* - * Try to use getaudit_addr(2) first. If this kernel does not support - * it, then fall back on to getaudit(2). - */ - if (getaudit_addr(&aia, sizeof(aia)) != 0) { - if (errno == ENOSYS) { - if (getaudit(&auinfo) != 0) - return (NULL); - return (au_to_subject32(auinfo.ai_auid, geteuid(), - getegid(), getuid(), getgid(), getpid(), - auinfo.ai_asid, &auinfo.ai_termid)); - } else { - /* getaudit_addr(2) failed for some other reason. 
*/ - return (NULL); - } - } - - return (au_to_subject32_ex(aia.ai_auid, geteuid(), getegid(), getuid(), - getgid(), getpid(), aia.ai_asid, &aia.ai_termid)); -} -#endif - -#if defined(_KERNEL) || defined(KERNEL) -static token_t * -au_to_exec_strings(char *strs, int count, u_char type) -{ - token_t *t; - u_char *dptr = NULL; - u_int32_t totlen; - int ctr; - char *p; - - totlen = 0; - ctr = count; - p = strs; - while (ctr-- > 0) { - totlen += strlen(p) + 1; - p = strs + totlen; - } - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int32_t) + totlen); - ADD_U_CHAR(dptr, type); - ADD_U_INT32(dptr, count); - ADD_STRING(dptr, strs, totlen); - - return (t); -} - -/* - * token ID 1 byte - * count 4 bytes - * text count null-terminated strings - */ -token_t * -au_to_exec_args(char *args, int argc) -{ - - return (au_to_exec_strings(args, argc, AUT_EXEC_ARGS)); -} - -/* - * token ID 1 byte - * count 4 bytes - * text count null-terminated strings - */ -token_t * -au_to_exec_env(char *envs, int envc) -{ - - return (au_to_exec_strings(envs, envc, AUT_EXEC_ENV)); -} -#else -/* - * token ID 1 byte - * count 4 bytes - * text count null-terminated strings - */ -token_t * -au_to_exec_args(char **argv) -{ - token_t *t; - u_char *dptr = NULL; - const char *nextarg; - int i, count = 0; - size_t totlen = 0; - - nextarg = *argv; - - while (nextarg != NULL) { - int nextlen; - - nextlen = strlen(nextarg); - totlen += nextlen + 1; - count++; - nextarg = *(argv + count); - } - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int32_t) + totlen); - - ADD_U_CHAR(dptr, AUT_EXEC_ARGS); - ADD_U_INT32(dptr, count); - - for (i = 0; i < count; i++) { - nextarg = *(argv + i); - ADD_MEM(dptr, nextarg, strlen(nextarg) + 1); - } - - return (t); -} - -/* - * token ID 1 byte - * count 4 bytes - * text count null-terminated strings - */ -token_t * -au_to_exec_env(char **envp) -{ - token_t *t; - u_char *dptr = NULL; - int i, count = 0; - size_t totlen = 0; - const char *nextenv; - - nextenv = *envp; - - 
while (nextenv != NULL) { - int nextlen; - - nextlen = strlen(nextenv); - totlen += nextlen + 1; - count++; - nextenv = *(envp + count); - } - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int32_t) + totlen); - - ADD_U_CHAR(dptr, AUT_EXEC_ENV); - ADD_U_INT32(dptr, count); - - for (i = 0; i < count; i++) { - nextenv = *(envp + i); - ADD_MEM(dptr, nextenv, strlen(nextenv) + 1); - } - - return (t); -} -#endif - -/* - * token ID 1 byte - * zonename length 2 bytes - * zonename N bytes + 1 terminating NULL byte - */ -token_t * -au_to_zonename(const char *zonename) -{ - u_char *dptr = NULL; - u_int16_t textlen; - token_t *t; - - textlen = strlen(zonename) + 1; - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int16_t) + textlen); - - ADD_U_CHAR(dptr, AUT_ZONENAME); - ADD_U_INT16(dptr, textlen); - ADD_STRING(dptr, zonename, textlen); - return (t); -} - -/* - * token ID 1 byte - * record byte count 4 bytes - * version # 1 byte [2] - * event type 2 bytes - * event modifier 2 bytes - * seconds of time 4 bytes/8 bytes (32-bit/64-bit value) - * milliseconds of time 4 bytes/8 bytes (32-bit/64-bit value) - */ -token_t * -au_to_header32_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, - struct timeval tm) -{ - token_t *t; - u_char *dptr = NULL; - u_int32_t timems; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int32_t) + - sizeof(u_char) + 2 * sizeof(u_int16_t) + 2 * sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_HEADER32); - ADD_U_INT32(dptr, rec_size); - ADD_U_CHAR(dptr, AUDIT_HEADER_VERSION_OPENBSM); - ADD_U_INT16(dptr, e_type); - ADD_U_INT16(dptr, e_mod); - - timems = tm.tv_usec/1000; - /* Add the timestamp */ - ADD_U_INT32(dptr, tm.tv_sec); - ADD_U_INT32(dptr, timems); /* We need time in ms. 
*/ - - return (t); -} - -/* - * token ID 1 byte - * record byte count 4 bytes - * version # 1 byte [2] - * event type 2 bytes - * event modifier 2 bytes - * address type/length 4 bytes - * machine address 4 bytes/16 bytes (IPv4/IPv6 address) - * seconds of time 4 bytes/8 bytes (32-bit/64-bit value) - * milliseconds of time 4 bytes/8 bytes (32-bit/64-bit value) - */ -token_t * -au_to_header32_ex_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, - struct timeval tm, struct auditinfo_addr *aia) -{ - token_t *t; - u_char *dptr = NULL; - u_int32_t timems; - au_tid_addr_t *tid; - - tid = &aia->ai_termid; - KASSERT(tid->at_type == AU_IPv4 || tid->at_type == AU_IPv6, - ("au_to_header32_ex_tm: invalid address family")); - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int32_t) + - sizeof(u_char) + 2 * sizeof(u_int16_t) + 3 * - sizeof(u_int32_t) + tid->at_type); - - ADD_U_CHAR(dptr, AUT_HEADER32_EX); - ADD_U_INT32(dptr, rec_size); - ADD_U_CHAR(dptr, AUDIT_HEADER_VERSION_OPENBSM); - ADD_U_INT16(dptr, e_type); - ADD_U_INT16(dptr, e_mod); - - ADD_U_INT32(dptr, tid->at_type); - if (tid->at_type == AU_IPv6) - ADD_MEM(dptr, &tid->at_addr[0], 4 * sizeof(u_int32_t)); - else - ADD_MEM(dptr, &tid->at_addr[0], sizeof(u_int32_t)); - timems = tm.tv_usec/1000; - /* Add the timestamp */ - ADD_U_INT32(dptr, tm.tv_sec); - ADD_U_INT32(dptr, timems); /* We need time in ms. 
*/ - - return (t); -} - -token_t * -au_to_header64_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, - struct timeval tm) -{ - token_t *t; - u_char *dptr = NULL; - u_int32_t timems; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int32_t) + - sizeof(u_char) + 2 * sizeof(u_int16_t) + 2 * sizeof(u_int64_t)); - - ADD_U_CHAR(dptr, AUT_HEADER64); - ADD_U_INT32(dptr, rec_size); - ADD_U_CHAR(dptr, AUDIT_HEADER_VERSION_OPENBSM); - ADD_U_INT16(dptr, e_type); - ADD_U_INT16(dptr, e_mod); - - timems = tm.tv_usec/1000; - /* Add the timestamp */ - ADD_U_INT64(dptr, tm.tv_sec); - ADD_U_INT64(dptr, timems); /* We need time in ms. */ - - return (t); -} - -#if !defined(KERNEL) && !defined(_KERNEL) -#ifdef HAVE_AUDIT_SYSCALLS -token_t * -au_to_header32_ex(int rec_size, au_event_t e_type, au_emod_t e_mod) -{ - struct timeval tm; - struct auditinfo_addr aia; - - if (gettimeofday(&tm, NULL) == -1) - return (NULL); - if (audit_get_kaudit(&aia, sizeof(aia)) != 0) { - if (errno != ENOSYS) - return (NULL); - return (au_to_header32_tm(rec_size, e_type, e_mod, tm)); - } - return (au_to_header32_ex_tm(rec_size, e_type, e_mod, tm, &aia)); -} -#endif /* HAVE_AUDIT_SYSCALLS */ - -token_t * -au_to_header32(int rec_size, au_event_t e_type, au_emod_t e_mod) -{ - struct timeval tm; - - if (gettimeofday(&tm, NULL) == -1) - return (NULL); - return (au_to_header32_tm(rec_size, e_type, e_mod, tm)); -} - -token_t * -au_to_header64(__unused int rec_size, __unused au_event_t e_type, - __unused au_emod_t e_mod) -{ - struct timeval tm; - - if (gettimeofday(&tm, NULL) == -1) - return (NULL); - return (au_to_header64_tm(rec_size, e_type, e_mod, tm)); -} - -token_t * -au_to_header(int rec_size, au_event_t e_type, au_emod_t e_mod) -{ - - return (au_to_header32(rec_size, e_type, e_mod)); -} - -#ifdef HAVE_AUDIT_SYSCALLS -token_t * -au_to_header_ex(int rec_size, au_event_t e_type, au_emod_t e_mod) -{ - - return (au_to_header32_ex(rec_size, e_type, e_mod)); -} -#endif /* HAVE_AUDIT_SYSCALLS */ -#endif /* 
!defined(KERNEL) && !defined(_KERNEL) */ - -/* - * token ID 1 byte - * trailer magic number 2 bytes - * record byte count 4 bytes - */ -token_t * -au_to_trailer(int rec_size) -{ - token_t *t; - u_char *dptr = NULL; - u_int16_t magic = AUT_TRAILER_MAGIC; - - GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int16_t) + - sizeof(u_int32_t)); - - ADD_U_CHAR(dptr, AUT_TRAILER); - ADD_U_INT16(dptr, magic); - ADD_U_INT32(dptr, rec_size); - - return (t); -} Index: security/audit/bsm_errno.c =================================================================== --- security/audit/bsm_errno.c (.../head/sys) (revision 244874) +++ security/audit/bsm_errno.c (.../projects/physbio/sys) (revision 244874) @@ -1,775 +0,0 @@ -/*- - * Copyright (c) 2008 Apple Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Inc. ("Apple") nor the names of - * its contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING - * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * P4: //depot/projects/trustedbsd/openbsm/libbsm/bsm_errno.c#22 - */ - -#include -__FBSDID("$FreeBSD$"); - -#include - -#include - -#include -#include - -#include - -/* - * Different operating systems use different numeric constants for different - * error numbers, and sometimes error numbers don't exist in more than one - * operating system. These routines convert between BSM and local error - * number spaces, subject to the above realities. BSM error numbers are - * stored in a single 8-bit character, so don't have a byte order. - * - * Don't include string definitions when this code is compiled into a kernel. - */ -struct bsm_errno { - int be_bsm_errno; - int be_local_errno; -#if !defined(KERNEL) && !defined(_KERNEL) - const char *be_strerror; -#endif -}; - -#define ERRNO_NO_LOCAL_MAPPING -600 - -#if !defined(KERNEL) && !defined(_KERNEL) -#define ES(x) x -#else -#define ES(x) -#endif - -/* - * Mapping table -- please maintain in numeric sorted order with respect to - * the BSM constant. Today we do a linear lookup, but could switch to a - * binary search if it makes sense. We only ifdef errors that aren't - * generally available, but it does make the table a lot more ugly. - * - * XXXRW: It would be nice to have a similar ordered table mapping to BSM - * constant from local constant, but the order of local constants varies by - * OS. Really we need to build that table at compile-time but don't do that - * yet. 
- * - * XXXRW: We currently embed English-language error strings here, but should - * support catalogues; these are only used if the OS doesn't have an error - * string using strerror(3). - */ -static const struct bsm_errno bsm_errnos[] = { - { BSM_ERRNO_ESUCCESS, 0, ES("Success") }, - { BSM_ERRNO_EPERM, EPERM, ES("Operation not permitted") }, - { BSM_ERRNO_ENOENT, ENOENT, ES("No such file or directory") }, - { BSM_ERRNO_ESRCH, ESRCH, ES("No such process") }, - { BSM_ERRNO_EINTR, EINTR, ES("Interrupted system call") }, - { BSM_ERRNO_EIO, EIO, ES("Input/output error") }, - { BSM_ERRNO_ENXIO, ENXIO, ES("Device not configured") }, - { BSM_ERRNO_E2BIG, E2BIG, ES("Argument list too long") }, - { BSM_ERRNO_ENOEXEC, ENOEXEC, ES("Exec format error") }, - { BSM_ERRNO_EBADF, EBADF, ES("Bad file descriptor") }, - { BSM_ERRNO_ECHILD, ECHILD, ES("No child processes") }, - { BSM_ERRNO_EAGAIN, EAGAIN, ES("Resource temporarily unavailable") }, - { BSM_ERRNO_ENOMEM, ENOMEM, ES("Cannot allocate memory") }, - { BSM_ERRNO_EACCES, EACCES, ES("Permission denied") }, - { BSM_ERRNO_EFAULT, EFAULT, ES("Bad address") }, - { BSM_ERRNO_ENOTBLK, ENOTBLK, ES("Block device required") }, - { BSM_ERRNO_EBUSY, EBUSY, ES("Device busy") }, - { BSM_ERRNO_EEXIST, EEXIST, ES("File exists") }, - { BSM_ERRNO_EXDEV, EXDEV, ES("Cross-device link") }, - { BSM_ERRNO_ENODEV, ENODEV, ES("Operation not supported by device") }, - { BSM_ERRNO_ENOTDIR, ENOTDIR, ES("Not a directory") }, - { BSM_ERRNO_EISDIR, EISDIR, ES("Is a directory") }, - { BSM_ERRNO_EINVAL, EINVAL, ES("Invalid argument") }, - { BSM_ERRNO_ENFILE, ENFILE, ES("Too many open files in system") }, - { BSM_ERRNO_EMFILE, EMFILE, ES("Too many open files") }, - { BSM_ERRNO_ENOTTY, ENOTTY, ES("Inappropriate ioctl for device") }, - { BSM_ERRNO_ETXTBSY, ETXTBSY, ES("Text file busy") }, - { BSM_ERRNO_EFBIG, EFBIG, ES("File too large") }, - { BSM_ERRNO_ENOSPC, ENOSPC, ES("No space left on device") }, - { BSM_ERRNO_ESPIPE, ESPIPE, ES("Illegal seek") }, - { 
BSM_ERRNO_EROFS, EROFS, ES("Read-only file system") }, - { BSM_ERRNO_EMLINK, EMLINK, ES("Too many links") }, - { BSM_ERRNO_EPIPE, EPIPE, ES("Broken pipe") }, - { BSM_ERRNO_EDOM, EDOM, ES("Numerical argument out of domain") }, - { BSM_ERRNO_ERANGE, ERANGE, ES("Result too large") }, - { BSM_ERRNO_ENOMSG, ENOMSG, ES("No message of desired type") }, - { BSM_ERRNO_EIDRM, EIDRM, ES("Identifier removed") }, - { BSM_ERRNO_ECHRNG, -#ifdef ECHRNG - ECHRNG, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Channel number out of range") }, - { BSM_ERRNO_EL2NSYNC, -#ifdef EL2NSYNC - EL2NSYNC, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Level 2 not synchronized") }, - { BSM_ERRNO_EL3HLT, -#ifdef EL3HLT - EL3HLT, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Level 3 halted") }, - { BSM_ERRNO_EL3RST, -#ifdef EL3RST - EL3RST, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Level 3 reset") }, - { BSM_ERRNO_ELNRNG, -#ifdef ELNRNG - ELNRNG, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Link number out of range") }, - { BSM_ERRNO_EUNATCH, -#ifdef EUNATCH - EUNATCH, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Protocol driver not attached") }, - { BSM_ERRNO_ENOCSI, -#ifdef ENOCSI - ENOCSI, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("No CSI structure available") }, - { BSM_ERRNO_EL2HLT, -#ifdef EL2HLT - EL2HLT, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Level 2 halted") }, - { BSM_ERRNO_EDEADLK, EDEADLK, ES("Resource deadlock avoided") }, - { BSM_ERRNO_ENOLCK, ENOLCK, ES("No locks available") }, - { BSM_ERRNO_ECANCELED, ECANCELED, ES("Operation canceled") }, - { BSM_ERRNO_ENOTSUP, ENOTSUP, ES("Operation not supported") }, - { BSM_ERRNO_EDQUOT, EDQUOT, ES("Disc quota exceeded") }, - { BSM_ERRNO_EBADE, -#ifdef EBADE - EBADE, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Invalid exchange") }, - { BSM_ERRNO_EBADR, -#ifdef EBADR - EBADR, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Invalid request descriptor") }, - { BSM_ERRNO_EXFULL, -#ifdef EXFULL - EXFULL, -#else - 
ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Exchange full") }, - { BSM_ERRNO_ENOANO, -#ifdef ENOANO - ENOANO, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("No anode") }, - { BSM_ERRNO_EBADRQC, -#ifdef EBADRQC - EBADRQC, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Invalid request descriptor") }, - { BSM_ERRNO_EBADSLT, -#ifdef EBADSLT - EBADSLT, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Invalid slot") }, - { BSM_ERRNO_EDEADLOCK, -#ifdef EDEADLOCK - EDEADLOCK, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Resource deadlock avoided") }, - { BSM_ERRNO_EBFONT, -#ifdef EBFONT - EBFONT, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Bad font file format") }, - { BSM_ERRNO_EOWNERDEAD, -#ifdef EOWNERDEAD - EOWNERDEAD, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Process died with the lock") }, - { BSM_ERRNO_ENOTRECOVERABLE, -#ifdef ENOTRECOVERABLE - ENOTRECOVERABLE, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Lock is not recoverable") }, - { BSM_ERRNO_ENOSTR, -#ifdef ENOSTR - ENOSTR, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Device not a stream") }, - { BSM_ERRNO_ENONET, -#ifdef ENONET - ENONET, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Machine is not on the network") }, - { BSM_ERRNO_ENOPKG, -#ifdef ENOPKG - ENOPKG, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Package not installed") }, - { BSM_ERRNO_EREMOTE, EREMOTE, - ES("Too many levels of remote in path") }, - { BSM_ERRNO_ENOLINK, -#ifdef ENOLINK - ENOLINK, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Link has been severed") }, - { BSM_ERRNO_EADV, -#ifdef EADV - EADV, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Advertise error") }, - { BSM_ERRNO_ESRMNT, -#ifdef ESRMNT - ESRMNT, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("srmount error") }, - { BSM_ERRNO_ECOMM, -#ifdef ECOMM - ECOMM, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Communication error on send") }, - { BSM_ERRNO_EPROTO, -#ifdef EPROTO - EPROTO, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Protocol error") }, - { 
BSM_ERRNO_ELOCKUNMAPPED, -#ifdef ELOCKUNMAPPED - ELOCKUNMAPPED, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Locked lock was unmapped") }, - { BSM_ERRNO_ENOTACTIVE, -#ifdef ENOTACTIVE - ENOTACTIVE, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Facility is not active") }, - { BSM_ERRNO_EMULTIHOP, -#ifdef EMULTIHOP - EMULTIHOP, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Multihop attempted") }, - { BSM_ERRNO_EBADMSG, -#ifdef EBADMSG - EBADMSG, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Bad message") }, - { BSM_ERRNO_ENAMETOOLONG, ENAMETOOLONG, ES("File name too long") }, - { BSM_ERRNO_EOVERFLOW, EOVERFLOW, - ES("Value too large to be stored in data type") }, - { BSM_ERRNO_ENOTUNIQ, -#ifdef ENOTUNIQ - ENOTUNIQ, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Given log name not unique") }, - { BSM_ERRNO_EBADFD, -#ifdef EBADFD - EBADFD, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Given f.d. invalid for this operation") }, - { BSM_ERRNO_EREMCHG, -#ifdef EREMCHG - EREMCHG, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Remote address changed") }, - { BSM_ERRNO_ELIBACC, -#ifdef ELIBACC - ELIBACC, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Can't access a needed shared lib") }, - { BSM_ERRNO_ELIBBAD, -#ifdef ELIBBAD - ELIBBAD, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Accessing a corrupted shared lib") }, - { BSM_ERRNO_ELIBSCN, -#ifdef ELIBSCN - ELIBSCN, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES(".lib section in a.out corrupted") }, - { BSM_ERRNO_ELIBMAX, -#ifdef ELIBMAX - ELIBMAX, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Attempting to link in too many libs") }, - { BSM_ERRNO_ELIBEXEC, -#ifdef ELIBEXEC - ELIBEXEC, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Attempting to exec a shared library") }, - { BSM_ERRNO_EILSEQ, EILSEQ, ES("Illegal byte sequence") }, - { BSM_ERRNO_ENOSYS, ENOSYS, ES("Function not implemented") }, - { BSM_ERRNO_ELOOP, ELOOP, ES("Too many levels of symbolic links") }, - { BSM_ERRNO_ERESTART, -#ifdef ERESTART - ERESTART, 
-#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Restart syscall") }, - { BSM_ERRNO_ESTRPIPE, -#ifdef ESTRPIPE - ESTRPIPE, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("If pipe/FIFO, don't sleep in stream head") }, - { BSM_ERRNO_ENOTEMPTY, ENOTEMPTY, ES("Directory not empty") }, - { BSM_ERRNO_EUSERS, EUSERS, ES("Too many users") }, - { BSM_ERRNO_ENOTSOCK, ENOTSOCK, - ES("Socket operation on non-socket") }, - { BSM_ERRNO_EDESTADDRREQ, EDESTADDRREQ, - ES("Destination address required") }, - { BSM_ERRNO_EMSGSIZE, EMSGSIZE, ES("Message too long") }, - { BSM_ERRNO_EPROTOTYPE, EPROTOTYPE, - ES("Protocol wrong type for socket") }, - { BSM_ERRNO_ENOPROTOOPT, ENOPROTOOPT, ES("Protocol not available") }, - { BSM_ERRNO_EPROTONOSUPPORT, EPROTONOSUPPORT, - ES("Protocol not supported") }, - { BSM_ERRNO_ESOCKTNOSUPPORT, ESOCKTNOSUPPORT, - ES("Socket type not supported") }, - { BSM_ERRNO_EOPNOTSUPP, EOPNOTSUPP, ES("Operation not supported") }, - { BSM_ERRNO_EPFNOSUPPORT, EPFNOSUPPORT, - ES("Protocol family not supported") }, - { BSM_ERRNO_EAFNOSUPPORT, EAFNOSUPPORT, - ES("Address family not supported by protocol family") }, - { BSM_ERRNO_EADDRINUSE, EADDRINUSE, ES("Address already in use") }, - { BSM_ERRNO_EADDRNOTAVAIL, EADDRNOTAVAIL, - ES("Can't assign requested address") }, - { BSM_ERRNO_ENETDOWN, ENETDOWN, ES("Network is down") }, - { BSM_ERRNO_ENETRESET, ENETRESET, - ES("Network dropped connection on reset") }, - { BSM_ERRNO_ECONNABORTED, ECONNABORTED, - ES("Software caused connection abort") }, - { BSM_ERRNO_ECONNRESET, ECONNRESET, ES("Connection reset by peer") }, - { BSM_ERRNO_ENOBUFS, ENOBUFS, ES("No buffer space available") }, - { BSM_ERRNO_EISCONN, EISCONN, ES("Socket is already connected") }, - { BSM_ERRNO_ENOTCONN, ENOTCONN, ES("Socket is not connected") }, - { BSM_ERRNO_ESHUTDOWN, ESHUTDOWN, - ES("Can't send after socket shutdown") }, - { BSM_ERRNO_ETOOMANYREFS, ETOOMANYREFS, - ES("Too many references: can't splice") }, - { BSM_ERRNO_ETIMEDOUT, ETIMEDOUT, 
ES("Operation timed out") }, - { BSM_ERRNO_ECONNREFUSED, ECONNREFUSED, ES("Connection refused") }, - { BSM_ERRNO_EHOSTDOWN, EHOSTDOWN, ES("Host is down") }, - { BSM_ERRNO_EHOSTUNREACH, EHOSTUNREACH, ES("No route to host") }, - { BSM_ERRNO_EALREADY, EALREADY, ES("Operation already in progress") }, - { BSM_ERRNO_EINPROGRESS, EINPROGRESS, - ES("Operation now in progress") }, - { BSM_ERRNO_ESTALE, ESTALE, ES("Stale NFS file handle") }, - { BSM_ERRNO_EPROCLIM, -#ifdef EPROCLIM - EPROCLIM, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Too many processes") }, - { BSM_ERRNO_EBADRPC, -#ifdef EBADRPC - EBADRPC, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("RPC struct is bad") }, - { BSM_ERRNO_ERPCMISMATCH, -#ifdef ERPCMISMATCH - ERPCMISMATCH, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("RPC version wrong") }, - { BSM_ERRNO_EPROGUNAVAIL, -#ifdef EPROGUNAVAIL - EPROGUNAVAIL, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("RPC prog. not avail") }, - { BSM_ERRNO_EPROGMISMATCH, -#ifdef EPROGMISMATCH - EPROGMISMATCH, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("RPC version wrong") }, - { BSM_ERRNO_EPROCUNAVAIL, -#ifdef EPROCUNAVAIL - EPROCUNAVAIL, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Bad procedure for program") }, - { BSM_ERRNO_EFTYPE, -#ifdef EFTYPE - EFTYPE, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Inappropriate file type or format") }, - { BSM_ERRNO_EAUTH, -#ifdef EAUTH - EAUTH, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Authenticateion error") }, - { BSM_ERRNO_ENEEDAUTH, -#ifdef ENEEDAUTH - ENEEDAUTH, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Need authenticator") }, - { BSM_ERRNO_ENOATTR, -#ifdef ENOATTR - ENOATTR, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Attribute not found") }, - { BSM_ERRNO_EDOOFUS, -#ifdef EDOOFUS - EDOOFUS, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Programming error") }, - { BSM_ERRNO_EJUSTRETURN, -#ifdef EJUSTRETURN - EJUSTRETURN, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Just return") }, - { 
BSM_ERRNO_ENOIOCTL, -#ifdef ENOIOCTL - ENOIOCTL, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("ioctl not handled by this layer") }, - { BSM_ERRNO_EDIRIOCTL, -#ifdef EDIRIOCTL - EDIRIOCTL, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("do direct ioctl in GEOM") }, - { BSM_ERRNO_EPWROFF, -#ifdef EPWROFF - EPWROFF, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Device power is off") }, - { BSM_ERRNO_EDEVERR, -#ifdef EDEVERR - EDEVERR, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Device error") }, - { BSM_ERRNO_EBADEXEC, -#ifdef EBADEXEC - EBADEXEC, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Bad executable") }, - { BSM_ERRNO_EBADARCH, -#ifdef EBADARCH - EBADARCH, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Bad CPU type in executable") }, - { BSM_ERRNO_ESHLIBVERS, -#ifdef ESHLIBVERS - ESHLIBVERS, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Shared library version mismatch") }, - { BSM_ERRNO_EBADMACHO, -#ifdef EBADMACHO - EBADMACHO, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Malformed Macho file") }, - { BSM_ERRNO_EPOLICY, -#ifdef EPOLICY - EPOLICY, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Operation failed by policy") }, - { BSM_ERRNO_EDOTDOT, -#ifdef EDOTDOT - EDOTDOT, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("RFS specific error") }, - { BSM_ERRNO_EUCLEAN, -#ifdef EUCLEAN - EUCLEAN, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Structure needs cleaning") }, - { BSM_ERRNO_ENOTNAM, -#ifdef ENOTNAM - ENOTNAM, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Not a XENIX named type file") }, - { BSM_ERRNO_ENAVAIL, -#ifdef ENAVAIL - ENAVAIL, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("No XENIX semaphores available") }, - { BSM_ERRNO_EISNAM, -#ifdef EISNAM - EISNAM, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Is a named type file") }, - { BSM_ERRNO_EREMOTEIO, -#ifdef EREMOTEIO - EREMOTEIO, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Remote I/O error") }, - { BSM_ERRNO_ENOMEDIUM, -#ifdef ENOMEDIUM - ENOMEDIUM, -#else - ERRNO_NO_LOCAL_MAPPING, 
-#endif - ES("No medium found") }, - { BSM_ERRNO_EMEDIUMTYPE, -#ifdef EMEDIUMTYPE - EMEDIUMTYPE, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Wrong medium type") }, - { BSM_ERRNO_ENOKEY, -#ifdef ENOKEY - ENOKEY, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Required key not available") }, - { BSM_ERRNO_EKEYEXPIRED, -#ifdef EKEYEXPIRED - EKEYEXPIRED, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Key has expired") }, - { BSM_ERRNO_EKEYREVOKED, -#ifdef EKEYREVOKED - EKEYREVOKED, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Key has been revoked") }, - { BSM_ERRNO_EKEYREJECTED, -#ifdef EKEYREJECTED - EKEYREJECTED, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Key was rejected by service") }, - { BSM_ERRNO_ENOTCAPABLE, -#ifdef ENOTCAPABLE - ENOTCAPABLE, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Capabilities insufficient") }, - { BSM_ERRNO_ECAPMODE, -#ifdef ECAPMODE - ECAPMODE, -#else - ERRNO_NO_LOCAL_MAPPING, -#endif - ES("Not permitted in capability mode") }, -}; -static const int bsm_errnos_count = sizeof(bsm_errnos) / sizeof(bsm_errnos[0]); - -static const struct bsm_errno * -bsm_lookup_errno_local(int local_errno) -{ - int i; - - for (i = 0; i < bsm_errnos_count; i++) { - if (bsm_errnos[i].be_local_errno == local_errno) - return (&bsm_errnos[i]); - } - return (NULL); -} - -/* - * Conversion to the BSM errno space isn't allowed to fail; we simply map to - * BSM_ERRNO_UNKNOWN and let the remote endpoint deal with it. 
- */ -u_char -au_errno_to_bsm(int local_errno) -{ - const struct bsm_errno *bsme; - - bsme = bsm_lookup_errno_local(local_errno); - if (bsme == NULL) - return (BSM_ERRNO_UNKNOWN); - return (bsme->be_bsm_errno); -} - -static const struct bsm_errno * -bsm_lookup_errno_bsm(u_char bsm_errno) -{ - int i; - - for (i = 0; i < bsm_errnos_count; i++) { - if (bsm_errnos[i].be_bsm_errno == bsm_errno) - return (&bsm_errnos[i]); - } - return (NULL); -} - -/* - * Converstion from a BSM error to a local error number may fail if either - * OpenBSM doesn't recognize the error on the wire, or because there is no - * appropriate local mapping. - */ -int -au_bsm_to_errno(u_char bsm_errno, int *errorp) -{ - const struct bsm_errno *bsme; - - bsme = bsm_lookup_errno_bsm(bsm_errno); - if (bsme == NULL || bsme->be_local_errno == ERRNO_NO_LOCAL_MAPPING) - return (-1); - *errorp = bsme->be_local_errno; - return (0); -} - -#if !defined(KERNEL) && !defined(_KERNEL) -const char * -au_strerror(u_char bsm_errno) -{ - const struct bsm_errno *bsme; - - bsme = bsm_lookup_errno_bsm(bsm_errno); - if (bsme == NULL) - return ("Unrecognized BSM error"); - if (bsme->be_local_errno != ERRNO_NO_LOCAL_MAPPING) - return (strerror(bsme->be_local_errno)); - return (bsme->be_strerror); -} -#endif Index: security/audit/bsm_socket_type.c =================================================================== --- security/audit/bsm_socket_type.c (.../head/sys) (revision 244874) +++ security/audit/bsm_socket_type.c (.../projects/physbio/sys) (revision 244874) @@ -1,107 +0,0 @@ -/*- - * Copyright (c) 2008 Apple Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Inc. ("Apple") nor the names of - * its contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING - * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- * - * P4: //depot/projects/trustedbsd/openbsm/libbsm/bsm_socket_type.c#1 - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include - -#include - -#include -#include - -struct bsm_socket_type { - u_short bst_bsm_socket_type; - int bst_local_socket_type; -}; - -#define ST_NO_LOCAL_MAPPING -600 - -static const struct bsm_socket_type bsm_socket_types[] = { - { BSM_SOCK_DGRAM, SOCK_DGRAM }, - { BSM_SOCK_STREAM, SOCK_STREAM }, - { BSM_SOCK_RAW, SOCK_RAW }, - { BSM_SOCK_RDM, SOCK_RDM }, - { BSM_SOCK_SEQPACKET, SOCK_SEQPACKET }, -}; -static const int bsm_socket_types_count = sizeof(bsm_socket_types) / - sizeof(bsm_socket_types[0]); - -static const struct bsm_socket_type * -bsm_lookup_local_socket_type(int local_socket_type) -{ - int i; - - for (i = 0; i < bsm_socket_types_count; i++) { - if (bsm_socket_types[i].bst_local_socket_type == - local_socket_type) - return (&bsm_socket_types[i]); - } - return (NULL); -} - -u_short -au_socket_type_to_bsm(int local_socket_type) -{ - const struct bsm_socket_type *bstp; - - bstp = bsm_lookup_local_socket_type(local_socket_type); - if (bstp == NULL) - return (BSM_SOCK_UNKNOWN); - return (bstp->bst_bsm_socket_type); -} - -static const struct bsm_socket_type * -bsm_lookup_bsm_socket_type(u_short bsm_socket_type) -{ - int i; - - for (i = 0; i < bsm_socket_types_count; i++) { - if (bsm_socket_types[i].bst_bsm_socket_type == - bsm_socket_type) - return (&bsm_socket_types[i]); - } - return (NULL); -} - -int -au_bsm_to_socket_type(u_short bsm_socket_type, int *local_socket_typep) -{ - const struct bsm_socket_type *bstp; - - bstp = bsm_lookup_bsm_socket_type(bsm_socket_type); - if (bstp == NULL || bstp->bst_local_socket_type) - return (-1); - *local_socket_typep = bstp->bst_local_socket_type; - return (0); -} Property changes on: amd64/include/xen ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/amd64/include/xen:r243873-244873 Index: sys/busdma_bufalloc.h 
=================================================================== --- sys/busdma_bufalloc.h (.../head/sys) (revision 244874) +++ sys/busdma_bufalloc.h (.../projects/physbio/sys) (revision 244874) @@ -1,118 +0,0 @@ -/*- - * Copyright (c) 2012 Ian Lepore - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/* - * $FreeBSD$ - */ - -/* - * A buffer pool manager, for use by a platform's busdma implementation. - */ - -#ifndef _MACHINE_BUSDMA_BUFALLOC_H_ -#define _MACHINE_BUSDMA_BUFALLOC_H_ - -#include -#include - -/* - * Information about a buffer zone, returned by busdma_bufalloc_findzone(). 
- */ -struct busdma_bufzone { - bus_size_t size; - uma_zone_t umazone; - char name[24]; -}; - -/* - * Opaque handle type returned by busdma_bufalloc_create(). - */ -struct busdma_bufalloc; -typedef struct busdma_bufalloc *busdma_bufalloc_t; - -/* - * Create an allocator that manages a pool of DMA buffers. - * - * The allocator manages a collection of uma(9) zones of buffers in power-of-two - * sized increments ranging from minimum_alignment to the platform's PAGE_SIZE. - * The buffers within each zone are aligned on boundaries corresponding to the - * buffer size, and thus by implication each buffer is contiguous within a page - * and does not cross a power of two boundary larger than the buffer size. - * These rules are intended to make it easy for a busdma implementation to - * check whether a tag's constraints allow use of a buffer from the allocator. - * - * minimum_alignment is also the minimum buffer allocation size. For platforms - * with software-assisted cache coherency, this is typically the data cache line - * size (and MUST not be smaller than the cache line size). - * - * name appears in zone stats as 'dma name nnnnn' where 'dma' is fixed and - * 'nnnnn' is the size of buffers in that zone. - * - * If if the alloc/free function pointers are NULL, the regular uma internal - * allocators are used (I.E., you get "plain old kernel memory"). On a platform - * with an exclusion zone that applies to all DMA operations, a custom allocator - * could be used to ensure no buffer memory is ever allocated from that zone, - * allowing the bus_dmamem_alloc() implementation to make the assumption that - * buffers provided by the allocation could never lead to the need for a bounce. - */ -busdma_bufalloc_t busdma_bufalloc_create(const char *name, - bus_size_t minimum_alignment, - uma_alloc uma_alloc_func, uma_free uma_free_func, - u_int32_t uma_zcreate_flags); - -/* - * Destroy an allocator created by busdma_bufalloc_create(). - * Safe to call with a NULL pointer. 
- */ -void busdma_bufalloc_destroy(busdma_bufalloc_t ba); - -/* - * Return a pointer to the busdma_bufzone that should be used to allocate or - * free a buffer of the given size. Returns NULL if the size is larger than the - * largest zone handled by the allocator. - */ -struct busdma_bufzone * busdma_bufalloc_findzone(busdma_bufalloc_t ba, - bus_size_t size); - -/* - * These built-in allocation routines are available for managing a pools of - * uncacheable memory on platforms that support VM_MEMATTR_UNCACHEABLE. - * - * Allocation is done using kmem_alloc_attr() with these parameters: - * lowaddr = 0 - * highaddr = BUS_SPACE_MAXADDR - * memattr = VM_MEMATTR_UNCACHEABLE. - * - * If your platform has no exclusion region (lowaddr/highaddr), and its pmap - * routines support pmap_page_set_memattr() and the VM_MEMATTR_UNCACHEABLE flag - * you can probably use these when you need uncacheable buffers. - */ -void * busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, int size, - u_int8_t *pflag, int wait); -void busdma_bufalloc_free_uncacheable(void *item, int size, u_int8_t pflag); - -#endif /* _MACHINE_BUSDMA_BUFALLOC_H_ */ - Index: sys/uio.h =================================================================== --- sys/uio.h (.../head/sys) (revision 244874) +++ sys/uio.h (.../projects/physbio/sys) (revision 244874) @@ -96,6 +96,8 @@ int copyinstrfrom(const void * __restrict src, voi int copyinuio(const struct iovec *iovp, u_int iovcnt, struct uio **uiop); int copyout_map(struct thread *td, vm_offset_t *addr, size_t sz); int copyout_unmap(struct thread *td, vm_offset_t addr, size_t sz); +int physcopyin(void *src, vm_paddr_t dst, size_t len); +int physcopyout(vm_paddr_t src, void *dst, size_t len); int uiomove(void *cp, int n, struct uio *uio); int uiomove_frombuf(void *buf, int buflen, struct uio *uio); int uiomove_fromphys(struct vm_page *ma[], vm_offset_t offset, int n, Index: sys/bus_dma.h =================================================================== --- 
sys/bus_dma.h (.../head/sys) (revision 244874) +++ sys/bus_dma.h (.../projects/physbio/sys) (revision 244874) @@ -110,8 +110,11 @@ #define BUS_DMA_KEEP_PG_OFFSET 0x400 /* Forwards needed by prototypes below. */ +struct pmap; struct mbuf; struct uio; +struct bio; +union ccb; /* * Operations performed by bus_dmamap_sync(). @@ -133,6 +136,128 @@ typedef struct bus_dma_segment { } bus_dma_segment_t; /* + * bus_dma_memory_t + * + * Encapsulates various memory descriptors that devices may DMA + * to or from. + */ + +typedef struct bus_dma_memory { + union { + void *dm_vaddr; + vm_paddr_t dm_paddr; + bus_dma_segment_t *dm_list; + struct bio *dm_bio; + struct uio *dm_uio; + struct mbuf *dm_mbuf; + union ccb *dm_ccb; + } u; + bus_size_t dm_opaque; /* type specific data. */ + uint32_t dm_type; /* Type of memory. */ +} bus_dma_memory_t; + +#define BUS_DMAMEM_VADDR 1 /* Contiguous virtual address. */ +#define BUS_DMAMEM_PADDR 2 /* Contiguous physical address. */ +#define BUS_DMAMEM_VLIST 3 /* sglist of kva. */ +#define BUS_DMAMEM_PLIST 4 /* sglist of physical addresses. */ +#define BUS_DMAMEM_BIO 5 /* Pointer to a bio (block io). */ +#define BUS_DMAMEM_UIO 6 /* Pointer to a uio (any io). */ +#define BUS_DMAMEM_MBUF 7 /* Pointer to a mbuf (network io). */ +#define BUS_DMAMEM_CCB 8 /* Cam control block. (scsi/ata io). 
*/ + +static inline bus_dma_memory_t +dma_mem_vaddr(void *vaddr, bus_size_t len) +{ + bus_dma_memory_t mem; + + mem.u.dm_vaddr = vaddr; + mem.dm_opaque = len; + mem.dm_type = BUS_DMAMEM_VADDR; + + return (mem); +} + +static inline bus_dma_memory_t +dma_mem_paddr(vm_paddr_t paddr, bus_size_t len) +{ + bus_dma_memory_t mem; + + mem.u.dm_paddr = paddr; + mem.dm_opaque = len; + mem.dm_type = BUS_DMAMEM_PADDR; + + return (mem); +} + +static inline bus_dma_memory_t +dma_mem_vlist(bus_dma_segment_t *vlist, int sglist_cnt) +{ + bus_dma_memory_t mem; + + mem.u.dm_list = vlist; + mem.dm_opaque = sglist_cnt; + mem.dm_type = BUS_DMAMEM_VLIST; + + return (mem); +} + +static inline bus_dma_memory_t +dma_mem_plist(bus_dma_segment_t *plist, int sglist_cnt) +{ + bus_dma_memory_t mem; + + mem.u.dm_list = plist; + mem.dm_opaque = sglist_cnt; + mem.dm_type = BUS_DMAMEM_PLIST; + + return (mem); +} + +static inline bus_dma_memory_t +dma_mem_bio(struct bio *bio) +{ + bus_dma_memory_t mem; + + mem.u.dm_bio = bio; + mem.dm_type = BUS_DMAMEM_BIO; + + return (mem); +} + +static inline bus_dma_memory_t +dma_mem_uio(struct uio *uio) +{ + bus_dma_memory_t mem; + + mem.u.dm_uio = uio; + mem.dm_type = BUS_DMAMEM_UIO; + + return (mem); +} + +static inline bus_dma_memory_t +dma_mem_mbuf(struct mbuf *mbuf) +{ + bus_dma_memory_t mem; + + mem.u.dm_mbuf = mbuf; + mem.dm_type = BUS_DMAMEM_MBUF; + + return (mem); +} + +static inline bus_dma_memory_t +dma_mem_ccb(union ccb *ccb) +{ + bus_dma_memory_t mem; + + mem.u.dm_ccb = ccb; + mem.dm_type = BUS_DMAMEM_CCB; + + return (mem); +} + +/* * A function that returns 1 if the address cannot be accessed by * a device and 0 if it can be. */ @@ -191,6 +316,49 @@ typedef void bus_dmamap_callback_t(void *, bus_dma typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int); /* + * Map the buffer buf into bus space using the dmamap map. 
+ */ +int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, + bus_size_t buflen, bus_dmamap_callback_t *callback, + void *callback_arg, int flags); + +/* + * Like bus_dmamap_load but for mbufs. Note the use of the + * bus_dmamap_callback2_t interface. + */ +int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, + struct mbuf *mbuf, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags); + +int bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, + struct mbuf *mbuf, bus_dma_segment_t *segs, + int *nsegs, int flags); + +/* + * Like bus_dmamap_load but for uios. Note the use of the + * bus_dmamap_callback2_t interface. + */ +int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, + struct uio *ui, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags); + +/* + * Like bus_dmamap_load but for cam control blocks. + */ +int bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb, + bus_dmamap_callback_t *callback, void *callback_arg, + int flags); + +/* + * Loads any memory descriptor. + */ +int bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_memory_t *mem, bus_dmamap_callback_t *callback, + void *callback_arg, int flags); + +/* * XXX sparc64 uses the same interface, but a much different implementation. * for the sparc64 arch contains the equivalent * declarations. @@ -224,35 +392,6 @@ int bus_dmamem_alloc(bus_dma_tag_t dmat, void** va void bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map); /* - * Map the buffer buf into bus space using the dmamap map. - */ -int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, - bus_size_t buflen, bus_dmamap_callback_t *callback, - void *callback_arg, int flags); - -/* - * Like bus_dmamap_load but for mbufs. Note the use of the - * bus_dmamap_callback2_t interface. 
- */ -int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, - struct mbuf *mbuf, - bus_dmamap_callback2_t *callback, void *callback_arg, - int flags); - -int bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, - struct mbuf *mbuf, bus_dma_segment_t *segs, - int *nsegs, int flags); - -/* - * Like bus_dmamap_load but for uios. Note the use of the - * bus_dmamap_callback2_t interface. - */ -int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, - struct uio *ui, - bus_dmamap_callback2_t *callback, void *callback_arg, - int flags); - -/* * Perform a synchronization operation on the given map. */ void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t); @@ -272,6 +411,36 @@ void _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dm _bus_dmamap_unload(dmat, dmamap); \ } while (0) +/* + * The following functions define the interface between the MD and MI + * busdma layers. These are not intended for consumption by driver + * software. + */ +void __bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_memory_t mem, + bus_dmamap_callback_t *callback, + void *callback_arg); + +#define _bus_dmamap_waitok(dmat, map, mem, callback, callback_arg) \ + do { \ + if ((map) != NULL) \ + __bus_dmamap_waitok(dmat, map, mem, callback, \ + callback_arg); \ + } while (0); + +int _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, + void *buf, bus_size_t buflen, struct pmap *pmap, + int flags, bus_dma_segment_t *segs, int *segp); + +int _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, + vm_paddr_t paddr, bus_size_t buflen, + int flags, bus_dma_segment_t *segs, int *segp); + +bus_dma_segment_t *_bus_dmamap_complete(bus_dma_tag_t dmat, + bus_dmamap_t map, + bus_dma_segment_t *segs, + int nsegs, int error); + #endif /* __sparc64__ */ #endif /* _BUS_DMA_H_ */ Index: arm/conf/VERSATILEPB =================================================================== --- arm/conf/VERSATILEPB (.../head/sys) (revision 244874) +++ 
arm/conf/VERSATILEPB (.../projects/physbio/sys) (revision 244874) @@ -1,101 +0,0 @@ -# VERSATILEPB - Configuration for QEMU version of Versatile Platform Board -# -# For more information on this file, please read the handbook section on -# Kernel Configuration Files: -# -# http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html -# -# The handbook is also available locally in /usr/share/doc/handbook -# if you've installed the doc distribution, otherwise always see the -# FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the -# latest information. -# -# An exhaustive list of options and more detailed explanations of the -# device lines is also present in the ../../conf/NOTES and NOTES files. -# If you are in doubt as to the purpose or necessity of a line, check first -# in NOTES. -# -# $FreeBSD$ - -ident VERSATILEPB -machine arm armv6 -cpu CPU_ARM1176 - -files "../versatile/files.versatile" -makeoptions MODULES_OVERRIDE="" - -options KERNVIRTADDR=0xc0100000 -makeoptions KERNVIRTADDR=0xc0100000 -options KERNPHYSADDR=0x00100000 -makeoptions KERNPHYSADDR=0x00100000 -options PHYSADDR=0x00000000 -options STARTUP_PAGETABLE_ADDR=0x01000000 -options FREEBSD_BOOT_LOADER -options LINUX_BOOT_ABI - -makeoptions DEBUG=-g #Build kernel with gdb(1) debug symbols -options HZ=100 - -options SCHED_4BSD #4BSD scheduler -options INET #InterNETworking -options FFS #Berkeley Fast Filesystem -options SOFTUPDATES #Enable FFS soft updates support -options UFS_ACL #Support for access control lists -options UFS_DIRHASH #Improve performance on big directories -device snp - -options PSEUDOFS #Pseudo-filesystem framework -options COMPAT_43 #Compatible with BSD 4.3 [KEEP THIS!] 
-options SCSI_DELAY=5000 #Delay (in ms) before probing SCSI -options KTRACE #ktrace(1) support -options SYSVSHM #SYSV-style shared memory -options SYSVMSG #SYSV-style message queues -options SYSVSEM #SYSV-style semaphores -options _KPOSIX_PRIORITY_SCHEDULING #Posix P1003_1B real-time extensions -options KBD_INSTALL_CDEV # install a CDEV entry in /dev -options ROOTDEVNAME=\"ufs:da0s2a\" - -options PREEMPTION - -device bpf -device loop -device mii -device mii_bitbang -device smc -device smcphy -device ether -device uart -device pl011 -device pl190 - -device pty - -device pci - -# SCSI Controllers -device sym # NCR/Symbios/LSI Logic 53C8XX/53C1010/53C1510D - -# ATA/SCSI peripherals -device scbus # SCSI bus (required for ATA/SCSI) -device da # Direct Access (disks) -device pass # Passthrough device (direct ATA/SCSI access) - -# NOTE: serial console is disabled if syscons enabled -# Comment following lines for headless setup -device sc -device kbdmux -options SC_DFLT_FONT # compile font in -makeoptions SC_DFLT_FONT=cp437 - -options KDB -options DDB #Enable the kernel debugger -options INVARIANTS #Enable calls of extra sanity checking -options INVARIANT_SUPPORT #Extra sanity checks of internal structures, required by INVARIANTS - -device md -device random # Entropy device - -# Flattened Device Tree -options FDT -options FDT_DTB_STATIC -makeoptions FDT_DTS_FILE=versatilepb.dts Index: arm/arm/pl190.c =================================================================== --- arm/arm/pl190.c (.../head/sys) (revision 244874) +++ arm/arm/pl190.c (.../projects/physbio/sys) (revision 244874) @@ -1,187 +0,0 @@ -/*- - * Copyright (c) 2012 Oleksandr Tymoshenko - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#ifdef DEBUG -#define dprintf(fmt, args...) printf(fmt, ##args) -#else -#define dprintf(fmt, args...) 
-#endif - -#define VICIRQSTATUS 0x000 -#define VICFIQSTATUS 0x004 -#define VICRAWINTR 0x008 -#define VICINTSELECT 0x00C -#define VICINTENABLE 0x010 -#define VICINTENCLEAR 0x014 -#define VICSOFTINT 0x018 -#define VICSOFTINTCLEAR 0x01C -#define VICPROTECTION 0x020 -#define VICPERIPHID 0xFE0 -#define VICPRIMECELLID 0xFF0 - -#define VIC_NIRQS 32 - -struct pl190_intc_softc { - device_t sc_dev; - struct resource * intc_res; -}; - -static struct pl190_intc_softc *pl190_intc_sc = NULL; - -#define intc_vic_read_4(reg) \ - bus_read_4(pl190_intc_sc->intc_res, (reg)) -#define intc_vic_write_4(reg, val) \ - bus_write_4(pl190_intc_sc->intc_res, (reg), (val)) - -static int -pl190_intc_probe(device_t dev) -{ - if (!ofw_bus_is_compatible(dev, "arm,versatile-vic")) - return (ENXIO); - device_set_desc(dev, "ARM PL190 VIC"); - return (BUS_PROBE_DEFAULT); -} - -static int -pl190_intc_attach(device_t dev) -{ - struct pl190_intc_softc *sc = device_get_softc(dev); - uint32_t id; - int i, rid; - - sc->sc_dev = dev; - - if (pl190_intc_sc) - return (ENXIO); - - /* Request memory resources */ - rid = 0; - sc->intc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, - RF_ACTIVE); - if (sc->intc_res == NULL) { - device_printf(dev, "Error: could not allocate memory resources\n"); - return (ENXIO); - } - - pl190_intc_sc = sc; - /* - * All interrupts should use IRQ line - */ - intc_vic_write_4(VICINTSELECT, 0x00000000); - /* Disable all interrupts */ - intc_vic_write_4(VICINTENCLEAR, 0xffffffff); - /* Enable INT31, SIC IRQ */ - intc_vic_write_4(VICINTENABLE, (1 << 31)); - - id = 0; - for (i = 3; i >= 0; i--) { - id = (id << 8) | - (intc_vic_read_4(VICPERIPHID + i*4) & 0xff); - } - - device_printf(dev, "Peripheral ID: %08x\n", id); - - id = 0; - for (i = 3; i >= 0; i--) { - id = (id << 8) | - (intc_vic_read_4(VICPRIMECELLID + i*4) & 0xff); - } - - device_printf(dev, "PrimeCell ID: %08x\n", id); - - return (0); -} - -static device_method_t pl190_intc_methods[] = { - DEVMETHOD(device_probe, 
pl190_intc_probe), - DEVMETHOD(device_attach, pl190_intc_attach), - { 0, 0 } -}; - -static driver_t pl190_intc_driver = { - "intc", - pl190_intc_methods, - sizeof(struct pl190_intc_softc), -}; - -static devclass_t pl190_intc_devclass; - -DRIVER_MODULE(intc, simplebus, pl190_intc_driver, pl190_intc_devclass, 0, 0); - -int -arm_get_next_irq(int last_irq) -{ - uint32_t pending; - int32_t irq = last_irq + 1; - - /* Sanity check */ - if (irq < 0) - irq = 0; - - pending = intc_vic_read_4(VICIRQSTATUS); - while (irq < VIC_NIRQS) { - if (pending & (1 << irq)) - return (irq); - irq++; - } - - return (-1); -} - -void -arm_mask_irq(uintptr_t nb) -{ - - dprintf("%s: %d\n", __func__, nb); - intc_vic_write_4(VICINTENCLEAR, (1 << nb)); -} - -void -arm_unmask_irq(uintptr_t nb) -{ - - dprintf("%s: %d\n", __func__, nb); - intc_vic_write_4(VICINTENABLE, (1 << nb)); -} Index: arm/arm/cpufunc_asm_arm11x6.S =================================================================== --- arm/arm/cpufunc_asm_arm11x6.S (.../head/sys) (revision 244874) +++ arm/arm/cpufunc_asm_arm11x6.S (.../projects/physbio/sys) (revision 244874) @@ -1,216 +0,0 @@ -/* $NetBSD: cpufunc_asm_arm11x6.S,v 1.1 2012/07/21 12:19:15 skrll Exp $ */ - -/* - * Copyright (c) 2007 Microsoft - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Microsoft - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTERS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/*- - * Copyright (c) 2012 The NetBSD Foundation, Inc. - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Eben Upton - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - - -#include -__FBSDID("$FreeBSD$"); - -#if 0 -#define Invalidate_I_cache(Rtmp1, Rtmp2) \ - mcr p15, 0, Rtmp1, c7, c5, 0 /* Invalidate Entire I cache */ -#else -/* - * Workaround for - * - * Erratum 411920 in ARM1136 (fixed in r1p4) - * Erratum 415045 in ARM1176 (fixed in r0p5?) - * - * - value of arg 'reg' Should Be Zero - */ -#define Invalidate_I_cache(Rtmp1, Rtmp2) \ - mov Rtmp1, #0; /* SBZ */ \ - mrs Rtmp2, cpsr; \ - cpsid ifa; \ - mcr p15, 0, Rtmp1, c7, c5, 0; /* Nuke Whole Icache */ \ - mcr p15, 0, Rtmp1, c7, c5, 0; /* Nuke Whole Icache */ \ - mcr p15, 0, Rtmp1, c7, c5, 0; /* Nuke Whole Icache */ \ - mcr p15, 0, Rtmp1, c7, c5, 0; /* Nuke Whole Icache */ \ - msr cpsr_cx, Rtmp2; \ - nop; \ - nop; \ - nop; \ - nop; \ - nop; \ - nop; \ - nop; \ - nop; \ - nop; \ - nop; \ - nop; -#endif - -#if 1 -#define Flush_D_cache(reg) \ - mov reg, #0; /* SBZ */ \ - mcr p15, 0, reg, c7, c14, 0;/* Clean and Invalidate Entire Data Cache */ \ - mcr p15, 0, reg, c7, c10, 4;/* Data Synchronization Barrier */ -#else -#define Flush_D_cache(reg) \ -1: mov reg, #0; /* SBZ */ \ - mcr p15, 0, reg, c7, c14, 0;/* Clean and Invalidate Entire Data Cache */ \ - mrc p15, 0, reg, C7, C10, 6;/* Read Cache Dirty Status Register */ \ - ands reg, reg, #01; /* Check if it is clean */ \ - bne 1b; /* loop if not */ \ - mcr p15, 0, reg, c7, c10, 4;/* Data Synchronization Barrier */ -#endif - -ENTRY(arm11x6_setttb) -#ifdef PMAP_CACHE_VIVT - Flush_D_cache(r1) - 
Invalidate_I_cache(r1, r2) -#else - mov r1, #0 -#endif - mcr p15, 0, r0, c2, c0, 0 /* load new TTB */ - mcr p15, 0, r1, c8, c7, 0 /* invalidate I+D TLBs */ - mcr p15, 0, r1, c7, c10, 4 /* drain write buffer */ - RET - -ENTRY_NP(arm11x6_idcache_wbinv_all) - Flush_D_cache(r0) - Invalidate_I_cache(r0, r1) - RET - -ENTRY_NP(arm11x6_dcache_wbinv_all) - Flush_D_cache(r0) - RET - -ENTRY_NP(arm11x6_icache_sync_all) - Flush_D_cache(r0) - Invalidate_I_cache(r0, r1) - RET - -ENTRY_NP(arm11x6_flush_prefetchbuf) - mcr p15, 0, r0, c7, c5, 4 /* Flush Prefetch Buffer */ - RET - -ENTRY_NP(arm11x6_icache_sync_range) - add r1, r1, r0 - sub r1, r1, #1 - /* Erratum ARM1136 371025, workaround #2 */ - /* Erratum ARM1176 371367 */ - mrs r2, cpsr /* save the CPSR */ - cpsid ifa /* disable interrupts (irq,fiq,abort) */ - mov r3, #0 - mcr p15, 0, r3, c13, c0, 0 /* write FCSE (uTLB invalidate) */ - mcr p15, 0, r3, c7, c5, 4 /* flush prefetch buffer */ - add r3, pc, #0x24 - mcr p15, 0, r3, c7, c13, 1 /* prefetch I-cache line */ - mcrr p15, 0, r1, r0, c5 /* invalidate I-cache range */ - msr cpsr_cx, r2 /* local_irq_restore */ - nop - nop - nop - nop - nop - nop - nop - - mcrr p15, 0, r1, r0, c12 /* clean and invalidate D cache range */ /* XXXNH */ - mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ - RET - -ENTRY_NP(arm11x6_idcache_wbinv_range) - add r1, r1, r0 - sub r1, r1, #1 - /* Erratum ARM1136 371025, workaround #2 */ - /* Erratum ARM1176 371367 */ - mrs r2, cpsr /* save the CPSR */ - cpsid ifa /* disable interrupts (irq,fiq,abort) */ - mov r3, #0 - mcr p15, 0, r3, c13, c0, 0 /* write FCSE (uTLB invalidate) */ - mcr p15, 0, r3, c7, c5, 4 /* flush prefetch buffer */ - add r3, pc, #0x24 - mcr p15, 0, r3, c7, c13, 1 /* prefetch I-cache line */ - mcrr p15, 0, r1, r0, c5 /* invalidate I-cache range */ - msr cpsr_cx, r2 /* local_irq_restore */ - nop - nop - nop - nop - nop - nop - nop - - mcrr p15, 0, r1, r0, c14 /* clean and invalidate D cache range */ - mcr p15, 0, r0, c7, c10, 4 /* 
drain the write buffer */ - RET - -/* - * Preload the cache before issuing the WFI by conditionally disabling the - * mcr intstructions the first time around the loop. Ensure the function is - * cacheline aligned. - */ - .arch armv6 - .p2align 5 - -ENTRY_NP(arm11x6_sleep) - mov r0, #0 - mov r1, #2 -1: - subs r1, #1 - nop - mcreq p15, 0, r0, c7, c10, 4 /* data sync barrier */ - mcreq p15, 0, r0, c7, c0, 4 /* wait for interrupt */ - nop - nop - nop - bne 1b - RET Index: arm/arm/busdma_machdep-v6.c =================================================================== --- arm/arm/busdma_machdep-v6.c (.../head/sys) (revision 244874) +++ arm/arm/busdma_machdep-v6.c (.../projects/physbio/sys) (revision 244874) @@ -48,9 +48,8 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include -#include #include +#include #include #include @@ -111,6 +110,7 @@ struct bounce_page { vm_offset_t vaddr; /* kva of bounce buffer */ bus_addr_t busaddr; /* Physical address */ vm_offset_t datavaddr; /* kva of client data */ + bus_addr_t dataaddr; /* client physical address */ bus_size_t datacount; /* client data count */ STAILQ_ENTRY(bounce_page) links; }; @@ -119,7 +119,6 @@ struct sync_list { vm_offset_t vaddr; /* kva of bounce buffer */ bus_addr_t busaddr; /* Physical address */ bus_size_t datacount; /* client data count */ - STAILQ_ENTRY(sync_list) slinks; }; int busdma_swi_pending; @@ -156,15 +155,15 @@ struct bus_dmamap { int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; - void *buf; /* unmapped buffer pointer */ - bus_size_t buflen; /* unmapped buffer length */ + bus_dma_memory_t mem; pmap_t pmap; bus_dmamap_callback_t *callback; void *callback_arg; int flags; #define DMAMAP_COHERENT (1 << 0) STAILQ_ENTRY(bus_dmamap) links; - STAILQ_HEAD(,sync_list) slist; + int sync_count; + struct sync_list slist[]; }; static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; @@ -176,11 +175,16 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, static int reserve_bounce_pages(bus_dma_tag_t 
dmat, bus_dmamap_t map, int commit); static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, - vm_offset_t vaddr, bus_size_t size); + vm_offset_t vaddr, bus_addr_t addr, + bus_size_t size); static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); -static int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, +static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, int flags); +static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, + vm_paddr_t buf, bus_size_t buflen, int flags); +static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, + int flags); static busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */ static busdma_bufalloc_t standard_allocator; /* Cache of standard buffers */ @@ -493,17 +497,18 @@ out: int bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { + int mapsize; int error; error = 0; - *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, - M_NOWAIT | M_ZERO); + mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments); + *mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO); if (*mapp == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); return (ENOMEM); } - STAILQ_INIT(&((*mapp)->slist)); + (*mapp)->sync_count = 0; if (dmat->segments == NULL) { dmat->segments = (bus_dma_segment_t *)malloc( @@ -578,8 +583,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, b int bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) { - if (STAILQ_FIRST(&map->bpages) != NULL || - STAILQ_FIRST(&map->slist) != NULL) { + if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY); return (EBUSY); @@ -606,6 +610,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, struct busdma_bufzone *bufzone; vm_memattr_t memattr; int 
mflags; + int mapsize; if (flags & BUS_DMA_NOWAIT) mflags = M_NOWAIT; @@ -614,15 +619,15 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, /* ARM non-snooping caches need a map for the VA cache sync structure */ - *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, - M_NOWAIT | M_ZERO); + mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments); + *mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO); if (*mapp == NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->flags, ENOMEM); return (ENOMEM); } - STAILQ_INIT(&((*mapp)->slist)); + (*mapp)->sync_count = 0; if (dmat->segments == NULL) { dmat->segments = (bus_dma_segment_t *)malloc( @@ -733,7 +738,37 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, b CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); } -static int +static void +_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags) +{ + bus_addr_t curaddr; + bus_size_t sgsize; + + if (map->pagesneeded == 0) { + CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d" + " map= %p, pagesneeded= %d", + dmat->lowaddr, dmat->boundary, dmat->alignment, + map, map->pagesneeded); + /* + * Count the number of bounce pages + * needed in order to complete this transfer + */ + curaddr = buf; + while (buflen != 0) { + sgsize = MIN(buflen, dmat->maxsegsz); + if (run_filter(dmat, curaddr) != 0) { + sgsize = MIN(sgsize, PAGE_SIZE); + map->pagesneeded++; + } + curaddr += sgsize; + buflen -= sgsize; + } + CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); + } +} + +static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, int flags) { @@ -754,12 +789,11 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dm vendaddr = (vm_offset_t)buf + buflen; while (vaddr < vendaddr) { - if (__predict_true(map->pmap == pmap_kernel())) + if (__predict_true(map->pmap == kernel_pmap)) paddr = 
pmap_kextract(vaddr); else paddr = pmap_extract(map->pmap, vaddr); - if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && - run_filter(dmat, paddr) != 0) { + if (run_filter(dmat, paddr) != 0) { map->pagesneeded++; } vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK)); @@ -767,72 +801,190 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dm } CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); } +} +static int +_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) +{ + /* Reserve Necessary Bounce Pages */ - if (map->pagesneeded != 0) { - mtx_lock(&bounce_lock); - if (flags & BUS_DMA_NOWAIT) { - if (reserve_bounce_pages(dmat, map, 0) != 0) { - map->pagesneeded = 0; - mtx_unlock(&bounce_lock); - return (ENOMEM); - } - } else { - if (reserve_bounce_pages(dmat, map, 1) != 0) { - /* Queue us for resources */ - map->dmat = dmat; - map->buf = buf; - map->buflen = buflen; - STAILQ_INSERT_TAIL(&bounce_map_waitinglist, - map, links); - mtx_unlock(&bounce_lock); - return (EINPROGRESS); - } + mtx_lock(&bounce_lock); + if (flags & BUS_DMA_NOWAIT) { + if (reserve_bounce_pages(dmat, map, 0) != 0) { + map->pagesneeded = 0; + mtx_unlock(&bounce_lock); + return (ENOMEM); } - mtx_unlock(&bounce_lock); + } else { + if (reserve_bounce_pages(dmat, map, 1) != 0) { + /* Queue us for resources */ + STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); + mtx_unlock(&bounce_lock); + return (EINPROGRESS); + } } + mtx_unlock(&bounce_lock); return (0); } /* - * Utility function to load a linear buffer. lastaddrp holds state - * between invocations (for multiple-buffer loads). segp contains + * Add a single contiguous physical range to the segment list. + */ +static int +_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, + bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) +{ + bus_addr_t baddr, bmask; + int seg; + + /* + * Make sure we don't cross any boundaries. 
+ */ + bmask = ~(dmat->boundary - 1); + if (dmat->boundary > 0) { + baddr = (curaddr + dmat->boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } + + if (dmat->ranges) { + struct arm32_dma_range *dr; + + dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges, + curaddr); + if (dr == NULL) { + _bus_dmamap_unload(dmat, map); + return (EINVAL); + } + /* + * In a valid DMA range. Translate the physical + * memory address to an address in the DMA window. + */ + curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase; + } + + /* + * Insert chunk into a segment, coalescing with + * previous segment if possible. + */ + seg = *segp; + if (seg == -1) { + seg = 0; + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } else { + if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && + (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && + (dmat->boundary == 0 || + (segs[seg].ds_addr & bmask) == (curaddr & bmask))) + segs[seg].ds_len += sgsize; + else { + if (++seg >= dmat->nsegments) + return (0); + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } + } + *segp = seg; + return (sgsize); +} + +/* + * Utility function to load a physical buffer. segp contains * the starting segment on entrace, and the ending segment on exit. - * first indicates if this is the first invocation of this function. 
*/ -static __inline int +int +_bus_dmamap_load_phys(bus_dma_tag_t dmat, + bus_dmamap_t map, + vm_paddr_t buf, bus_size_t buflen, + int flags, + bus_dma_segment_t *segs, + int *segp) +{ + bus_addr_t curaddr; + bus_size_t sgsize; + int error; + + if (segs == NULL) + segs = dmat->segments; + + if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { + _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); + } + } + + while (buflen > 0) { + curaddr = buf; + sgsize = MIN(buflen, dmat->maxsegsz); + if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && + map->pagesneeded != 0 && run_filter(dmat, curaddr)) { + sgsize = MIN(sgsize, PAGE_SIZE); + curaddr = add_bounce_page(dmat, map, 0, curaddr, + sgsize); + } + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; + buf += sgsize; + buflen -= sgsize; + } + + /* + * Did we fit? + */ + if (buflen != 0) { + _bus_dmamap_unload(dmat, map); + return (EFBIG); /* XXX better return value here? */ + } + return (0); +} + +/* + * Utility function to load a linear buffer. segp contains + * the starting segment on entrace, and the ending segment on exit. 
+ */ +int _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, + pmap_t pmap, int flags, - bus_addr_t *lastaddrp, bus_dma_segment_t *segs, - int *segp, - int first) + int *segp) { bus_size_t sgsize; - bus_addr_t curaddr, lastaddr, baddr, bmask; + bus_addr_t curaddr; vm_offset_t vaddr; struct sync_list *sl; - int seg, error; + int error; + if (segs == NULL) + segs = dmat->segments; + if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { - error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags); - if (error) - return (error); + _bus_dmamap_count_pages(dmat, map, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); + } } sl = NULL; vaddr = (vm_offset_t)buf; - lastaddr = *lastaddrp; - bmask = ~(dmat->boundary - 1); + map->pmap = pmap; - for (seg = *segp; buflen > 0 ; ) { + while (buflen > 0) { /* * Get the physical address for this segment. */ - if (__predict_true(map->pmap == pmap_kernel())) + if (__predict_true(map->pmap == kernel_pmap)) curaddr = pmap_kextract(vaddr); else curaddr = pmap_extract(map->pmap, vaddr); @@ -846,262 +998,66 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, if (buflen < sgsize) sgsize = buflen; - /* - * Make sure we don't cross any boundaries. 
- */ - if (dmat->boundary > 0) { - baddr = (curaddr + dmat->boundary) & bmask; - if (sgsize > (baddr - curaddr)) - sgsize = (baddr - curaddr); - } - if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && map->pagesneeded != 0 && run_filter(dmat, curaddr)) { - curaddr = add_bounce_page(dmat, map, vaddr, sgsize); + curaddr = add_bounce_page(dmat, map, vaddr, curaddr, + sgsize); } else { - /* add_sync_list(dmat, map, vaddr, sgsize, cflag); */ - sl = (struct sync_list *)malloc(sizeof(struct sync_list), - M_DEVBUF, M_NOWAIT | M_ZERO); - if (sl == NULL) - goto cleanup; - STAILQ_INSERT_TAIL(&(map->slist), sl, slinks); - sl->vaddr = vaddr; - sl->datacount = sgsize; - sl->busaddr = curaddr; + sl = &map->slist[map->sync_count - 1]; + if (map->sync_count == 0 || + vaddr != sl->vaddr + sl->datacount) { + if (++map->sync_count > dmat->nsegments) + goto cleanup; + sl++; + sl->vaddr = vaddr; + sl->datacount = sgsize; + sl->busaddr = curaddr; + } else + sl->datacount += sgsize; } - - - if (dmat->ranges) { - struct arm32_dma_range *dr; - - dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges, - curaddr); - if (dr == NULL) { - _bus_dmamap_unload(dmat, map); - return (EINVAL); - } - /* - * In a valid DMA range. Translate the physical - * memory address to an address in the DMA window. - */ - curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase; - } - - /* - * Insert chunk into a segment, coalescing with - * previous segment if possible. 
- */ - if (first) { - segs[seg].ds_addr = curaddr; - segs[seg].ds_len = sgsize; - first = 0; - } else { - if (curaddr == lastaddr && - (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && - (dmat->boundary == 0 || - (segs[seg].ds_addr & bmask) == (curaddr & bmask))) - segs[seg].ds_len += sgsize; - else { - if (++seg >= dmat->nsegments) - break; - segs[seg].ds_addr = curaddr; - segs[seg].ds_len = sgsize; - } - } - - lastaddr = curaddr + sgsize; + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; vaddr += sgsize; buflen -= sgsize; } - *segp = seg; - *lastaddrp = lastaddr; cleanup: /* * Did we fit? */ if (buflen != 0) { _bus_dmamap_unload(dmat, map); - return(EFBIG); /* XXX better return value here? */ + return (EFBIG); /* XXX better return value here? */ } return (0); } -/* - * Map the buffer buf into bus space using the dmamap map. - */ -int -bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, - bus_size_t buflen, bus_dmamap_callback_t *callback, - void *callback_arg, int flags) + +void +__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_memory_t mem, bus_dmamap_callback_t *callback, + void *callback_arg) { - bus_addr_t lastaddr = 0; - int error, nsegs = 0; - flags |= BUS_DMA_WAITOK; + map->mem = mem; + map->dmat = dmat; map->callback = callback; map->callback_arg = callback_arg; - map->pmap = kernel_pmap; - - error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, flags, - &lastaddr, dmat->segments, &nsegs, 1); - - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, nsegs + 1); - - if (error == EINPROGRESS) { - return (error); - } - - if (error) - (*callback)(callback_arg, dmat->segments, 0, error); - else - (*callback)(callback_arg, dmat->segments, nsegs + 1, 0); - - /* - * Return ENOMEM to the caller so that it can pass it up the stack. - * This error only happens when NOWAIT is set, so deferal is disabled. 
- */ - if (error == ENOMEM) - return (error); - - return (0); } - -/* - * Like _bus_dmamap_load(), but for mbufs. - */ -static __inline int -_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, - struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, - int flags) +bus_dma_segment_t * +_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_segment_t *segs, int nsegs, int error) { - int error; - M_ASSERTPKTHDR(m0); - map->pmap = kernel_pmap; - - flags |= BUS_DMA_NOWAIT; - *nsegs = 0; - error = 0; - if (m0->m_pkthdr.len <= dmat->maxsize) { - int first = 1; - bus_addr_t lastaddr = 0; - struct mbuf *m; - - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len > 0) { - error = _bus_dmamap_load_buffer(dmat, map, - m->m_data, m->m_len, - flags, &lastaddr, - segs, nsegs, first); - first = 0; - } - } - } else { - error = EINVAL; - } - - /* XXX FIXME: Having to increment nsegs is really annoying */ - ++*nsegs; - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, *nsegs); - return (error); + if (segs == NULL) + segs = dmat->segments; + return (segs); } -int -bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, - struct mbuf *m0, - bus_dmamap_callback2_t *callback, void *callback_arg, - int flags) -{ - int nsegs, error; - - error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs, - flags); - - if (error) { - /* force "no valid mappings" in callback */ - (*callback)(callback_arg, dmat->segments, 0, 0, error); - } else { - (*callback)(callback_arg, dmat->segments, - nsegs, m0->m_pkthdr.len, error); - } - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, nsegs); - - return (error); -} - -int -bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, - struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, - int flags) -{ - return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags)); -} - /* - * Like 
_bus_dmamap_load(), but for uios. - */ -int -bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, - struct uio *uio, - bus_dmamap_callback2_t *callback, void *callback_arg, - int flags) -{ - bus_addr_t lastaddr; - int nsegs, error, first, i; - bus_size_t resid; - struct iovec *iov; - - flags |= BUS_DMA_NOWAIT; - resid = uio->uio_resid; - iov = uio->uio_iov; - - if (uio->uio_segflg == UIO_USERSPACE) { - KASSERT(uio->uio_td != NULL, - ("bus_dmamap_load_uio: USERSPACE but no proc")); - map->pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); - } else - map->pmap = kernel_pmap; - - nsegs = 0; - error = 0; - first = 1; - lastaddr = (bus_addr_t) 0; - for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { - /* - * Now at the first iovec to load. Load each iovec - * until we have exhausted the residual count. - */ - bus_size_t minlen = - resid < iov[i].iov_len ? resid : iov[i].iov_len; - caddr_t addr = (caddr_t) iov[i].iov_base; - - if (minlen > 0) { - error = _bus_dmamap_load_buffer(dmat, map, - addr, minlen, flags, &lastaddr, - dmat->segments, &nsegs, first); - first = 0; - resid -= minlen; - } - } - - if (error) { - /* force "no valid mappings" in callback */ - (*callback)(callback_arg, dmat->segments, 0, 0, error); - } else { - (*callback)(callback_arg, dmat->segments, - nsegs+1, uio->uio_resid, error); - } - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, nsegs + 1); - return (error); -} - -/* * Release the mapping held by map. 
*/ void @@ -1109,13 +1065,7 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_ { struct bounce_page *bpage; struct bounce_zone *bz; - struct sync_list *sl; - while ((sl = STAILQ_FIRST(&map->slist)) != NULL) { - STAILQ_REMOVE_HEAD(&map->slist, slinks); - free(sl, M_DEVBUF); - } - if ((bz = dmat->bounce_zone) != NULL) { while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { STAILQ_REMOVE_HEAD(&map->bpages, links); @@ -1128,6 +1078,7 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_ map->pagesreserved = 0; map->pagesneeded = 0; } + map->sync_count = 0; } #ifdef notyetbounceuser @@ -1187,15 +1138,13 @@ void _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { struct bounce_page *bpage; - struct sync_list *sl; + struct sync_list *sl, *end; bus_size_t len, unalign; vm_offset_t buf, ebuf; #ifdef FIX_DMAP_BUS_DMASYNC_POSTREAD vm_offset_t bbuf; char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align]; #endif - int listcount = 0; - /* if buffer was from user space, it it possible that this * is not the same vm map. 
The fix is to map each page in * the buffer into the current address space (KVM) and then @@ -1215,9 +1164,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t if (op & BUS_DMASYNC_PREWRITE) { while (bpage != NULL) { - bcopy((void *)bpage->datavaddr, - (void *)bpage->vaddr, - bpage->datacount); + if (bpage->datavaddr != 0) + bcopy((void *)bpage->datavaddr, + (void *)bpage->vaddr, + bpage->datacount); + else + physcopyout(bpage->dataaddr, + (void *)bpage->vaddr, + bpage->datacount); cpu_dcache_wb_range((vm_offset_t)bpage->vaddr, bpage->datacount); l2cache_wb_range((vm_offset_t)bpage->vaddr, @@ -1254,9 +1208,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t arm_dcache_align; cpu_dcache_inv_range(startv, len); l2cache_inv_range(startv, startp, len); - bcopy((void *)bpage->vaddr, - (void *)bpage->datavaddr, - bpage->datacount); + if (bpage->datavaddr != 0) + bcopy((void *)bpage->vaddr, + (void *)bpage->datavaddr, + bpage->datacount); + else + physcopyin((void *)bpage->vaddr, + bpage->dataaddr, + bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; @@ -1265,29 +1224,26 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t if (map->flags & DMAMAP_COHERENT) return; - sl = STAILQ_FIRST(&map->slist); - while (sl) { - listcount++; - sl = STAILQ_NEXT(sl, slinks); - } - if ((sl = STAILQ_FIRST(&map->slist)) != NULL) { + if (map->sync_count != 0) { /* ARM caches are not self-snooping for dma */ + sl = &map->slist[0]; + end = &map->slist[map->sync_count]; CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " "performing sync", __func__, dmat, dmat->flags, op); switch (op) { case BUS_DMASYNC_PREWRITE: - while (sl != NULL) { + while (sl != end) { cpu_dcache_wb_range(sl->vaddr, sl->datacount); l2cache_wb_range(sl->vaddr, sl->busaddr, sl->datacount); - sl = STAILQ_NEXT(sl, slinks); + sl++; } break; case BUS_DMASYNC_PREREAD: - while (sl != NULL) { + while (sl != end) { /* write back the unaligned portions */ vm_paddr_t physaddr = 
sl->busaddr, ephysaddr; buf = sl->vaddr; @@ -1327,16 +1283,16 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t cpu_dcache_inv_range(buf, len); l2cache_inv_range(buf, physaddr, len); } - sl = STAILQ_NEXT(sl, slinks); + sl++; } break; case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD: - while (sl != NULL) { + while (sl != end) { cpu_dcache_wbinv_range(sl->vaddr, sl->datacount); l2cache_wbinv_range(sl->vaddr, sl->busaddr, sl->datacount); - sl = STAILQ_NEXT(sl, slinks); + sl++; } break; @@ -1344,7 +1300,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t case BUS_DMASYNC_POSTREAD: if (!pmap_dmap_iscurrent(map->pmap)) panic("_bus_dmamap_sync: wrong user map. apply fix"); - while (sl != NULL) { + while (sl != end) { /* write back the unaligned portions */ vm_paddr_t physaddr; buf = sl->vaddr; @@ -1377,7 +1333,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t unalign = arm_dcache_align - unalign; memcpy((void *)ebuf, _tmp_clend, unalign); } - sl = STAILQ_NEXT(sl, slinks); + sl++; } break; #endif /* FIX_DMAP_BUS_DMASYNC_POSTREAD */ @@ -1545,7 +1501,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmama static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, - bus_size_t size) + bus_addr_t addr, bus_size_t size) { struct bounce_zone *bz; struct bounce_page *bpage; @@ -1579,6 +1535,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t m bpage->busaddr |= vaddr & PAGE_MASK; } bpage->datavaddr = vaddr; + bpage->dataaddr = addr; bpage->datacount = size; STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); return (bpage->busaddr); @@ -1632,8 +1589,8 @@ busdma_swi(void) mtx_unlock(&bounce_lock); dmat = map->dmat; (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); - bus_dmamap_load(map->dmat, map, map->buf, map->buflen, - map->callback, map->callback_arg, /*flags*/0); + bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, + map->callback_arg, BUS_DMA_WAITOK); (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 
mtx_lock(&bounce_lock); } Index: arm/arm/busdma_machdep.c =================================================================== --- arm/arm/busdma_machdep.c (.../head/sys) (revision 244874) +++ arm/arm/busdma_machdep.c (.../projects/physbio/sys) (revision 244874) @@ -62,11 +62,10 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include -#include #include #include #include +#include #include #include @@ -125,10 +124,17 @@ struct bounce_page { vm_offset_t vaddr_nocache; /* kva of bounce buffer uncached */ bus_addr_t busaddr; /* Physical address */ vm_offset_t datavaddr; /* kva of client data */ + bus_addr_t dataaddr; /* client physical address */ bus_size_t datacount; /* client data count */ STAILQ_ENTRY(bounce_page) links; }; +struct sync_list { + vm_offset_t vaddr; /* kva of bounce buffer */ + bus_addr_t busaddr; /* Physical address */ + bus_size_t datacount; /* client data count */ +}; + int busdma_swi_pending; struct bounce_zone { @@ -158,24 +164,21 @@ static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_ SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, "Total bounce pages"); -#define DMAMAP_LINEAR 0x1 -#define DMAMAP_MBUF 0x2 -#define DMAMAP_UIO 0x4 -#define DMAMAP_CACHE_ALIGNED 0x10 -#define DMAMAP_TYPE_MASK (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO) #define DMAMAP_COHERENT 0x8 +#define DMAMAP_CACHE_ALIGNED 0x10 + struct bus_dmamap { struct bp_list bpages; int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; + bus_dma_memory_t mem; int flags; - void *buffer; - int len; STAILQ_ENTRY(bus_dmamap) links; bus_dmamap_callback_t *callback; void *callback_arg; - + int sync_count; + struct sync_list *slist; }; static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; @@ -191,7 +194,8 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit); static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, - vm_offset_t vaddr, bus_size_t size); + vm_offset_t 
vaddr, bus_addr_t addr, + bus_size_t size); static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); /* Default tag, as most drivers provide no parent tag. */ @@ -564,13 +568,20 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat) int bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { + struct sync_list *slist; bus_dmamap_t map; int error = 0; + slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT); + if (slist == NULL) + return (ENOMEM); + map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT); *mapp = map; - if (map == NULL) + if (map == NULL) { + free(slist, M_DEVBUF); return (ENOMEM); + } /* * If the tag's segments haven't been allocated yet we need to do it @@ -580,6 +591,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, b dmat->segments = malloc(dmat->nsegments * sizeof(*dmat->segments), M_DEVBUF, M_NOWAIT); if (dmat->segments == NULL) { + free(slist, M_DEVBUF); uma_zfree(dmamap_zone, map); *mapp = NULL; return (ENOMEM); @@ -599,6 +611,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, b if (dmat->bounce_zone == NULL) { if ((error = alloc_bounce_zone(dmat)) != 0) { + free(slist, M_DEVBUF); uma_zfree(dmamap_zone, map); *mapp = NULL; return (error); @@ -633,6 +646,8 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, b } bz->map_count++; } + map->sync_count = 0; + map->slist = slist; CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->flags, error); @@ -647,11 +662,12 @@ int bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) { - if (STAILQ_FIRST(&map->bpages) != NULL) { + if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY); return (EBUSY); } + free(map->slist, M_DEVBUF); uma_zfree(dmamap_zone, map); if (dmat->bounce_zone) dmat->bounce_zone->map_count--; @@ -668,9 +684,9 @@ int bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp, int flags, bus_dmamap_t *mapp) { + struct sync_list *slist; void * vaddr; struct 
busdma_bufzone *bufzone; - busdma_bufalloc_t ba; bus_dmamap_t map; int mflags; vm_memattr_t memattr; @@ -679,7 +695,6 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp mflags = M_NOWAIT; else mflags = M_WAITOK; - /* * If the tag's segments haven't been allocated yet we need to do it * now, because we can't sleep for resources at map load time. @@ -688,10 +703,14 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp dmat->segments = malloc(dmat->nsegments * sizeof(*dmat->segments), M_DEVBUF, mflags); + slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT); + if (slist == NULL) + return (ENOMEM); map = uma_zalloc_arg(dmamap_zone, dmat, mflags); - if (map == NULL) + if (map == NULL) { + free(slist, M_DEVBUF); return (ENOMEM); - + } if (flags & BUS_DMA_COHERENT) { memattr = VM_MEMATTR_UNCACHEABLE; ba = coherent_allocator; @@ -738,12 +757,14 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary, memattr); } - if (vaddr == NULL) { + free(slist, M_DEVBUF); uma_zfree(dmamap_zone, map); map = NULL; + } else { + map->slist = slist; + map->sync_count = 0; } - *vaddrp = vaddr; *mapp = map; @@ -762,10 +783,11 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, b if (map->flags & DMAMAP_COHERENT) ba = coherent_allocator; - else + else ba = standard_allocator; - uma_zfree(dmamap_zone, map); + uma_zfree(dmamap_zone, map); + free(map->slist, M_DEVBUF); /* Be careful not to access map from here on. 
*/ bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); @@ -777,7 +799,37 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, b kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize); } -static int +static void +_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags) +{ + bus_addr_t curaddr; + bus_size_t sgsize; + + if ((map->pagesneeded == 0)) { + CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d", + dmat->lowaddr, dmat->boundary, dmat->alignment); + CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", + map, map->pagesneeded); + /* + * Count the number of bounce pages + * needed in order to complete this transfer + */ + curaddr = buf; + while (buflen != 0) { + sgsize = MIN(buflen, dmat->maxsegsz); + if (run_filter(dmat, curaddr) != 0) { + sgsize = MIN(sgsize, PAGE_SIZE); + map->pagesneeded++; + } + curaddr += sgsize; + buflen -= sgsize; + } + CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); + } +} + +static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, void *buf, bus_size_t buflen, int flags) { @@ -798,81 +850,187 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dm vendaddr = (vm_offset_t)buf + buflen; while (vaddr < vendaddr) { - if (__predict_true(pmap == pmap_kernel())) + if (__predict_true(pmap == kernel_pmap)) paddr = pmap_kextract(vaddr); else paddr = pmap_extract(pmap, vaddr); - if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && - run_filter(dmat, paddr) != 0) + if (run_filter(dmat, paddr) != 0) map->pagesneeded++; vaddr += PAGE_SIZE; } CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); } +} +static int +_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) +{ + /* Reserve Necessary Bounce Pages */ - if (map->pagesneeded != 0) { - mtx_lock(&bounce_lock); - if (flags & BUS_DMA_NOWAIT) { - if (reserve_bounce_pages(dmat, map, 0) != 0) { - mtx_unlock(&bounce_lock); - return (ENOMEM); - } - } else { - if (reserve_bounce_pages(dmat, 
map, 1) != 0) { - /* Queue us for resources */ - STAILQ_INSERT_TAIL(&bounce_map_waitinglist, - map, links); - mtx_unlock(&bounce_lock); - return (EINPROGRESS); - } + mtx_lock(&bounce_lock); + if (flags & BUS_DMA_NOWAIT) { + if (reserve_bounce_pages(dmat, map, 0) != 0) { + mtx_unlock(&bounce_lock); + return (ENOMEM); } - mtx_unlock(&bounce_lock); + } else { + if (reserve_bounce_pages(dmat, map, 1) != 0) { + /* Queue us for resources */ + STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); + mtx_unlock(&bounce_lock); + return (EINPROGRESS); + } } + mtx_unlock(&bounce_lock); return (0); } /* - * Utility function to load a linear buffer. lastaddrp holds state - * between invocations (for multiple-buffer loads). segp contains + * Add a single contiguous physical range to the segment list. + */ +static int +_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, + bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) +{ + bus_addr_t baddr, bmask; + int seg; + + /* + * Make sure we don't cross any boundaries. + */ + bmask = ~(dmat->boundary - 1); + if (dmat->boundary > 0) { + baddr = (curaddr + dmat->boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } + if (dmat->ranges) { + struct arm32_dma_range *dr; + + dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges, + curaddr); + if (dr == NULL) + return (EINVAL); + /* + * In a valid DMA range. Translate the physical + * memory address to an address in the DMA window. + */ + curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase; + + } + + seg = *segp; + /* + * Insert chunk into a segment, coalescing with + * the previous segment if possible. 
+ */ + if (seg >= 0 && + curaddr == segs[seg].ds_addr + segs[seg].ds_len && + (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && + (dmat->boundary == 0 || + (segs[seg].ds_addr & bmask) == (curaddr & bmask))) { + segs[seg].ds_len += sgsize; + } else { + if (++seg >= dmat->nsegments) + return (EFBIG); + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } + *segp = seg; + return (0); +} + +/* + * Utility function to load a physical buffer. segp contains + * the starting segment on entrace, and the ending segment on exit. + */ +int +_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) +{ + bus_size_t sgsize; + bus_addr_t curaddr; + int error; + + if (segs == NULL) + segs = dmat->segments; + + if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { + _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); + } + } + + while (buflen > 0) { + curaddr = buf; + sgsize = MIN(buflen, dmat->maxsegsz); + if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && + map->pagesneeded != 0 && run_filter(dmat, curaddr)) { + sgsize = MIN(sgsize, PAGE_SIZE); + curaddr = add_bounce_page(dmat, map, 0, curaddr, + sgsize); + } + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; + buf += sgsize; + buflen -= sgsize; + } + + /* + * Did we fit? + */ + if (buflen != 0) { + _bus_dmamap_unload(dmat, map); + return (EFBIG); /* XXX better return value here? */ + } + return (0); +} +/* + * Utility function to load a linear buffer. segp contains * the starting segment on entrance, and the ending segment on exit. - * first indicates if this is the first invocation of this function. 
*/ -static __inline int -bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs, - bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap, - int flags, vm_offset_t *lastaddrp, int *segp) +int +_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, + bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs, + int *segp) { bus_size_t sgsize; - bus_addr_t curaddr, lastaddr, baddr, bmask; + bus_addr_t curaddr; + struct sync_list *sl; vm_offset_t vaddr = (vm_offset_t)buf; - int seg; int error = 0; pd_entry_t *pde; pt_entry_t pte; pt_entry_t *ptep; - lastaddr = *lastaddrp; - bmask = ~(dmat->boundary - 1); + if (segs == NULL) + segs = dmat->segments; if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { - error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, - flags); - if (error) - return (error); + _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); + } } CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, " "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment); - for (seg = *segp; buflen > 0 ; ) { + while (buflen > 0) { /* * Get the physical address for this segment. * * XXX Don't support checking for coherent mappings * XXX in user address space. */ - if (__predict_true(pmap == pmap_kernel())) { + if (__predict_true(pmap == kernel_pmap)) { if (pmap_get_pde_pte(pmap, vaddr, &pde, &ptep) == FALSE) return (EFAULT); @@ -910,263 +1068,66 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dm if (buflen < sgsize) sgsize = buflen; - /* - * Make sure we don't cross any boundaries. 
- */ - if (dmat->boundary > 0) { - baddr = (curaddr + dmat->boundary) & bmask; - if (sgsize > (baddr - curaddr)) - sgsize = (baddr - curaddr); - } if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && - map->pagesneeded != 0 && run_filter(dmat, curaddr)) - curaddr = add_bounce_page(dmat, map, vaddr, sgsize); - - if (dmat->ranges) { - struct arm32_dma_range *dr; - - dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges, - curaddr); - if (dr == NULL) - return (EINVAL); - /* - * In a valid DMA range. Translate the physical - * memory address to an address in the DMA window. - */ - curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase; - - } - - /* - * Insert chunk into a segment, coalescing with - * the previous segment if possible. - */ - if (seg >= 0 && curaddr == lastaddr && - (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && - (dmat->boundary == 0 || - (segs[seg].ds_addr & bmask) == - (curaddr & bmask))) { - segs[seg].ds_len += sgsize; - goto segdone; + map->pagesneeded != 0 && run_filter(dmat, curaddr)) { + curaddr = add_bounce_page(dmat, map, vaddr, curaddr, + sgsize); } else { - if (++seg >= dmat->nsegments) - break; - segs[seg].ds_addr = curaddr; - segs[seg].ds_len = sgsize; + sl = &map->slist[map->sync_count - 1]; + if (map->sync_count == 0 || + vaddr != sl->vaddr + sl->datacount) { + if (++map->sync_count > dmat->nsegments) + goto cleanup; + sl++; + sl->vaddr = vaddr; + sl->datacount = sgsize; + sl->busaddr = curaddr; + } else + sl->datacount += sgsize; } - if (error) + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) break; -segdone: - lastaddr = curaddr + sgsize; vaddr += sgsize; buflen -= sgsize; } - *segp = seg; - *lastaddrp = lastaddr; - +cleanup: /* * Did we fit? */ - if (buflen != 0) - error = EFBIG; /* XXX better return value here? */ - return (error); + if (buflen != 0) { + _bus_dmamap_unload(dmat, map); + return (EFBIG); /* XXX better return value here? 
*/ + } + return (0); } -/* - * Map the buffer buf into bus space using the dmamap map. - */ -int -bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, - bus_size_t buflen, bus_dmamap_callback_t *callback, - void *callback_arg, int flags) +void +__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_memory_t mem, bus_dmamap_callback_t *callback, + void *callback_arg) { - vm_offset_t lastaddr = 0; - int error, nsegs = -1; KASSERT(dmat != NULL, ("dmatag is NULL")); KASSERT(map != NULL, ("dmamap is NULL")); + map->mem = mem; map->callback = callback; map->callback_arg = callback_arg; - map->flags &= ~DMAMAP_TYPE_MASK; - map->flags |= DMAMAP_LINEAR; - map->buffer = buf; - map->len = buflen; - error = bus_dmamap_load_buffer(dmat, - dmat->segments, map, buf, buflen, kernel_pmap, - flags, &lastaddr, &nsegs); - if (error == EINPROGRESS) - return (error); - if (error) - (*callback)(callback_arg, NULL, 0, error); - else - (*callback)(callback_arg, dmat->segments, nsegs + 1, error); - - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, nsegs + 1, error); - - return (error); } -/* - * Like bus_dmamap_load(), but for mbufs. - * - * Note that the manpage states that BUS_DMA_NOWAIT is implied for mbufs. - * - * We know that the way the system allocates and uses mbufs implies that we can - * treat them as DMAMAP_CACHE_ALIGNED in terms of handling partial cache line - * flushes. Even though the flush may reference the data area within the mbuf - * that isn't aligned to a cache line, we know the overall mbuf itself is - * properly aligned, and we know that the CPU will not touch the header fields - * before the data area while the DMA is in progress. 
- */ -int -bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, - bus_dmamap_callback2_t *callback, void *callback_arg, - int flags) +bus_dma_segment_t * +_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_segment_t *segs, int nsegs, int error) { - int nsegs = -1, error = 0; - M_ASSERTPKTHDR(m0); - - flags |= BUS_DMA_NOWAIT; - map->flags &= ~DMAMAP_TYPE_MASK; - map->flags |= DMAMAP_MBUF | DMAMAP_CACHE_ALIGNED; - map->buffer = m0; - map->len = 0; - if (m0->m_pkthdr.len <= dmat->maxsize) { - vm_offset_t lastaddr = 0; - struct mbuf *m; - - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len > 0) { - error = bus_dmamap_load_buffer(dmat, - dmat->segments, map, m->m_data, m->m_len, - pmap_kernel(), flags, &lastaddr, &nsegs); - map->len += m->m_len; - } - } - } else { - error = EINVAL; - } - - if (error) { - /* - * force "no valid mappings" on error in callback. - */ - (*callback)(callback_arg, NULL, 0, 0, error); - } else { - (*callback)(callback_arg, dmat->segments, nsegs + 1, - m0->m_pkthdr.len, error); - } - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, nsegs + 1); - - return (error); + if (segs == NULL) + segs = dmat->segments; + return (segs); } -int -bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, - struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, - int flags) -{ - int error = 0; - M_ASSERTPKTHDR(m0); - - flags |= BUS_DMA_NOWAIT; - *nsegs = -1; - map->flags &= ~DMAMAP_TYPE_MASK; - map->flags |= DMAMAP_MBUF | DMAMAP_CACHE_ALIGNED; - map->buffer = m0; - map->len = 0; - if (m0->m_pkthdr.len <= dmat->maxsize) { - vm_offset_t lastaddr = 0; - struct mbuf *m; - - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len > 0) { - error = bus_dmamap_load_buffer(dmat, segs, map, - m->m_data, m->m_len, - pmap_kernel(), flags, &lastaddr, - nsegs); - map->len += m->m_len; - } - } - } else { - error = EINVAL; - } - - /* XXX FIXME: 
Having to increment nsegs is really annoying */ - ++*nsegs; - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, *nsegs); - return (error); -} - /* - * Like bus_dmamap_load(), but for uios. - */ -int -bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, - bus_dmamap_callback2_t *callback, void *callback_arg, - int flags) -{ - vm_offset_t lastaddr = 0; - int nsegs, i, error; - bus_size_t resid; - struct iovec *iov; - struct pmap *pmap; - - resid = uio->uio_resid; - iov = uio->uio_iov; - map->flags &= ~DMAMAP_TYPE_MASK; - map->flags |= DMAMAP_UIO; - map->buffer = uio; - map->len = 0; - - if (uio->uio_segflg == UIO_USERSPACE) { - KASSERT(uio->uio_td != NULL, - ("bus_dmamap_load_uio: USERSPACE but no proc")); - pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); - } else - pmap = kernel_pmap; - - error = 0; - nsegs = -1; - for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { - /* - * Now at the first iovec to load. Load each iovec - * until we have exhausted the residual count. - */ - bus_size_t minlen = - resid < iov[i].iov_len ? resid : iov[i].iov_len; - caddr_t addr = (caddr_t) iov[i].iov_base; - - if (minlen > 0) { - error = bus_dmamap_load_buffer(dmat, dmat->segments, - map, addr, minlen, pmap, flags, &lastaddr, &nsegs); - - map->len += minlen; - resid -= minlen; - } - } - - if (error) { - /* - * force "no valid mappings" on error in callback. - */ - (*callback)(callback_arg, NULL, 0, 0, error); - } else { - (*callback)(callback_arg, dmat->segments, nsegs+1, - uio->uio_resid, error); - } - - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, nsegs + 1); - return (error); -} - -/* * Release the mapping held by map. 
*/ void @@ -1174,24 +1135,25 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_ { struct bounce_page *bpage; - map->flags &= ~DMAMAP_TYPE_MASK; while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { STAILQ_REMOVE_HEAD(&map->bpages, links); free_bounce_page(dmat, bpage); } + map->sync_count = 0; return; } static void -bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op, int bufaligned) +bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op, + int bufaligned) { char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align]; register_t s; int partial; if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) { - cpu_dcache_wb_range((vm_offset_t)buf, len); - cpu_l2cache_wb_range((vm_offset_t)buf, len); + cpu_dcache_wb_range(buf, len); + cpu_l2cache_wb_range(buf, len); } /* @@ -1214,38 +1176,37 @@ static void if (op & BUS_DMASYNC_PREREAD) { if (!(op & BUS_DMASYNC_PREWRITE) && !partial) { - cpu_dcache_inv_range((vm_offset_t)buf, len); - cpu_l2cache_inv_range((vm_offset_t)buf, len); + cpu_dcache_inv_range(buf, len); + cpu_l2cache_inv_range(buf, len); } else { - cpu_dcache_wbinv_range((vm_offset_t)buf, len); - cpu_l2cache_wbinv_range((vm_offset_t)buf, len); + cpu_dcache_wbinv_range(buf, len); + cpu_l2cache_wbinv_range(buf, len); } } if (op & BUS_DMASYNC_POSTREAD) { if (partial && !bufaligned) { s = intr_disable(); - if ((vm_offset_t)buf & arm_dcache_align_mask) - memcpy(_tmp_cl, (void *)((vm_offset_t)buf & + if (buf & arm_dcache_align_mask) + memcpy(_tmp_cl, (void *)(buf & ~arm_dcache_align_mask), - (vm_offset_t)buf & arm_dcache_align_mask); - if (((vm_offset_t)buf + len) & arm_dcache_align_mask) + buf & arm_dcache_align_mask); + if ((buf + len) & arm_dcache_align_mask) memcpy(_tmp_clend, - (void *)((vm_offset_t)buf + len), - arm_dcache_align - (((vm_offset_t)(buf) + - len) & arm_dcache_align_mask)); + (void *)(buf + len), + arm_dcache_align - + ((buf + len) & arm_dcache_align_mask)); } - cpu_dcache_inv_range((vm_offset_t)buf, len); - 
cpu_l2cache_inv_range((vm_offset_t)buf, len); + cpu_dcache_inv_range(buf, len); + cpu_l2cache_inv_range(buf, len); if (partial && !bufaligned) { - if ((vm_offset_t)buf & arm_dcache_align_mask) - memcpy((void *)((vm_offset_t)buf & + if (buf & arm_dcache_align_mask) + memcpy((void *)(buf & ~arm_dcache_align_mask), _tmp_cl, - (vm_offset_t)buf & arm_dcache_align_mask); - if (((vm_offset_t)buf + len) & arm_dcache_align_mask) - memcpy((void *)((vm_offset_t)buf + len), + buf & arm_dcache_align_mask); + if ((buf + len) & arm_dcache_align_mask) + memcpy((void *)(buf + len), _tmp_clend, arm_dcache_align - - (((vm_offset_t)(buf) + len) & - arm_dcache_align_mask)); + ((buf + len) & arm_dcache_align_mask)); intr_restore(s); } } @@ -1258,10 +1219,18 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap STAILQ_FOREACH(bpage, &map->bpages, links) { if (op & BUS_DMASYNC_PREWRITE) { - bcopy((void *)bpage->datavaddr, - (void *)(bpage->vaddr_nocache != 0 ? - bpage->vaddr_nocache : bpage->vaddr), - bpage->datacount); + if (bpage->datavaddr != 0) + bcopy((void *)bpage->datavaddr, + (void *)(bpage->vaddr_nocache != 0 ? + bpage->vaddr_nocache : + bpage->vaddr), + bpage->datacount); + else + physcopyout(bpage->dataaddr, + (void *)(bpage->vaddr_nocache != 0 ? + bpage->vaddr_nocache : + bpage->vaddr), + bpage->datacount); if (bpage->vaddr_nocache == 0) { cpu_dcache_wb_range(bpage->vaddr, bpage->datacount); @@ -1277,36 +1246,23 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap cpu_l2cache_inv_range(bpage->vaddr, bpage->datacount); } - bcopy((void *)(bpage->vaddr_nocache != 0 ? - bpage->vaddr_nocache : bpage->vaddr), - (void *)bpage->datavaddr, bpage->datacount); + if (bpage->datavaddr != 0) + bcopy((void *)(bpage->vaddr_nocache != 0 ? + bpage->vaddr_nocache : bpage->vaddr), + (void *)bpage->datavaddr, bpage->datacount); + else + physcopyin((void *)(bpage->vaddr_nocache != 0 ? 
+ bpage->vaddr_nocache : bpage->vaddr), + bpage->dataaddr, bpage->datacount); dmat->bounce_zone->total_bounced++; } } } -static __inline int -_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len) -{ - struct bounce_page *bpage; - - STAILQ_FOREACH(bpage, &map->bpages, links) { - if ((vm_offset_t)buf >= bpage->datavaddr && - (vm_offset_t)buf + len <= bpage->datavaddr + - bpage->datacount) - return (1); - } - return (0); - -} - void _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { - struct mbuf *m; - struct uio *uio; - int resid; - struct iovec *iov; + struct sync_list *sl, *end; int bufaligned; if (op == BUS_DMASYNC_POSTWRITE) @@ -1317,40 +1273,11 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t _bus_dmamap_sync_bp(dmat, map, op); CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags); bufaligned = (map->flags & DMAMAP_CACHE_ALIGNED); - switch(map->flags & DMAMAP_TYPE_MASK) { - case DMAMAP_LINEAR: - if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len))) - bus_dmamap_sync_buf(map->buffer, map->len, op, + if (map->sync_count) { + end = &map->slist[map->sync_count]; + for (sl = &map->slist[0]; sl != end; sl++) + bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op, bufaligned); - break; - case DMAMAP_MBUF: - m = map->buffer; - while (m) { - if (m->m_len > 0 && - !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len))) - bus_dmamap_sync_buf(m->m_data, m->m_len, op, - bufaligned); - m = m->m_next; - } - break; - case DMAMAP_UIO: - uio = map->buffer; - iov = uio->uio_iov; - resid = uio->uio_resid; - for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) { - bus_size_t minlen = resid < iov[i].iov_len ? 
resid : - iov[i].iov_len; - if (minlen > 0) { - if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base, - minlen)) - bus_dmamap_sync_buf(iov[i].iov_base, - minlen, op, bufaligned); - resid -= minlen; - } - } - break; - default: - break; } drain: @@ -1517,7 +1444,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmama static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, - bus_size_t size) + bus_addr_t addr, bus_size_t size) { struct bounce_zone *bz; struct bounce_page *bpage; @@ -1550,6 +1477,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t m bpage->busaddr |= vaddr & PAGE_MASK; } bpage->datavaddr = vaddr; + bpage->dataaddr = addr; bpage->datacount = size; STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); return (bpage->busaddr); @@ -1603,8 +1531,8 @@ busdma_swi(void) mtx_unlock(&bounce_lock); dmat = map->dmat; (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); - bus_dmamap_load(map->dmat, map, map->buffer, map->len, - map->callback, map->callback_arg, /*flags*/0); + bus_dmamap_load_mem(map->dmat, map, &map->mem, + map->callback, map->callback_arg, BUS_DMA_WAITOK); (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); mtx_lock(&bounce_lock); } Index: powerpc/powerpc/busdma_machdep.c =================================================================== --- powerpc/powerpc/busdma_machdep.c (.../head/sys) (revision 244874) +++ powerpc/powerpc/busdma_machdep.c (.../projects/physbio/sys) (revision 244874) @@ -41,9 +41,8 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include -#include #include +#include #include #include @@ -87,6 +86,7 @@ struct bounce_page { vm_offset_t vaddr; /* kva of bounce buffer */ bus_addr_t busaddr; /* Physical address */ vm_offset_t datavaddr; /* kva of client data */ + bus_addr_t dataaddr; /* client physical address */ bus_size_t datacount; /* client data count */ STAILQ_ENTRY(bounce_page) links; }; @@ -125,8 +125,7 @@ struct bus_dmamap { int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; - void 
*buf; /* unmapped buffer pointer */ - bus_size_t buflen; /* unmapped buffer length */ + bus_dma_memory_t mem; bus_dma_segment_t *segments; int nsegs; bus_dmamap_callback_t *callback; @@ -144,7 +143,8 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit); static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, - vm_offset_t vaddr, bus_size_t size); + vm_offset_t vaddr, bus_addr_t addr, + bus_size_t size); static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); @@ -564,32 +564,45 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, b CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); } -/* - * Utility function to load a linear buffer. lastaddrp holds state - * between invocations (for multiple-buffer loads). segp contains - * the starting segment on entrance, and the ending segment on exit. - * first indicates if this is the first invocation of this function. 
- */ -static __inline int -_bus_dmamap_load_buffer(bus_dma_tag_t dmat, - bus_dmamap_t map, - void *buf, bus_size_t buflen, - pmap_t pmap, - int flags, - bus_addr_t *lastaddrp, - bus_dma_segment_t *segs, - int *segp, - int first) +static void +_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags) { + bus_addr_t curaddr; bus_size_t sgsize; - bus_addr_t curaddr, lastaddr, baddr, bmask; - vm_offset_t vaddr; - bus_addr_t paddr; - int seg; - if (map->pagesneeded == 0 && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) { - vm_offset_t vendaddr; + if (map->pagesneeded == 0) { + CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " + "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem), + dmat->boundary, dmat->alignment); + CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded); + /* + * Count the number of bounce pages + * needed in order to complete this transfer + */ + curaddr = buf; + while (buflen != 0) { + sgsize = MIN(buflen, dmat->maxsegsz); + if (run_filter(dmat, curaddr) != 0) { + sgsize = MIN(sgsize, PAGE_SIZE); + map->pagesneeded++; + } + curaddr += sgsize; + buflen -= sgsize; + } + CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); + } +} +static void +_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, + void *buf, bus_size_t buflen, int flags) +{ + vm_offset_t vaddr; + vm_offset_t vendaddr; + bus_addr_t paddr; + + if (map->pagesneeded == 0) { CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem), dmat->boundary, dmat->alignment); @@ -605,10 +618,10 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, b bus_size_t sg_len; sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK); - if (pmap) - paddr = pmap_extract(pmap, vaddr); - else + if (pmap == kernel_pmap) paddr = pmap_kextract(vaddr); + else + paddr = pmap_extract(pmap, vaddr); if (run_filter(dmat, paddr) != 0) { sg_len = roundup2(sg_len, 
dmat->alignment); map->pagesneeded++; @@ -617,315 +630,230 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, b } CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); } +} +static int +_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) +{ + /* Reserve Necessary Bounce Pages */ - if (map->pagesneeded != 0) { - mtx_lock(&bounce_lock); - if (flags & BUS_DMA_NOWAIT) { - if (reserve_bounce_pages(dmat, map, 0) != 0) { - mtx_unlock(&bounce_lock); - return (ENOMEM); - } - } else { - if (reserve_bounce_pages(dmat, map, 1) != 0) { - /* Queue us for resources */ - map->dmat = dmat; - map->buf = buf; - map->buflen = buflen; - STAILQ_INSERT_TAIL(&bounce_map_waitinglist, - map, links); - mtx_unlock(&bounce_lock); - return (EINPROGRESS); - } + mtx_lock(&bounce_lock); + if (flags & BUS_DMA_NOWAIT) { + if (reserve_bounce_pages(dmat, map, 0) != 0) { + mtx_unlock(&bounce_lock); + return (ENOMEM); } - mtx_unlock(&bounce_lock); + } else { + if (reserve_bounce_pages(dmat, map, 1) != 0) { + /* Queue us for resources */ + STAILQ_INSERT_TAIL(&bounce_map_waitinglist, + map, links); + mtx_unlock(&bounce_lock); + return (EINPROGRESS); + } } + mtx_unlock(&bounce_lock); - vaddr = (vm_offset_t)buf; - lastaddr = *lastaddrp; - bmask = ~(dmat->boundary - 1); + return (0); +} - for (seg = *segp; buflen > 0 ; ) { - bus_size_t max_sgsize; +/* + * Add a single contiguous physical range to the segment list. + */ +static int +_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, + bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) +{ + bus_addr_t baddr, bmask; + int seg; - /* - * Get the physical address for this segment. - */ - if (pmap) - curaddr = pmap_extract(pmap, vaddr); - else - curaddr = pmap_kextract(vaddr); + /* + * Make sure we don't cross any boundaries. 
+ */ + bmask = ~(dmat->boundary - 1); + if (dmat->boundary > 0) { + baddr = (curaddr + dmat->boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } - /* - * Compute the segment size, and adjust counts. - */ - max_sgsize = MIN(buflen, dmat->maxsegsz); - sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK); - if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) { - sgsize = roundup2(sgsize, dmat->alignment); - sgsize = MIN(sgsize, max_sgsize); - curaddr = add_bounce_page(dmat, map, vaddr, sgsize); - } else { - sgsize = MIN(sgsize, max_sgsize); - } - - /* - * Make sure we don't cross any boundaries. - */ - if (dmat->boundary > 0) { - baddr = (curaddr + dmat->boundary) & bmask; - if (sgsize > (baddr - curaddr)) - sgsize = (baddr - curaddr); - } - - /* - * Insert chunk into a segment, coalescing with - * previous segment if possible. - */ - if (first) { + /* + * Insert chunk into a segment, coalescing with + * previous segment if possible. + */ + seg = *segp; + if (seg == -1) { + seg = 0; + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } else { + if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && + (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && + (dmat->boundary == 0 || + (segs[seg].ds_addr & bmask) == (curaddr & bmask))) + segs[seg].ds_len += sgsize; + else { + if (++seg >= dmat->nsegments) + return (0); segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; - first = 0; - } else { - if (curaddr == lastaddr && - (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && - (dmat->boundary == 0 || - (segs[seg].ds_addr & bmask) == (curaddr & bmask))) - segs[seg].ds_len += sgsize; - else { - if (++seg >= dmat->nsegments) - break; - segs[seg].ds_addr = curaddr; - segs[seg].ds_len = sgsize; - } } - - lastaddr = curaddr + sgsize; - vaddr += sgsize; - buflen -= sgsize; } - *segp = seg; - *lastaddrp = lastaddr; - - /* - * Did we fit? - */ - return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? 
*/ + return (sgsize); } /* - * Map the buffer buf into bus space using the dmamap map. + * Utility function to load a physical buffer. segp contains + * the starting segment on entrace, and the ending segment on exit. */ int -bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, - bus_size_t buflen, bus_dmamap_callback_t *callback, - void *callback_arg, int flags) +_bus_dmamap_load_phys(bus_dma_tag_t dmat, + bus_dmamap_t map, + vm_paddr_t buf, bus_size_t buflen, + int flags, + bus_dma_segment_t *segs, + int *segp) { - bus_addr_t lastaddr = 0; - int error; + bus_addr_t curaddr; + bus_size_t sgsize; + int error; - if (dmat->flags & BUS_DMA_COULD_BOUNCE) { - flags |= BUS_DMA_WAITOK; - map->callback = callback; - map->callback_arg = callback_arg; + if (segs == NULL) + segs = map->segments; + + if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { + _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); + } } - map->nsegs = 0; - error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags, - &lastaddr, map->segments, &map->nsegs, 1); - map->nsegs++; - - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, map->nsegs); - - if (error == EINPROGRESS) { - return (error); + while (buflen > 0) { + curaddr = buf; + sgsize = MIN(buflen, dmat->maxsegsz); + if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) { + sgsize = MIN(sgsize, PAGE_SIZE); + curaddr = add_bounce_page(dmat, map, 0, curaddr, + sgsize); + } + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; + buf += sgsize; + buflen -= sgsize; } - if (dmat->iommu != NULL) - IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr, - dmat->highaddr, dmat->alignment, dmat->boundary, - dmat->iommu_cookie); - - if (error) - (*callback)(callback_arg, map->segments, 0, error); - else - 
(*callback)(callback_arg, map->segments, map->nsegs, 0); - /* - * Return ENOMEM to the caller so that it can pass it up the stack. - * This error only happens when NOWAIT is set, so deferal is disabled. + * Did we fit? */ - if (error == ENOMEM) - return (error); - - return (0); + return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } - /* - * Like _bus_dmamap_load(), but for mbufs. + * Utility function to load a linear buffer. segp contains + * the starting segment on entrance, and the ending segment on exit. */ int -bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, - struct mbuf *m0, - bus_dmamap_callback2_t *callback, void *callback_arg, - int flags) +_bus_dmamap_load_buffer(bus_dma_tag_t dmat, + bus_dmamap_t map, + void *buf, bus_size_t buflen, + pmap_t pmap, + int flags, + bus_dma_segment_t *segs, + int *segp) { + bus_size_t sgsize; + bus_addr_t curaddr; + vm_offset_t vaddr; int error; - M_ASSERTPKTHDR(m0); + if (segs == NULL) + segs = map->segments; - flags |= BUS_DMA_NOWAIT; - map->nsegs = 0; - error = 0; - if (m0->m_pkthdr.len <= dmat->maxsize) { - int first = 1; - bus_addr_t lastaddr = 0; - struct mbuf *m; - - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len > 0) { - error = _bus_dmamap_load_buffer(dmat, map, - m->m_data, m->m_len, - NULL, flags, &lastaddr, - map->segments, &map->nsegs, first); - first = 0; - } + if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { + _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); } - } else { - error = EINVAL; } - map->nsegs++; - if (dmat->iommu != NULL) - IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr, - dmat->highaddr, dmat->alignment, dmat->boundary, - dmat->iommu_cookie); + vaddr = (vm_offset_t)buf; - if (error) { - /* force "no valid mappings" in callback */ - (*callback)(callback_arg, map->segments, 0, 0, error); - } else { - 
(*callback)(callback_arg, map->segments, - map->nsegs, m0->m_pkthdr.len, error); - } - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, map->nsegs); - return (error); -} + while (buflen > 0) { + bus_size_t max_sgsize; -int -bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, - struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, - int flags) -{ - int error; + /* + * Get the physical address for this segment. + */ + if (pmap == kernel_pmap) + curaddr = pmap_kextract(vaddr); + else + curaddr = pmap_extract(pmap, vaddr); - M_ASSERTPKTHDR(m0); - - flags |= BUS_DMA_NOWAIT; - *nsegs = 0; - error = 0; - if (m0->m_pkthdr.len <= dmat->maxsize) { - int first = 1; - bus_addr_t lastaddr = 0; - struct mbuf *m; - - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len > 0) { - error = _bus_dmamap_load_buffer(dmat, map, - m->m_data, m->m_len, - NULL, flags, &lastaddr, - segs, nsegs, first); - first = 0; - } + /* + * Compute the segment size, and adjust counts. 
+ */ + max_sgsize = MIN(buflen, dmat->maxsegsz); + sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK); + if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) { + sgsize = roundup2(sgsize, dmat->alignment); + sgsize = MIN(sgsize, max_sgsize); + curaddr = add_bounce_page(dmat, map, vaddr, curaddr, + sgsize); + } else { + sgsize = MIN(sgsize, max_sgsize); } - } else { - error = EINVAL; + + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; + vaddr += sgsize; + buflen -= sgsize; } - /* XXX FIXME: Having to increment nsegs is really annoying */ - ++*nsegs; - - if (dmat->iommu != NULL) - IOMMU_MAP(dmat->iommu, segs, nsegs, dmat->lowaddr, - dmat->highaddr, dmat->alignment, dmat->boundary, - dmat->iommu_cookie); - - map->nsegs = *nsegs; - memcpy(map->segments, segs, map->nsegs*sizeof(segs[0])); - - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, *nsegs); - return (error); + /* + * Did we fit? + */ + return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } -/* - * Like _bus_dmamap_load(), but for uios. - */ -int -bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, - struct uio *uio, - bus_dmamap_callback2_t *callback, void *callback_arg, - int flags) +void +__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_memory_t mem, bus_dmamap_callback_t *callback, + void *callback_arg) { - bus_addr_t lastaddr = 0; - int error, first, i; - bus_size_t resid; - struct iovec *iov; - pmap_t pmap; - flags |= BUS_DMA_NOWAIT; - resid = uio->uio_resid; - iov = uio->uio_iov; - - if (uio->uio_segflg == UIO_USERSPACE) { - KASSERT(uio->uio_td != NULL, - ("bus_dmamap_load_uio: USERSPACE but no proc")); - pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); - } else - pmap = NULL; - - map->nsegs = 0; - error = 0; - first = 1; - for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { - /* - * Now at the first iovec to load. 
Load each iovec - * until we have exhausted the residual count. - */ - bus_size_t minlen = - resid < iov[i].iov_len ? resid : iov[i].iov_len; - caddr_t addr = (caddr_t) iov[i].iov_base; - - if (minlen > 0) { - error = _bus_dmamap_load_buffer(dmat, map, - addr, minlen, pmap, flags, &lastaddr, - map->segments, &map->nsegs, first); - first = 0; - - resid -= minlen; - } + if (dmat->flags & BUS_DMA_COULD_BOUNCE) { + map->dmat = dmat; + map->mem = mem; + map->callback = callback; + map->callback_arg = callback_arg; } +} - map->nsegs++; +bus_dma_segment_t * +_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_segment_t *segs, int nsegs, int error) +{ + + if (segs != NULL) + memcpy(map->segments, segs, map->nsegs*sizeof(segs[0])); + else + segs = map->segments; + map->nsegs = nsegs; if (dmat->iommu != NULL) - IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr, - dmat->highaddr, dmat->alignment, dmat->boundary, - dmat->iommu_cookie); + IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, + dmat->lowaddr, dmat->highaddr, dmat->alignment, + dmat->boundary, dmat->iommu_cookie); - if (error) { - /* force "no valid mappings" in callback */ - (*callback)(callback_arg, map->segments, 0, 0, error); - } else { - (*callback)(callback_arg, map->segments, - map->nsegs, uio->uio_resid, error); - } - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, map->nsegs); - return (error); + return (segs); } /* @@ -963,9 +891,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t if (op & BUS_DMASYNC_PREWRITE) { while (bpage != NULL) { - bcopy((void *)bpage->datavaddr, - (void *)bpage->vaddr, - bpage->datacount); + if (bpage->datavaddr != 0) + bcopy((void *)bpage->datavaddr, + (void *)bpage->vaddr, + bpage->datacount); + else + physcopyout(bpage->dataaddr, + (void *)bpage->vaddr, + bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; @@ -973,9 +906,13 @@ 
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t if (op & BUS_DMASYNC_POSTREAD) { while (bpage != NULL) { - bcopy((void *)bpage->vaddr, - (void *)bpage->datavaddr, - bpage->datacount); + if (bpage->datavaddr != 0) + bcopy((void *)bpage->vaddr, + (void *)bpage->datavaddr, + bpage->datacount); + else + physcopyin((void *)bpage->vaddr, + bpage->dataaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; @@ -1142,7 +1079,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmama static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, - bus_size_t size) + bus_addr_t addr, bus_size_t size) { struct bounce_zone *bz; struct bounce_page *bpage; @@ -1174,6 +1111,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t m bpage->busaddr |= vaddr & PAGE_MASK; } bpage->datavaddr = vaddr; + bpage->dataaddr = addr; bpage->datacount = size; STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); return (bpage->busaddr); @@ -1227,8 +1165,9 @@ busdma_swi(void) mtx_unlock(&bounce_lock); dmat = map->dmat; (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); - bus_dmamap_load(map->dmat, map, map->buf, map->buflen, - map->callback, map->callback_arg, /*flags*/0); + bus_dmamap_load_mem(map->dmat, map, &map->mem, + map->callback, map->callback_arg, + BUS_DMA_WAITOK); (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); mtx_lock(&bounce_lock); } Index: powerpc/ps3/ps3cdrom.c =================================================================== --- powerpc/ps3/ps3cdrom.c (.../head/sys) (revision 244874) +++ powerpc/ps3/ps3cdrom.c (.../projects/physbio/sys) (revision 244874) @@ -369,9 +369,8 @@ ps3cdrom_action(struct cam_sim *sim, union ccb *cc TAILQ_REMOVE(&sc->sc_free_xferq, xp, x_queue); - err = bus_dmamap_load(sc->sc_dmatag, xp->x_dmamap, - ccb->csio.data_ptr, ccb->csio.dxfer_len, ps3cdrom_transfer, - xp, 0); + err = bus_dmamap_load_ccb(sc->sc_dmatag, xp->x_dmamap, + ccb, ps3cdrom_transfer, xp, 0); if (err && err != 
EINPROGRESS) { device_printf(dev, "Could not load DMA map (%d)\n", err); Index: sparc64/sparc64/iommu.c =================================================================== --- sparc64/sparc64/iommu.c (.../head/sys) (revision 244874) +++ sparc64/sparc64/iommu.c (.../projects/physbio/sys) (revision 244874) @@ -847,31 +847,50 @@ iommu_dvmamap_destroy(bus_dma_tag_t dt, bus_dmamap } /* - * IOMMU DVMA operations, common to PCI and SBus + * Utility function to load a physical buffer. segp contains + * the starting segment on entrace, and the ending segment on exit. */ static int -iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is, - bus_dmamap_t map, void *buf, bus_size_t buflen, struct thread *td, - int flags, bus_dma_segment_t *segs, int *segp, int align) +iommu_dvmamap_load_phys(bus_dma_tag_t dt, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { bus_addr_t amask, dvmaddr, dvmoffs; bus_size_t sgsize, esize; - vm_offset_t vaddr, voffs; + struct iommu_state *is; + vm_offset_t voffs; vm_paddr_t curaddr; - pmap_t pmap = NULL; int error, firstpg, sgcnt; u_int slot; + is = dt->dt_cookie; + if (*segp == -1) { + if ((map->dm_flags & DMF_LOADED) != 0) { +#ifdef DIAGNOSTIC + printf("%s: map still in use\n", __func__); +#endif + bus_dmamap_unload(dt, map); + } + + /* + * Make sure that the map is not on a queue so that the + * resource list may be safely accessed and modified without + * needing the lock to cover the whole operation. + */ + IS_LOCK(is); + iommu_map_remq(is, map); + IS_UNLOCK(is); + + amask = dt->dt_alignment - 1; + } else + amask = 0; KASSERT(buflen != 0, ("%s: buflen == 0!", __func__)); if (buflen > dt->dt_maxsize) return (EINVAL); - if (td != NULL) - pmap = vmspace_pmap(td->td_proc->p_vmspace); + if (segs == NULL) + segs = dt->dt_segments; - vaddr = (vm_offset_t)buf; - voffs = vaddr & IO_PAGE_MASK; - amask = align ? 
dt->dt_alignment - 1 : 0; + voffs = buf & IO_PAGE_MASK; /* Try to find a slab that is large enough. */ error = iommu_dvma_vallocseg(dt, is, map, voffs, buflen, amask, @@ -885,23 +904,17 @@ static int map->dm_flags |= iommu_use_streaming(is, map, buflen) != 0 ? DMF_STREAMED : 0; for (; buflen > 0; ) { - /* - * Get the physical address for this page. - */ - if (pmap != NULL) - curaddr = pmap_extract(pmap, vaddr); - else - curaddr = pmap_kextract(vaddr); + curaddr = buf; /* * Compute the segment size, and adjust counts. */ - sgsize = IO_PAGE_SIZE - ((u_long)vaddr & IO_PAGE_MASK); + sgsize = IO_PAGE_SIZE - ((u_long)buf & IO_PAGE_MASK); if (buflen < sgsize) sgsize = buflen; buflen -= sgsize; - vaddr += sgsize; + buf += sgsize; dvmoffs = trunc_io_page(dvmaddr); iommu_enter(is, dvmoffs, trunc_io_page(curaddr), @@ -949,203 +962,153 @@ static int return (0); } +/* + * IOMMU DVMA operations, common to PCI and SBus + */ static int -iommu_dvmamap_load(bus_dma_tag_t dt, bus_dmamap_t map, void *buf, - bus_size_t buflen, bus_dmamap_callback_t *cb, void *cba, - int flags) +iommu_dvmamap_load_buffer(bus_dma_tag_t dt, bus_dmamap_t map, void *buf, + bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, + int *segp) { - struct iommu_state *is = dt->dt_cookie; - int error, seg = -1; + bus_addr_t amask, dvmaddr, dvmoffs; + bus_size_t sgsize, esize; + struct iommu_state *is; + vm_offset_t vaddr, voffs; + vm_paddr_t curaddr; + int error, firstpg, sgcnt; + u_int slot; - if ((map->dm_flags & DMF_LOADED) != 0) { + is = dt->dt_cookie; + if (*segp == -1) { + if ((map->dm_flags & DMF_LOADED) != 0) { #ifdef DIAGNOSTIC - printf("%s: map still in use\n", __func__); + printf("%s: map still in use\n", __func__); #endif - bus_dmamap_unload(dt, map); - } + bus_dmamap_unload(dt, map); + } - /* - * Make sure that the map is not on a queue so that the resource list - * may be safely accessed and modified without needing the lock to - * cover the whole operation. 
- */ - IS_LOCK(is); - iommu_map_remq(is, map); - IS_UNLOCK(is); + /* + * Make sure that the map is not on a queue so that the + * resource list may be safely accessed and modified without + * needing the lock to cover the whole operation. + */ + IS_LOCK(is); + iommu_map_remq(is, map); + IS_UNLOCK(is); - error = iommu_dvmamap_load_buffer(dt, is, map, buf, buflen, NULL, - flags, dt->dt_segments, &seg, 1); + amask = dt->dt_alignment - 1; + } else + amask = 0; + KASSERT(buflen != 0, ("%s: buflen == 0!", __func__)); + if (buflen > dt->dt_maxsize) + return (EINVAL); - IS_LOCK(is); - iommu_map_insq(is, map); - if (error != 0) { - iommu_dvmamap_vunload(is, map); - IS_UNLOCK(is); - (*cb)(cba, dt->dt_segments, 0, error); - } else { - IS_UNLOCK(is); - map->dm_flags |= DMF_LOADED; - (*cb)(cba, dt->dt_segments, seg + 1, 0); - } + if (segs == NULL) + segs = dt->dt_segments; - return (error); -} + vaddr = (vm_offset_t)buf; + voffs = vaddr & IO_PAGE_MASK; -static int -iommu_dvmamap_load_mbuf(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0, - bus_dmamap_callback2_t *cb, void *cba, int flags) -{ - struct iommu_state *is = dt->dt_cookie; - struct mbuf *m; - int error = 0, first = 1, nsegs = -1; + /* Try to find a slab that is large enough. */ + error = iommu_dvma_vallocseg(dt, is, map, voffs, buflen, amask, + &dvmaddr); + if (error != 0) + return (error); - M_ASSERTPKTHDR(m0); + sgcnt = *segp; + firstpg = 1; + map->dm_flags &= ~DMF_STREAMED; + map->dm_flags |= iommu_use_streaming(is, map, buflen) != 0 ? + DMF_STREAMED : 0; + for (; buflen > 0; ) { + /* + * Get the physical address for this page. + */ + if (pmap == kernel_pmap) + curaddr = pmap_kextract(vaddr); + else + curaddr = pmap_extract(pmap, vaddr); - if ((map->dm_flags & DMF_LOADED) != 0) { -#ifdef DIAGNOSTIC - printf("%s: map still in use\n", __func__); -#endif - bus_dmamap_unload(dt, map); - } + /* + * Compute the segment size, and adjust counts. 
+ */ + sgsize = IO_PAGE_SIZE - ((u_long)vaddr & IO_PAGE_MASK); + if (buflen < sgsize) + sgsize = buflen; - IS_LOCK(is); - iommu_map_remq(is, map); - IS_UNLOCK(is); + buflen -= sgsize; + vaddr += sgsize; - if (m0->m_pkthdr.len <= dt->dt_maxsize) { - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len == 0) - continue; - error = iommu_dvmamap_load_buffer(dt, is, map, - m->m_data, m->m_len, NULL, flags, dt->dt_segments, - &nsegs, first); - first = 0; + dvmoffs = trunc_io_page(dvmaddr); + iommu_enter(is, dvmoffs, trunc_io_page(curaddr), + (map->dm_flags & DMF_STREAMED) != 0, flags); + if ((is->is_flags & IOMMU_FLUSH_CACHE) != 0) { + slot = IOTSBSLOT(dvmoffs); + if (buflen <= 0 || slot % 8 == 7) + IOMMU_WRITE8(is, is_iommu, IMR_CACHE_FLUSH, + is->is_ptsb + slot * 8); } - } else - error = EINVAL; - IS_LOCK(is); - iommu_map_insq(is, map); - if (error != 0) { - iommu_dvmamap_vunload(is, map); - IS_UNLOCK(is); - /* force "no valid mappings" in callback */ - (*cb)(cba, dt->dt_segments, 0, 0, error); - } else { - IS_UNLOCK(is); - map->dm_flags |= DMF_LOADED; - (*cb)(cba, dt->dt_segments, nsegs + 1, m0->m_pkthdr.len, 0); + /* + * Chop the chunk up into segments of at most maxsegsz, but try + * to fill each segment as well as possible. + */ + if (!firstpg) { + esize = ulmin(sgsize, + dt->dt_maxsegsz - segs[sgcnt].ds_len); + segs[sgcnt].ds_len += esize; + sgsize -= esize; + dvmaddr += esize; + } + while (sgsize > 0) { + sgcnt++; + if (sgcnt >= dt->dt_nsegments) + return (EFBIG); + /* + * No extra alignment here - the common practice in + * the busdma code seems to be that only the first + * segment needs to satisfy the alignment constraints + * (and that only for bus_dmamem_alloc()ed maps). + * It is assumed that such tags have maxsegsize >= + * maxsize. 
+ */ + esize = ulmin(sgsize, dt->dt_maxsegsz); + segs[sgcnt].ds_addr = dvmaddr; + segs[sgcnt].ds_len = esize; + sgsize -= esize; + dvmaddr += esize; + } + + firstpg = 0; } - return (error); + *segp = sgcnt; + return (0); } -static int -iommu_dvmamap_load_mbuf_sg(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0, - bus_dma_segment_t *segs, int *nsegs, int flags) +static void +iommu_dvmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_memory_t mem, bus_dmamap_callback_t *callback, void *callback_arg) { - struct iommu_state *is = dt->dt_cookie; - struct mbuf *m; - int error = 0, first = 1; - - M_ASSERTPKTHDR(m0); - - *nsegs = -1; - if ((map->dm_flags & DMF_LOADED) != 0) { -#ifdef DIAGNOSTIC - printf("%s: map still in use\n", __func__); -#endif - bus_dmamap_unload(dt, map); - } - - IS_LOCK(is); - iommu_map_remq(is, map); - IS_UNLOCK(is); - - if (m0->m_pkthdr.len <= dt->dt_maxsize) { - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len == 0) - continue; - error = iommu_dvmamap_load_buffer(dt, is, map, - m->m_data, m->m_len, NULL, flags, segs, - nsegs, first); - first = 0; - } - } else - error = EINVAL; - - IS_LOCK(is); - iommu_map_insq(is, map); - if (error != 0) { - iommu_dvmamap_vunload(is, map); - IS_UNLOCK(is); - } else { - IS_UNLOCK(is); - map->dm_flags |= DMF_LOADED; - ++*nsegs; - } - return (error); } -static int -iommu_dvmamap_load_uio(bus_dma_tag_t dt, bus_dmamap_t map, struct uio *uio, - bus_dmamap_callback2_t *cb, void *cba, int flags) +static bus_dma_segment_t * +iommu_dvmamap_complete(bus_dma_tag_t dt, bus_dmamap_t map, + bus_dma_segment_t *segs, int nsegs, int error) { struct iommu_state *is = dt->dt_cookie; - struct iovec *iov; - struct thread *td = NULL; - bus_size_t minlen, resid; - int nsegs = -1, error = 0, first = 1, i; - if ((map->dm_flags & DMF_LOADED) != 0) { -#ifdef DIAGNOSTIC - printf("%s: map still in use\n", __func__); -#endif - bus_dmamap_unload(dt, map); - } - IS_LOCK(is); - iommu_map_remq(is, map); - 
IS_UNLOCK(is); - - resid = uio->uio_resid; - iov = uio->uio_iov; - - if (uio->uio_segflg == UIO_USERSPACE) { - td = uio->uio_td; - KASSERT(td != NULL, - ("%s: USERSPACE but no proc", __func__)); - } - - for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) { - /* - * Now at the first iovec to load. Load each iovec - * until we have exhausted the residual count. - */ - minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; - if (minlen == 0) - continue; - - error = iommu_dvmamap_load_buffer(dt, is, map, - iov[i].iov_base, minlen, td, flags, dt->dt_segments, - &nsegs, first); - first = 0; - - resid -= minlen; - } - - IS_LOCK(is); iommu_map_insq(is, map); - if (error) { + if (error != 0) { iommu_dvmamap_vunload(is, map); IS_UNLOCK(is); - /* force "no valid mappings" in callback */ - (*cb)(cba, dt->dt_segments, 0, 0, error); } else { IS_UNLOCK(is); map->dm_flags |= DMF_LOADED; - (*cb)(cba, dt->dt_segments, nsegs + 1, uio->uio_resid, 0); } - return (error); + if (segs == NULL) + segs = dt->dt_segments; + return (segs); } static void @@ -1241,10 +1204,10 @@ iommu_diag(struct iommu_state *is, vm_offset_t va) struct bus_dma_methods iommu_dma_methods = { iommu_dvmamap_create, iommu_dvmamap_destroy, - iommu_dvmamap_load, - iommu_dvmamap_load_mbuf, - iommu_dvmamap_load_mbuf_sg, - iommu_dvmamap_load_uio, + iommu_dvmamap_load_phys, + iommu_dvmamap_load_buffer, + iommu_dvmamap_waitok, + iommu_dvmamap_complete, iommu_dvmamap_unload, iommu_dvmamap_sync, iommu_dvmamem_alloc, Index: sparc64/sparc64/bus_machdep.c =================================================================== --- sparc64/sparc64/bus_machdep.c (.../head/sys) (revision 244874) +++ sparc64/sparc64/bus_machdep.c (.../projects/physbio/sys) (revision 244874) @@ -98,13 +98,11 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #include #include #include -#include #include #include @@ -326,247 +324,146 @@ nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmama } /* - * Utility 
function to load a linear buffer. lastaddrp holds state - * between invocations (for multiple-buffer loads). segp contains - * the starting segment on entrace, and the ending segment on exit. - * first indicates if this is the first invocation of this function. + * Add a single contiguous physical range to the segment list. */ static int -_nexus_dmamap_load_buffer(bus_dma_tag_t dmat, void *buf, bus_size_t buflen, - struct thread *td, int flags, bus_addr_t *lastaddrp, - bus_dma_segment_t *segs, int *segp, int first) +nexus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, + bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) { - bus_size_t sgsize; - bus_addr_t curaddr, lastaddr, baddr, bmask; - vm_offset_t vaddr = (vm_offset_t)buf; + bus_addr_t baddr, bmask; int seg; - pmap_t pmap; - if (td != NULL) - pmap = vmspace_pmap(td->td_proc->p_vmspace); - else - pmap = NULL; - - lastaddr = *lastaddrp; + /* + * Make sure we don't cross any boundaries. + */ bmask = ~(dmat->dt_boundary - 1); + if (dmat->dt_boundary > 0) { + baddr = (curaddr + dmat->dt_boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } - for (seg = *segp; buflen > 0 ; ) { - /* - * Get the physical address for this segment. - */ - if (pmap) - curaddr = pmap_extract(pmap, vaddr); - else - curaddr = pmap_kextract(vaddr); - - /* - * Compute the segment size, and adjust counts. - */ - sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); - if (sgsize > dmat->dt_maxsegsz) - sgsize = dmat->dt_maxsegsz; - if (buflen < sgsize) - sgsize = buflen; - - /* - * Make sure we don't cross any boundaries. - */ - if (dmat->dt_boundary > 0) { - baddr = (curaddr + dmat->dt_boundary) & bmask; - if (sgsize > (baddr - curaddr)) - sgsize = (baddr - curaddr); - } - - /* - * Insert chunk into a segment, coalescing with - * previous segment if possible. - */ - if (first) { + /* + * Insert chunk into a segment, coalescing with + * previous segment if possible. 
+ */ + seg = *segp; + if (seg == -1) { + seg = 0; + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } else { + if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && + (segs[seg].ds_len + sgsize) <= dmat->dt_maxsegsz && + (dmat->dt_boundary == 0 || + (segs[seg].ds_addr & bmask) == (curaddr & bmask))) + segs[seg].ds_len += sgsize; + else { + if (++seg >= dmat->dt_nsegments) + return (0); segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; - first = 0; - } else { - if (curaddr == lastaddr && - (segs[seg].ds_len + sgsize) <= dmat->dt_maxsegsz && - (dmat->dt_boundary == 0 || - (segs[seg].ds_addr & bmask) == (curaddr & bmask))) - segs[seg].ds_len += sgsize; - else { - if (++seg >= dmat->dt_nsegments) - break; - segs[seg].ds_addr = curaddr; - segs[seg].ds_len = sgsize; - } } - - lastaddr = curaddr + sgsize; - vaddr += sgsize; - buflen -= sgsize; } - *segp = seg; - *lastaddrp = lastaddr; - - /* - * Did we fit? - */ - return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ + return (sgsize); } /* - * Common function for loading a DMA map with a linear buffer. May - * be called by bus-specific DMA map load functions. - * - * Most SPARCs have IOMMUs in the bus controllers. In those cases - * they only need one segment and will use virtual addresses for DVMA. - * Those bus controllers should intercept these vectors and should - * *NEVER* call nexus_dmamap_load() which is used only by devices that - * bypass DVMA. + * Utility function to load a physical buffer. segp contains + * the starting segment on entrace, and the ending segment on exit. 
*/ static int -nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, - bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg, - int flags) +nexus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { - bus_addr_t lastaddr; - int error, nsegs; + bus_addr_t curaddr; + bus_size_t sgsize; - error = _nexus_dmamap_load_buffer(dmat, buf, buflen, NULL, flags, - &lastaddr, dmat->dt_segments, &nsegs, 1); + if (segs == NULL) + segs = dmat->dt_segments; - if (error == 0) { - (*callback)(callback_arg, dmat->dt_segments, nsegs + 1, 0); - map->dm_flags |= DMF_LOADED; - } else - (*callback)(callback_arg, NULL, 0, error); + curaddr = buf; + while (buflen > 0) { + sgsize = MIN(buflen, dmat->dt_maxsegsz); + sgsize = nexus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; + curaddr += sgsize; + buflen -= sgsize; + } - return (0); + /* + * Did we fit? + */ + return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } /* - * Like nexus_dmamap_load(), but for mbufs. + * Utility function to load a linear buffer. segp contains + * the starting segment on entrace, and the ending segment on exit. */ static int -nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, - bus_dmamap_callback2_t *callback, void *callback_arg, int flags) +nexus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, + bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, + int *segp) { - int nsegs, error; + bus_size_t sgsize; + bus_addr_t curaddr; + vm_offset_t vaddr = (vm_offset_t)buf; - M_ASSERTPKTHDR(m0); + if (segs == NULL) + segs = dmat->dt_segments; - nsegs = 0; - error = 0; - if (m0->m_pkthdr.len <= dmat->dt_maxsize) { - int first = 1; - bus_addr_t lastaddr = 0; - struct mbuf *m; + while (buflen > 0) { + /* + * Get the physical address for this segment.
+ */ + if (pmap == kernel_pmap) + curaddr = pmap_kextract(vaddr); + else + curaddr = pmap_extract(pmap, vaddr); - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len > 0) { - error = _nexus_dmamap_load_buffer(dmat, - m->m_data, m->m_len,NULL, flags, &lastaddr, - dmat->dt_segments, &nsegs, first); - first = 0; - } - } - } else { - error = EINVAL; - } + /* + * Compute the segment size, and adjust counts. + */ + sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); + if (sgsize > dmat->dt_maxsegsz) + sgsize = dmat->dt_maxsegsz; + if (buflen < sgsize) + sgsize = buflen; - if (error) { - /* force "no valid mappings" in callback */ - (*callback)(callback_arg, dmat->dt_segments, 0, 0, error); - } else { - map->dm_flags |= DMF_LOADED; - (*callback)(callback_arg, dmat->dt_segments, nsegs + 1, - m0->m_pkthdr.len, error); + sgsize = nexus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; + + vaddr += sgsize; + buflen -= sgsize; } - return (error); + + /* + * Did we fit? + */ + return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } -static int -nexus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, - bus_dma_segment_t *segs, int *nsegs, int flags) +static void +nexus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_memory_t mem, bus_dmamap_callback_t *callback, void *callback_arg) { - int error; - M_ASSERTPKTHDR(m0); - - *nsegs = 0; - error = 0; - if (m0->m_pkthdr.len <= dmat->dt_maxsize) { - int first = 1; - bus_addr_t lastaddr = 0; - struct mbuf *m; - - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len > 0) { - error = _nexus_dmamap_load_buffer(dmat, - m->m_data, m->m_len,NULL, flags, &lastaddr, - segs, nsegs, first); - first = 0; - } - } - } else { - error = EINVAL; - } - - ++*nsegs; - return (error); } -/* - * Like nexus_dmamap_load(), but for uios. 
- */ -static int -nexus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, - bus_dmamap_callback2_t *callback, void *callback_arg, int flags) +static bus_dma_segment_t * +nexus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_segment_t *segs, int nsegs, int error) { - bus_addr_t lastaddr; - int nsegs, error, first, i; - bus_size_t resid; - struct iovec *iov; - struct thread *td = NULL; - resid = uio->uio_resid; - iov = uio->uio_iov; - - if (uio->uio_segflg == UIO_USERSPACE) { - td = uio->uio_td; - KASSERT(td != NULL, ("%s: USERSPACE but no proc", __func__)); - } - - nsegs = 0; - error = 0; - first = 1; - for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { - /* - * Now at the first iovec to load. Load each iovec - * until we have exhausted the residual count. - */ - bus_size_t minlen = - resid < iov[i].iov_len ? resid : iov[i].iov_len; - caddr_t addr = (caddr_t) iov[i].iov_base; - - if (minlen > 0) { - error = _nexus_dmamap_load_buffer(dmat, addr, minlen, - td, flags, &lastaddr, dmat->dt_segments, &nsegs, - first); - first = 0; - - resid -= minlen; - } - } - - if (error) { - /* force "no valid mappings" in callback */ - (*callback)(callback_arg, dmat->dt_segments, 0, 0, error); - } else { - map->dm_flags |= DMF_LOADED; - (*callback)(callback_arg, dmat->dt_segments, nsegs + 1, - uio->uio_resid, error); - } - return (error); + if (segs == NULL) + segs = dmat->dt_segments; + return (segs); } /* @@ -669,10 +566,10 @@ nexus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, static struct bus_dma_methods nexus_dma_methods = { nexus_dmamap_create, nexus_dmamap_destroy, - nexus_dmamap_load, - nexus_dmamap_load_mbuf, - nexus_dmamap_load_mbuf_sg, - nexus_dmamap_load_uio, + nexus_dmamap_load_phys, + nexus_dmamap_load_buffer, + nexus_dmamap_waitok, + nexus_dmamap_complete, nexus_dmamap_unload, nexus_dmamap_sync, nexus_dmamem_alloc, Index: sparc64/include/bus_dma.h =================================================================== --- 
sparc64/include/bus_dma.h (.../head/sys) (revision 244874) +++ sparc64/include/bus_dma.h (.../projects/physbio/sys) (revision 244874) @@ -78,14 +78,17 @@ struct bus_dma_methods { int (*dm_dmamap_create)(bus_dma_tag_t, int, bus_dmamap_t *); int (*dm_dmamap_destroy)(bus_dma_tag_t, bus_dmamap_t); - int (*dm_dmamap_load)(bus_dma_tag_t, bus_dmamap_t, void *, - bus_size_t, bus_dmamap_callback_t *, void *, int); - int (*dm_dmamap_load_mbuf)(bus_dma_tag_t, bus_dmamap_t, - struct mbuf *, bus_dmamap_callback2_t *, void *, int); - int (*dm_dmamap_load_mbuf_sg)(bus_dma_tag_t, bus_dmamap_t, - struct mbuf *, bus_dma_segment_t *segs, int *nsegs, int); - int (*dm_dmamap_load_uio)(bus_dma_tag_t, bus_dmamap_t, struct uio *, - bus_dmamap_callback2_t *, void *, int); + int (*dm_dmamap_load_phys)(bus_dma_tag_t dmat, bus_dmamap_t map, + vm_paddr_t buf, bus_size_t buflen, int flags, + bus_dma_segment_t *segs, int *segp); + int (*dm_dmamap_load_buffer)(bus_dma_tag_t dmat, bus_dmamap_t map, + void *buf, bus_size_t buflen, struct pmap *pmap, int flags, + bus_dma_segment_t *segs, int *segp); + void (*dm_dmamap_waitok)(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_memory_t mem, bus_dmamap_callback_t *callback, + void *callback_arg); + bus_dma_segment_t *(*dm_dmamap_complete)(bus_dma_tag_t dmat, + bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int error); void (*dm_dmamap_unload)(bus_dma_tag_t, bus_dmamap_t); void (*dm_dmamap_sync)(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t); @@ -125,14 +128,16 @@ struct bus_dma_tag { ((t)->dt_mt->dm_dmamap_create((t), (f), (p))) #define bus_dmamap_destroy(t, p) \ ((t)->dt_mt->dm_dmamap_destroy((t), (p))) -#define bus_dmamap_load(t, m, p, s, cb, cba, f) \ - ((t)->dt_mt->dm_dmamap_load((t), (m), (p), (s), (cb), (cba), (f))) -#define bus_dmamap_load_mbuf(t, m, mb, cb, cba, f) \ - ((t)->dt_mt->dm_dmamap_load_mbuf((t), (m), (mb), (cb), (cba), (f))) -#define bus_dmamap_load_mbuf_sg(t, m, mb, segs, nsegs, f) \ - ((t)->dt_mt->dm_dmamap_load_mbuf_sg((t), 
(m), (mb), (segs), (nsegs), (f))) -#define bus_dmamap_load_uio(t, m, ui, cb, cba, f) \ - ((t)->dt_mt->dm_dmamap_load_uio((t), (m), (ui), (cb), (cba), (f))) +#define _bus_dmamap_load_phys(t, m, b, l, f, s, sp) \ + ((t)->dt_mt->dm_dmamap_load_phys((t), (m), (b), (l), \ + (f), (s), (sp))) +#define _bus_dmamap_load_buffer(t, m, b, l, p, f, s, sp) \ + ((t)->dt_mt->dm_dmamap_load_buffer((t), (m), (b), (l), (p), \ + (f), (s), (sp))) +#define _bus_dmamap_waitok(t, m, mem, c, ca) \ + ((t)->dt_mt->dm_dmamap_waitok((t), (m), (mem), (c), (ca))) +#define _bus_dmamap_complete(t, m, s, n, e) \ + ((t)->dt_mt->dm_dmamap_complete((t), (m), (s), (n), (e))) #define bus_dmamap_unload(t, p) \ ((t)->dt_mt->dm_dmamap_unload((t), (p))) #define bus_dmamap_sync(t, m, op) \ Index: rpc/krpc.h =================================================================== --- rpc/krpc.h (.../head/sys) (revision 244874) +++ rpc/krpc.h (.../projects/physbio/sys) (revision 244874) @@ -1,111 +0,0 @@ -/*- - * Sun RPC is a product of Sun Microsystems, Inc. and is provided for - * unrestricted use provided that this legend is included on all tape - * media and as a part of the software program in whole or part. Users - * may copy or modify Sun RPC without charge, but are not authorized - * to license or distribute it to anyone else except as part of a product or - * program developed by the user. - * - * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE - * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR - * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE. - * - * Sun RPC is provided with no support and without any obligation on the - * part of Sun Microsystems, Inc. to assist in its use, correction, - * modification or enhancement. - * - * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE - * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC - * OR ANY PART THEREOF. - * - * In no event will Sun Microsystems, Inc. 
be liable for any lost revenue - * or profits or other special, indirect and consequential damages, even if - * Sun has been advised of the possibility of such damages. - * - * Sun Microsystems, Inc. - * 2550 Garcia Avenue - * Mountain View, California 94043 - * - * $FreeBSD$ - */ - -#ifndef _RPC_KRPC_H_ -#define _RPC_KRPC_H_ - -#ifdef _KERNEL -/* - * Definitions now shared between client and server RPC for backchannels. - */ -#define MCALL_MSG_SIZE 24 - -/* - * A pending RPC request which awaits a reply. Requests which have - * received their reply will have cr_xid set to zero and cr_mrep to - * the mbuf chain of the reply. - */ -struct ct_request { - TAILQ_ENTRY(ct_request) cr_link; - uint32_t cr_xid; /* XID of request */ - struct mbuf *cr_mrep; /* reply received by upcall */ - int cr_error; /* any error from upcall */ - char cr_verf[MAX_AUTH_BYTES]; /* reply verf */ -}; - -TAILQ_HEAD(ct_request_list, ct_request); - -struct rc_data { - struct mtx rc_lock; - struct sockaddr_storage rc_addr; /* server address */ - struct netconfig* rc_nconf; /* network type */ - rpcprog_t rc_prog; /* program number */ - rpcvers_t rc_vers; /* version number */ - size_t rc_sendsz; - size_t rc_recvsz; - struct timeval rc_timeout; - struct timeval rc_retry; - int rc_retries; - int rc_privport; - char *rc_waitchan; - int rc_intr; - int rc_connecting; - int rc_closed; - struct ucred *rc_ucred; - CLIENT* rc_client; /* underlying RPC client */ - struct rpc_err rc_err; - void *rc_backchannel; -}; - -struct ct_data { - struct mtx ct_lock; - int ct_threads; /* number of threads in clnt_vc_call */ - bool_t ct_closing; /* TRUE if we are closing */ - bool_t ct_closed; /* TRUE if we are closed */ - struct socket *ct_socket; /* connection socket */ - bool_t ct_closeit; /* close it on destroy */ - struct timeval ct_wait; /* wait interval in milliseconds */ - struct sockaddr_storage ct_addr; /* remote addr */ - struct rpc_err ct_error; - uint32_t ct_xid; - char ct_mcallc[MCALL_MSG_SIZE]; /* 
marshalled callmsg */ - size_t ct_mpos; /* pos after marshal */ - const char *ct_waitchan; - int ct_waitflag; - struct mbuf *ct_record; /* current reply record */ - size_t ct_record_resid; /* how much left of reply to read */ - bool_t ct_record_eor; /* true if reading last fragment */ - struct ct_request_list ct_pending; - int ct_upcallrefs; /* Ref cnt of upcalls in prog. */ - SVCXPRT *ct_backchannelxprt; /* xprt for backchannel */ -}; - -struct cf_conn { /* kept in xprt->xp_p1 for actual connection */ - enum xprt_stat strm_stat; - struct mbuf *mpending; /* unparsed data read from the socket */ - struct mbuf *mreq; /* current record being built from mpending */ - uint32_t resid; /* number of bytes needed for fragment */ - bool_t eor; /* reading last fragment of current record */ -}; - -#endif /* _KERNEL */ - -#endif /* _RPC_KRPC_H_ */ Index: conf/files =================================================================== --- conf/files (.../head/sys) (revision 244874) +++ conf/files (.../projects/physbio/sys) (revision 244874) @@ -2671,6 +2671,7 @@ kern/subr_acl_posix1e.c optional ufs_acl kern/subr_autoconf.c standard kern/subr_blist.c standard kern/subr_bus.c standard +kern/subr_busdma.c standard kern/subr_bufring.c standard kern/subr_clock.c standard kern/subr_devstat.c standard Property changes on: conf ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/conf:r243873-244873 Index: kern/subr_busdma_bufalloc.c =================================================================== --- kern/subr_busdma_bufalloc.c (.../head/sys) (revision 244874) +++ kern/subr_busdma_bufalloc.c (.../projects/physbio/sys) (revision 244874) @@ -1,174 +0,0 @@ -/*- - * Copyright (c) 2012 Ian Lepore - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -/* - * Buffer allocation support routines for bus_dmamem_alloc implementations. - */ - -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -/* - * We manage buffer zones up to a page in size. Buffers larger than a page can - * be managed by one of the kernel's page-oriented memory allocation routines as - * efficiently as what we can do here. Also, a page is the largest size for - * which we can g'tee contiguity when using uma, and contiguity is one of the - * requirements we have to fulfill. - */ -#define MIN_ZONE_BUFSIZE 32 -#define MAX_ZONE_BUFSIZE PAGE_SIZE - -/* - * The static array of 12 bufzones is big enough to handle all the zones for the - * smallest supported allocation size of 32 through the largest supported page - * size of 64K. 
If you up the biggest page size number, up the array size too. - * Basically the size of the array needs to be log2(maxsize)-log2(minsize)+1, - * but I don't know of an easy way to express that as a compile-time constant. - */ -#if PAGE_SIZE > 65536 -#error Unsupported page size -#endif - -struct busdma_bufalloc { - bus_size_t min_size; - size_t num_zones; - struct busdma_bufzone buf_zones[12]; -}; - -busdma_bufalloc_t -busdma_bufalloc_create(const char *name, bus_size_t minimum_alignment, - uma_alloc alloc_func, uma_free free_func, u_int32_t zcreate_flags) -{ - struct busdma_bufalloc *ba; - struct busdma_bufzone *bz; - int i; - bus_size_t cursize; - - ba = malloc(sizeof(struct busdma_bufalloc), M_DEVBUF, - M_ZERO | M_WAITOK); - - ba->min_size = MAX(MIN_ZONE_BUFSIZE, minimum_alignment); - - /* - * Each uma zone is created with an alignment of size-1, meaning that - * the alignment is equal to the size (I.E., 64 byte buffers are aligned - * to 64 byte boundaries, etc). This allows for a fast efficient test - * when deciding whether a pool buffer meets the constraints of a given - * tag used for allocation: the buffer is usable if tag->alignment <= - * bufzone->size. 
- */ - for (i = 0, bz = ba->buf_zones, cursize = ba->min_size; - i < nitems(ba->buf_zones) && cursize <= MAX_ZONE_BUFSIZE; - ++i, ++bz, cursize <<= 1) { - snprintf(bz->name, sizeof(bz->name), "dma %.10s %lu", - name, cursize); - bz->size = cursize; - bz->umazone = uma_zcreate(bz->name, bz->size, - NULL, NULL, NULL, NULL, bz->size - 1, zcreate_flags); - if (bz->umazone == NULL) { - busdma_bufalloc_destroy(ba); - return (NULL); - } - if (alloc_func != NULL) - uma_zone_set_allocf(bz->umazone, alloc_func); - if (free_func != NULL) - uma_zone_set_freef(bz->umazone, free_func); - ++ba->num_zones; - } - - return (ba); -} - -void -busdma_bufalloc_destroy(busdma_bufalloc_t ba) -{ - struct busdma_bufzone *bz; - int i; - - if (ba == NULL) - return; - - for (i = 0, bz = ba->buf_zones; i < ba->num_zones; ++i, ++bz) { - uma_zdestroy(bz->umazone); - } - - free(ba, M_DEVBUF); -} - -struct busdma_bufzone * -busdma_bufalloc_findzone(busdma_bufalloc_t ba, bus_size_t size) -{ - struct busdma_bufzone *bz; - int i; - - if (size > MAX_ZONE_BUFSIZE) - return (NULL); - - for (i = 0, bz = ba->buf_zones; i < ba->num_zones; ++i, ++bz) { - if (bz->size >= size) - return (bz); - } - - panic("Didn't find a buffer zone of the right size"); -} - -void * -busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, int size, u_int8_t *pflag, - int wait) -{ -#ifdef VM_MEMATTR_UNCACHEABLE - - /* Inform UMA that this allocator uses kernel_map/object. 
*/ - *pflag = UMA_SLAB_KERNEL; - - return ((void *)kmem_alloc_attr(kernel_map, size, wait, 0, - BUS_SPACE_MAXADDR, VM_MEMATTR_UNCACHEABLE)); - -#else - - panic("VM_MEMATTR_UNCACHEABLE unavailable"); - -#endif /* VM_MEMATTR_UNCACHEABLE */ -} - -void -busdma_bufalloc_free_uncacheable(void *item, int size, u_int8_t pflag) -{ - - kmem_free(kernel_map, (vm_offset_t)item, size); -} - Index: kern/subr_uio.c =================================================================== --- kern/subr_uio.c (.../head/sys) (revision 244874) +++ kern/subr_uio.c (.../projects/physbio/sys) (revision 244874) @@ -152,7 +152,53 @@ copyout_nofault(const void *kaddr, void *udaddr, s return (error); } +#define PHYS_PAGE_COUNT(len) (howmany(len, PAGE_SIZE) + 1) + int +physcopyin(void *src, vm_paddr_t dst, size_t len) +{ + vm_page_t m[PHYS_PAGE_COUNT(len)]; + struct iovec iov[1]; + struct uio uio; + int i; + + iov[0].iov_base = src; + iov[0].iov_len = len; + uio.uio_iov = iov; + uio.uio_iovcnt = 1; + uio.uio_offset = 0; + uio.uio_resid = len; + uio.uio_segflg = UIO_SYSSPACE; + uio.uio_rw = UIO_WRITE; + for (i = 0; i < PHYS_PAGE_COUNT(len); i++, dst += PAGE_SIZE) + m[i] = PHYS_TO_VM_PAGE(dst); + return uiomove_fromphys(m, dst & PAGE_MASK, len, &uio); +} + +int +physcopyout(vm_paddr_t src, void *dst, size_t len) +{ + vm_page_t m[PHYS_PAGE_COUNT(len)]; + struct iovec iov[1]; + struct uio uio; + int i; + + iov[0].iov_base = dst; + iov[0].iov_len = len; + uio.uio_iov = iov; + uio.uio_iovcnt = 1; + uio.uio_offset = 0; + uio.uio_resid = len; + uio.uio_segflg = UIO_SYSSPACE; + uio.uio_rw = UIO_READ; + for (i = 0; i < PHYS_PAGE_COUNT(len); i++, src += PAGE_SIZE) + m[i] = PHYS_TO_VM_PAGE(src); + return uiomove_fromphys(m, src & PAGE_MASK, len, &uio); +} + +#undef PHYS_PAGE_COUNT + +int uiomove(void *cp, int n, struct uio *uio) { Index: kern/subr_busdma.c =================================================================== --- kern/subr_busdma.c (.../head/sys) (revision 0) +++ kern/subr_busdma.c
(.../projects/physbio/sys) (revision 244874) @@ -0,0 +1,455 @@ +/*- + * Copyright (c) 2012 EMC Corp. + * All rights reserved. + * + * Copyright (c) 1997, 1998 Justin T. Gibbs. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_bus.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include + +/* + * Load a list of virtual addresses. 
+ */ +static int +_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs, + int flags) +{ + int error; + + error = 0; + for (; sglist_cnt > 0; sglist_cnt--, list++) { + error = _bus_dmamap_load_buffer(dmat, map, + (void *)list->ds_addr, list->ds_len, pmap, flags, NULL, + nsegs); + if (error) + break; + } + return (error); +} + +/* + * Load a list of physical addresses. + */ +static int +_bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags) +{ + int error; + + error = 0; + for (; sglist_cnt > 0; sglist_cnt--, list++) { + error = _bus_dmamap_load_phys(dmat, map, + (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL, + nsegs); + if (error) + break; + } + return (error); +} + +/* + * Load an mbuf chain. + */ +static int +_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, + struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags) +{ + struct mbuf *m; + int error; + + M_ASSERTPKTHDR(m0); + + error = 0; + for (m = m0; m != NULL && error == 0; m = m->m_next) { + if (m->m_len > 0) { + error = _bus_dmamap_load_buffer(dmat, map, m->m_data, + m->m_len, kernel_pmap, flags, segs, nsegs); + } + } + CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", + __func__, dmat, flags, error, *nsegs); + return (error); +} + +/* + * Load from block io. + */ +static int +_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio, + int *nsegs, int flags) +{ + int error; + + error = _bus_dmamap_load_buffer(dmat, map, bio->bio_data, + bio->bio_bcount, kernel_pmap, flags, NULL, nsegs); + + return (error); +} + +/* + * Load a cam control block. 
+ */ +static int +_bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb, + int *nsegs, int flags) +{ + struct ccb_ataio *ataio; + struct ccb_scsiio *csio; + struct ccb_hdr *ccb_h; + void *data_ptr; + int error; + uint32_t dxfer_len; + uint16_t sglist_cnt; + + error = 0; + ccb_h = &ccb->ccb_h; + switch (ccb_h->func_code) { + case XPT_SCSI_IO: + csio = &ccb->csio; + data_ptr = csio->data_ptr; + dxfer_len = csio->dxfer_len; + sglist_cnt = csio->sglist_cnt; + break; + case XPT_ATA_IO: + ataio = &ccb->ataio; + data_ptr = ataio->data_ptr; + dxfer_len = ataio->dxfer_len; + sglist_cnt = 0; + break; + default: + panic("_bus_dmamap_load_ccb: Unsupported func code %d", + ccb_h->func_code); + } + + switch ((ccb_h->flags & CAM_DATA_MASK)) { + case CAM_DATA_VADDR: + error = _bus_dmamap_load_buffer(dmat, map, data_ptr, dxfer_len, + kernel_pmap, flags, NULL, nsegs); + break; + case CAM_DATA_PADDR: + error = _bus_dmamap_load_phys(dmat, map, + (vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, flags, NULL, + nsegs); + break; + case CAM_DATA_SG: + error = _bus_dmamap_load_vlist(dmat, map, + (bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap, + nsegs, flags); + break; + case CAM_DATA_SG_PADDR: + error = _bus_dmamap_load_plist(dmat, map, + (bus_dma_segment_t *)data_ptr, sglist_cnt, nsegs, flags); + break; + case CAM_DATA_BIO: + error = _bus_dmamap_load_bio(dmat, map, (struct bio *)data_ptr, + nsegs, flags); + break; + default: + panic("_bus_dmamap_load_ccb: flags 0x%X unimplemented", + ccb_h->flags); + } + return (error); +} + +/* + * Load a uio. + */ +static int +_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, + pmap_t pmap, int *nsegs, int flags) +{ + bus_size_t resid; + bus_size_t minlen; + struct iovec *iov; + caddr_t addr; + int error, i; + + resid = uio->uio_resid; + iov = uio->uio_iov; + error = 0; + + for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { + /* + * Now at the first iovec to load. 
Load each iovec + * until we have exhausted the residual count. + */ + + addr = (caddr_t) iov[i].iov_base; + minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; + if (minlen > 0) { + error = _bus_dmamap_load_buffer(dmat, map, addr, + minlen, pmap, flags, NULL, nsegs); + resid -= minlen; + } + } + + return (error); +} + +/* + * Map the buffer buf into bus space using the dmamap map. + */ +int +bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, + bus_size_t buflen, bus_dmamap_callback_t *callback, + void *callback_arg, int flags) +{ + bus_dma_segment_t *segs; + int error; + int nsegs; + + if ((flags & BUS_DMA_NOWAIT) == 0) + _bus_dmamap_waitok(dmat, map, dma_mem_vaddr(buf, buflen), + callback, callback_arg); + + nsegs = -1; + error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap, + flags, NULL, &nsegs); + nsegs++; + + CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", + __func__, dmat, flags, error, nsegs + 1); + + if (error == EINPROGRESS) + return (error); + + segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); + if (error) + (*callback)(callback_arg, segs, 0, error); + else + (*callback)(callback_arg, segs, nsegs, 0); + + /* + * Return ENOMEM to the caller so that it can pass it up the stack. + * This error only happens when NOWAIT is set, so deferal is disabled. 
+ */ + if (error == ENOMEM) + return (error); + + return (0); +} + +int +bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, + bus_dmamap_callback2_t *callback, void *callback_arg, int flags) +{ + bus_dma_segment_t *segs; + int nsegs, error; + + flags |= BUS_DMA_NOWAIT; + nsegs = -1; + error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags); + ++nsegs; + + segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); + if (error) + (*callback)(callback_arg, segs, 0, 0, error); + else + (*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len, error); + + CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", + __func__, dmat, flags, error, nsegs); + return (error); +} + +int +bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, + bus_dma_segment_t *segs, int *nsegs, int flags) +{ + int error; + + flags |= BUS_DMA_NOWAIT; + *nsegs = -1; + error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags); + ++*nsegs; + _bus_dmamap_complete(dmat, map, segs, *nsegs, error); + return (error); +} + +int +bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, + bus_dmamap_callback2_t *callback, void *callback_arg, int flags) +{ + bus_dma_segment_t *segs; + int nsegs, error; + pmap_t pmap; + + flags |= BUS_DMA_NOWAIT; + if (uio->uio_segflg == UIO_USERSPACE) { + KASSERT(uio->uio_td != NULL, + ("bus_dmamap_load_uio: USERSPACE but no proc")); + pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); + } else + pmap = kernel_pmap; + + nsegs = -1; + error = _bus_dmamap_load_uio(dmat, map, uio, pmap, &nsegs, flags); + nsegs++; + + segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); + if (error) + (*callback)(callback_arg, segs, 0, 0, error); + else + (*callback)(callback_arg, segs, nsegs, uio->uio_resid, error); + + CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", + __func__, dmat, flags, error, nsegs); + return (error); +} + +int +bus_dmamap_load_ccb(bus_dma_tag_t
dmat, bus_dmamap_t map, union ccb *ccb, + bus_dmamap_callback_t *callback, void *callback_arg, + int flags) +{ + bus_dma_segment_t *segs; + struct ccb_hdr *ccb_h; + int error; + int nsegs; + + ccb_h = &ccb->ccb_h; + if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) { + callback(callback_arg, NULL, 0, 0); + return (0); + } + if ((flags & BUS_DMA_NOWAIT) == 0) + _bus_dmamap_waitok(dmat, map, dma_mem_ccb(ccb), callback, + callback_arg); + nsegs = -1; + error = _bus_dmamap_load_ccb(dmat, map, ccb, &nsegs, flags); + nsegs++; + if (error == EINPROGRESS) + return (error); + + segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); + if (error) + (*callback)(callback_arg, segs, 0, error); + else + (*callback)(callback_arg, segs, nsegs, error); + /* + * Return ENOMEM to the caller so that it can pass it up the stack. + * This error only happens when NOWAIT is set, so deferral is disabled. + */ + if (error == ENOMEM) + return (error); + + return (0); +} + +int +bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_memory_t *mem, bus_dmamap_callback_t *callback, + void *callback_arg, int flags) +{ + bus_dma_segment_t *segs; + int error; + int nsegs; + + if ((flags & BUS_DMA_NOWAIT) == 0) + _bus_dmamap_waitok(dmat, map, *mem, callback, callback_arg); + + nsegs = -1; + error = 0; + switch (mem->dm_type) { + case BUS_DMAMEM_VADDR: + error = _bus_dmamap_load_buffer(dmat, map, mem->u.dm_vaddr, + mem->dm_opaque, kernel_pmap, flags, NULL, &nsegs); + break; + case BUS_DMAMEM_PADDR: + error = _bus_dmamap_load_phys(dmat, map, mem->u.dm_paddr, + mem->dm_opaque, flags, NULL, &nsegs); + break; + case BUS_DMAMEM_VLIST: + error = _bus_dmamap_load_vlist(dmat, map, mem->u.dm_list, + mem->dm_opaque, kernel_pmap, &nsegs, flags); + break; + case BUS_DMAMEM_PLIST: + error = _bus_dmamap_load_plist(dmat, map, mem->u.dm_list, + mem->dm_opaque, &nsegs, flags); + break; + case BUS_DMAMEM_BIO: + error = _bus_dmamap_load_bio(dmat, map, mem->u.dm_bio, + &nsegs, flags); + break; + case 
BUS_DMAMEM_UIO: + error = _bus_dmamap_load_uio(dmat, map, mem->u.dm_uio, + /*XXX*/kernel_pmap, &nsegs, flags); + break; + case BUS_DMAMEM_MBUF: + error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.dm_mbuf, + NULL, &nsegs, flags); + break; + case BUS_DMAMEM_CCB: + error = _bus_dmamap_load_ccb(dmat, map, mem->u.dm_ccb, &nsegs, + flags); + break; + } + nsegs++; + + CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", + __func__, dmat, flags, error, nsegs + 1); + + if (error == EINPROGRESS) + return (error); + + segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); + if (error) + (*callback)(callback_arg, segs, 0, error); + else + (*callback)(callback_arg, segs, nsegs, 0); + + /* + * Return ENOMEM to the caller so that it can pass it up the stack. + * This error only happens when NOWAIT is set, so deferral is disabled. + */ + if (error == ENOMEM) + return (error); + + return (0); +} Index: cam/scsi/scsi_pass.c =================================================================== --- cam/scsi/scsi_pass.c (.../head/sys) (revision 244874) +++ cam/scsi/scsi_pass.c (.../projects/physbio/sys) (revision 244874) @@ -696,8 +696,11 @@ passsendccb(struct cam_periph *periph, union ccb * * do the right thing, even if there isn't data to map, but since CCBs * without data are a reasonably common occurance (e.g. test unit * ready), it will save a few cycles if we check for it here. + * + * XXX What happens if a sg list is supplied? We don't filter that + * out. 
*/ - if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) + if (((ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) && (((ccb->ccb_h.func_code == XPT_SCSI_IO || ccb->ccb_h.func_code == XPT_ATA_IO) && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE)) Index: cam/scsi/scsi_target.c =================================================================== --- cam/scsi/scsi_target.c (.../head/sys) (revision 244874) +++ cam/scsi/scsi_target.c (.../projects/physbio/sys) (revision 244874) @@ -737,7 +737,7 @@ targsendccb(struct targ_softc *softc, union ccb *c * without data are a reasonably common occurance (e.g. test unit * ready), it will save a few cycles if we check for it here. */ - if (((ccb_h->flags & CAM_DATA_PHYS) == 0) + if (((ccb_h->flags & CAM_DATA_MASK) == CAM_DATA_VADDR) && (((ccb_h->func_code == XPT_CONT_TARGET_IO) && ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE)) || (ccb_h->func_code == XPT_DEV_MATCH))) { Index: cam/cam_xpt.c =================================================================== --- cam/cam_xpt.c (.../head/sys) (revision 244874) +++ cam/cam_xpt.c (.../projects/physbio/sys) (revision 244874) @@ -547,7 +547,8 @@ xptioctl(struct cdev *dev, u_long cmd, caddr_t add * We can't deal with physical addresses for this * type of transaction. 
*/ - if (inccb->ccb_h.flags & CAM_DATA_PHYS) { + if ((inccb->ccb_h.flags & CAM_DATA_MASK) != + CAM_DATA_VADDR) { error = EINVAL; break; } Index: cam/ctl/scsi_ctl.c =================================================================== --- cam/ctl/scsi_ctl.c (.../head/sys) (revision 244874) +++ cam/ctl/scsi_ctl.c (.../projects/physbio/sys) (revision 244874) @@ -889,6 +889,7 @@ ctlfestart(struct cam_periph *periph, union ccb *s csio->cdb_len = atio->cdb_len; + flags &= ~CAM_DATA_MASK; if (io->scsiio.kern_sg_entries == 0) { /* No S/G list */ data_ptr = io->scsiio.kern_data_ptr; @@ -896,7 +897,9 @@ ctlfestart(struct cam_periph *periph, union ccb *s csio->sglist_cnt = 0; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) - flags |= CAM_DATA_PHYS; + flags |= CAM_DATA_PADDR; + else + flags |= CAM_DATA_VADDR; } else if (io->scsiio.kern_sg_entries <= (sizeof(cmd_info->cam_sglist)/ sizeof(cmd_info->cam_sglist[0]))) { @@ -920,11 +923,10 @@ ctlfestart(struct cam_periph *periph, union ccb *s ctl_sglist[i].len; } csio->sglist_cnt = io->scsiio.kern_sg_entries; - flags |= CAM_SCATTER_VALID; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) - flags |= CAM_SG_LIST_PHYS; + flags |= CAM_DATA_SG_PADDR; else - flags &= ~CAM_SG_LIST_PHYS; + flags &= ~CAM_DATA_SG; data_ptr = (uint8_t *)cam_sglist; dxfer_len = io->scsiio.kern_data_len; } else { Index: cam/ctl/ctl_frontend_cam_sim.c =================================================================== --- cam/ctl/ctl_frontend_cam_sim.c (.../head/sys) (revision 244874) +++ cam/ctl/ctl_frontend_cam_sim.c (.../projects/physbio/sys) (revision 244874) @@ -93,8 +93,8 @@ struct cfcs_softc { * handle physical addresses yet. That would require mapping things in * order to do the copy. 
*/ -#define CFCS_BAD_CCB_FLAGS (CAM_DATA_PHYS | CAM_SG_LIST_PHYS | \ - CAM_MSG_BUF_PHYS | CAM_SNS_BUF_PHYS | CAM_CDB_PHYS | CAM_SENSE_PTR |\ +#define CFCS_BAD_CCB_FLAGS (CAM_DATA_ISPHYS | CAM_MSG_BUF_PHYS | \ + CAM_SNS_BUF_PHYS | CAM_CDB_PHYS | CAM_SENSE_PTR | \ CAM_SENSE_PHYS) int cfcs_init(void); @@ -379,36 +379,35 @@ cfcs_datamove(union ctl_io *io) * Simplify things on both sides by putting single buffers into a * single entry S/G list. */ - if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { - if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) { - /* We should filter this out on entry */ - panic("%s: physical S/G list, should not get here", - __func__); - } else { - int len_seen; + switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) { + case CAM_DATA_SG: { + int len_seen; - cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; - cam_sg_count = ccb->csio.sglist_cnt; + cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; + cam_sg_count = ccb->csio.sglist_cnt; - for (i = 0, len_seen = 0; i < cam_sg_count; i++) { - if ((len_seen + cam_sglist[i].ds_len) >= - io->scsiio.kern_rel_offset) { - cam_sg_start = i; - cam_sg_offset = - io->scsiio.kern_rel_offset - - len_seen; - break; - } - len_seen += cam_sglist[i].ds_len; + for (i = 0, len_seen = 0; i < cam_sg_count; i++) { + if ((len_seen + cam_sglist[i].ds_len) >= + io->scsiio.kern_rel_offset) { + cam_sg_start = i; + cam_sg_offset = io->scsiio.kern_rel_offset - + len_seen; + break; } + len_seen += cam_sglist[i].ds_len; } - } else { + break; + } + case CAM_DATA_VADDR: cam_sglist = &cam_sg_entry; cam_sglist[0].ds_len = ccb->csio.dxfer_len; cam_sglist[0].ds_addr = (bus_addr_t)ccb->csio.data_ptr; cam_sg_count = 1; cam_sg_start = 0; cam_sg_offset = io->scsiio.kern_rel_offset; + break; + default: + panic("Invalid CAM flags %#x", ccb->ccb_h.flags); } if (io->scsiio.kern_sg_entries > 0) { Index: cam/cam_ccb.h =================================================================== --- cam/cam_ccb.h (.../head/sys) (revision 244874) +++ cam/cam_ccb.h 
(.../projects/physbio/sys) (revision 244874) @@ -64,27 +64,33 @@ typedef enum { * Perform transport negotiation * with this command. */ - CAM_SCATTER_VALID = 0x00000010,/* Scatter/gather list is valid */ - CAM_DIS_AUTOSENSE = 0x00000020,/* Disable autosense feature */ + CAM_DIS_AUTOSENSE = 0x00000010,/* Disable autosense feature */ CAM_DIR_BOTH = 0x00000000,/* Data direction (00:IN/OUT) */ - CAM_DIR_IN = 0x00000040,/* Data direction (01:DATA IN) */ - CAM_DIR_OUT = 0x00000080,/* Data direction (10:DATA OUT) */ - CAM_DIR_NONE = 0x000000C0,/* Data direction (11:no data) */ - CAM_DIR_MASK = 0x000000C0,/* Data direction Mask */ - CAM_SOFT_RST_OP = 0x00000100,/* Use Soft reset alternative */ - CAM_ENG_SYNC = 0x00000200,/* Flush resid bytes on complete */ - CAM_DEV_QFRZDIS = 0x00000400,/* Disable DEV Q freezing */ - CAM_DEV_QFREEZE = 0x00000800,/* Freeze DEV Q on execution */ - CAM_HIGH_POWER = 0x00001000,/* Command takes a lot of power */ - CAM_SENSE_PTR = 0x00002000,/* Sense data is a pointer */ - CAM_SENSE_PHYS = 0x00004000,/* Sense pointer is physical addr*/ - CAM_TAG_ACTION_VALID = 0x00008000,/* Use the tag action in this ccb*/ - CAM_PASS_ERR_RECOVER = 0x00010000,/* Pass driver does err. recovery*/ - CAM_DIS_DISCONNECT = 0x00020000,/* Disable disconnect */ - CAM_SG_LIST_PHYS = 0x00040000,/* SG list has physical addrs. */ - CAM_MSG_BUF_PHYS = 0x00080000,/* Message buffer ptr is physical*/ - CAM_SNS_BUF_PHYS = 0x00100000,/* Autosense data ptr is physical*/ - CAM_DATA_PHYS = 0x00200000,/* SG/Buffer data ptrs are phys. 
*/ + CAM_DIR_IN = 0x00000020,/* Data direction (01:DATA IN) */ + CAM_DIR_OUT = 0x00000040,/* Data direction (10:DATA OUT) */ + CAM_DIR_NONE = 0x00000060,/* Data direction (11:no data) */ + CAM_DIR_MASK = 0x00000060,/* Data direction Mask */ + CAM_DATA_ISPHYS = 0x00000080,/* Data type with physical addrs */ + CAM_DATA_VADDR = 0x00000000,/* Data type (000:Virtual) */ + CAM_DATA_PADDR = 0x00000080,/* Data type (001:Physical) */ + CAM_DATA_SG = 0x00000100,/* Data type (010:sglist) */ + CAM_DATA_SG_PADDR = 0x00000180,/* Data type (011:sglist phys) */ + CAM_DATA_BIO = 0x00000200,/* Data type (100:bio) */ + CAM_DATA_MASK = 0x00000380,/* Data type mask */ + CAM_SOFT_RST_OP = 0x00000400,/* Use Soft reset alternative */ + CAM_ENG_SYNC = 0x00000800,/* Flush resid bytes on complete */ + CAM_DEV_QFRZDIS = 0x00001000,/* Disable DEV Q freezing */ + CAM_DEV_QFREEZE = 0x00002000,/* Freeze DEV Q on execution */ + CAM_HIGH_POWER = 0x00004000,/* Command takes a lot of power */ + CAM_SENSE_PTR = 0x00008000,/* Sense data is a pointer */ + CAM_SENSE_PHYS = 0x00010000,/* Sense pointer is physical addr*/ + CAM_TAG_ACTION_VALID = 0x00020000,/* Use the tag action in this ccb*/ + CAM_PASS_ERR_RECOVER = 0x00040000,/* Pass driver does err. 
recovery*/ + CAM_DIS_DISCONNECT = 0x00080000,/* Disable disconnect */ + CAM_MSG_BUF_PHYS = 0x00100000,/* Message buffer ptr is physical*/ + CAM_SNS_BUF_PHYS = 0x00200000,/* Autosense data ptr is physical*/ + + CAM_CDB_PHYS = 0x00400000,/* CDB pointer is physical */ CAM_ENG_SGLIST = 0x00800000,/* SG list is for the HBA engine */ Property changes on: boot/ia64/efi ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/boot/ia64/efi:r243873-244873 Property changes on: boot/ia64/ski ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/boot/ia64/ski:r243873-244873 Index: boot/fdt/dts/versatilepb.dts =================================================================== --- boot/fdt/dts/versatilepb.dts (.../head/sys) (revision 244874) +++ boot/fdt/dts/versatilepb.dts (.../projects/physbio/sys) (revision 244874) @@ -1,118 +0,0 @@ -/* - * $FreeBSD$ - */ -/dts-v1/; - -/ { - model = "ARM Versatile PB"; - #address-cells = <1>; - #size-cells = <1>; - compatible = "arm,versatile-pb"; - - amba { - compatible = "simple-bus"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - - intc: interrupt-controller { - compatible = "arm,versatile-vic"; - reg = <0x10140000 0x1000>; - - interrupt-controller; - #interrupt-cells = <1>; - }; - - sic: secondary-interrupt-controller { - compatible = "arm,versatile-sic"; - reg = <0x10003000 0x28>; - - interrupt-controller; - #interrupt-cells = <1>; - }; - - uart0: uart0 { - compatible = "arm,pl011", "arm,primecell"; - reg = <0x101f1000 0x1000>; - interrupts = <12>; - interrupt-parent = <&intc>; - clock-frequency = <3000000>; - reg-shift = <2>; - }; - - uart1: uart1 { - compatible = "arm,pl011", "arm,primecell"; - reg = <0x101f2000 0x1000>; - interrupts = <13>; - interrupt-parent = <&intc>; - clock-frequency = <3000000>; - reg-shift = <2>; - }; - - uart2: uart2 { - compatible = "arm,pl011", "arm,primecell"; - reg = <0x101f3000 0x1000>; - 
interrupts = <14>; - interrupt-parent = <&intc>; - clock-frequency = <3000000>; - reg-shift = <2>; - }; - - timer0 { - compatible = "arm,sp804", "arm,primecell"; - reg = <0x101e2000 0x40>; - interrupts = <4>; - interrupt-parent = <&intc>; - }; - - pci0 { - - compatible = "versatile,pci"; - reg = <0x10000044 0x4 - 0x10001000 0x1000 - 0x41000000 0x01000000 - 0x42000000 0x02000000>; - }; - - net { - compatible = "smsc,lan91c111"; - reg = <0x10010000 0x10000>; - interrupts = <25>; - interrupt-parent = <&intc>; - }; - - display { - compatible = "arm,pl110", "arm,primecell"; - reg = <0x10000050 4 - 0x10120000 0x1000>; - interrupts = <16>; - interrupt-parent = <&intc>; - }; - - /* - * Cut corner here: we do not have proper interrupt - * controllers cascading so just hardwire SIC IRQ 3 - * to VIC IRQ31 - */ - kmi { - compatible = "arm,pl050", "arm,primecell"; - reg = <0x10006000 0x1000>; - interrupt-parent = <&intc>; - interrupts = <31>; - }; - }; - - memory { - device_type = "memory"; - reg = <0 0x08000000>; /* 128MB */ - }; - - aliases { - uart0 = &uart0; - }; - - chosen { - stdin = "uart0"; - stdout = "uart0"; - }; -}; Property changes on: boot/powerpc/boot1.chrp ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/boot/powerpc/boot1.chrp:r243873-244873 Property changes on: boot/powerpc/ofw ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/boot/powerpc/ofw:r243873-244873 Property changes on: boot/i386/efi ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/boot/i386/efi:r243873-244873 Property changes on: boot ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys/boot:r243873-244873 Index: ia64/ia64/busdma_machdep.c =================================================================== --- ia64/ia64/busdma_machdep.c (.../head/sys) (revision 244874) 
+++ ia64/ia64/busdma_machdep.c (.../projects/physbio/sys) (revision 244874) @@ -31,14 +31,13 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #include #include #include -#include #include +#include #include #include @@ -73,6 +72,7 @@ struct bounce_page { vm_offset_t vaddr; /* kva of bounce buffer */ bus_addr_t busaddr; /* Physical address */ vm_offset_t datavaddr; /* kva of client data */ + bus_addr_t dataaddr; /* client physical address */ bus_size_t datacount; /* client data count */ STAILQ_ENTRY(bounce_page) links; }; @@ -107,8 +107,7 @@ struct bus_dmamap { int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; - void *buf; /* unmapped buffer pointer */ - bus_size_t buflen; /* unmapped buffer length */ + bus_dma_memory_t mem; bus_dmamap_callback_t *callback; void *callback_arg; STAILQ_ENTRY(bus_dmamap) links; @@ -123,7 +122,7 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit); static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, - vm_offset_t vaddr, bus_size_t size); + vm_offset_t vaddr, bus_addr_t addr, bus_size_t size); static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t len); @@ -480,37 +479,44 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, b } } -/* - * Utility function to load a linear buffer. lastaddrp holds state - * between invocations (for multiple-buffer loads). segp contains - * the starting segment on entrace, and the ending segment on exit. - * first indicates if this is the first invocation of this function. 
- */ -static int -_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, - bus_size_t buflen, struct thread *td, int flags, bus_addr_t *lastaddrp, - bus_dma_segment_t *segs, int *segp, int first) +static void +_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags) { + bus_addr_t curaddr; bus_size_t sgsize; - bus_addr_t curaddr, lastaddr, baddr, bmask; + + if ((dmat->lowaddr < paddr_max || dmat->boundary > 0 || + dmat->alignment > 1) && map != &nobounce_dmamap && + map->pagesneeded == 0) { + /* + * Count the number of bounce pages + * needed in order to complete this transfer + */ + curaddr = buf; + while (buflen != 0) { + sgsize = MIN(buflen, dmat->maxsegsz); + if (run_filter(dmat, curaddr, 0) != 0) { + sgsize = MIN(sgsize, PAGE_SIZE); + map->pagesneeded++; + } + curaddr += sgsize; + buflen -= sgsize; + } + } +} + +static void +_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, + void *buf, bus_size_t buflen, int flags) +{ vm_offset_t vaddr; + vm_offset_t vendaddr; bus_addr_t paddr; - int seg; - pmap_t pmap; - if (map == NULL) - map = &nobounce_dmamap; - - if (td != NULL) - pmap = vmspace_pmap(td->td_proc->p_vmspace); - else - pmap = NULL; - if ((dmat->lowaddr < paddr_max || dmat->boundary > 0 || dmat->alignment > 1) && map != &nobounce_dmamap && map->pagesneeded == 0) { - vm_offset_t vendaddr; - /* * Count the number of bounce pages * needed in order to complete this transfer @@ -519,267 +525,227 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, b vendaddr = (vm_offset_t)buf + buflen; while (vaddr < vendaddr) { - if (pmap != NULL) - paddr = pmap_extract(pmap, vaddr); - else + if (pmap == kernel_pmap) paddr = pmap_kextract(vaddr); + else + paddr = pmap_extract(pmap, vaddr); if (run_filter(dmat, paddr, 0) != 0) map->pagesneeded++; vaddr += PAGE_SIZE; } } +} - vaddr = (vm_offset_t)buf; +static int +_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int 
flags) +{ /* Reserve Necessary Bounce Pages */ - if (map->pagesneeded != 0) { - mtx_lock(&bounce_lock); - if (flags & BUS_DMA_NOWAIT) { - if (reserve_bounce_pages(dmat, map, 0) != 0) { - mtx_unlock(&bounce_lock); - return (ENOMEM); - } - } else { - if (reserve_bounce_pages(dmat, map, 1) != 0) { - /* Queue us for resources */ - map->dmat = dmat; - map->buf = buf; - map->buflen = buflen; - STAILQ_INSERT_TAIL(&bounce_map_waitinglist, - map, links); - mtx_unlock(&bounce_lock); - return (EINPROGRESS); - } + mtx_lock(&bounce_lock); + if (flags & BUS_DMA_NOWAIT) { + if (reserve_bounce_pages(dmat, map, 0) != 0) { + mtx_unlock(&bounce_lock); + return (ENOMEM); } - mtx_unlock(&bounce_lock); + } else { + if (reserve_bounce_pages(dmat, map, 1) != 0) { + /* Queue us for resources */ + STAILQ_INSERT_TAIL(&bounce_map_waitinglist, + map, links); + mtx_unlock(&bounce_lock); + return (EINPROGRESS); + } } + mtx_unlock(&bounce_lock); - lastaddr = *lastaddrp; - bmask = ~(dmat->boundary - 1); + return (0); +} - for (seg = *segp; buflen > 0 ; ) { - /* - * Get the physical address for this segment. - */ - if (pmap) - curaddr = pmap_extract(pmap, vaddr); - else - curaddr = pmap_kextract(vaddr); +/* + * Add a single contiguous physical range to the segment list. + */ +static int +_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, + bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) +{ + bus_addr_t baddr, bmask; + int seg; - /* - * Compute the segment size, and adjust counts. - */ - sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); - if (sgsize > dmat->maxsegsz) - sgsize = dmat->maxsegsz; - if (buflen < sgsize) - sgsize = buflen; + /* + * Make sure we don't cross any boundaries. + */ + bmask = ~(dmat->boundary - 1); + if (dmat->boundary > 0) { + baddr = (curaddr + dmat->boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } - /* - * Make sure we don't cross any boundaries. 
- */ - if (dmat->boundary > 0) { - baddr = (curaddr + dmat->boundary) & bmask; - if (sgsize > (baddr - curaddr)) - sgsize = (baddr - curaddr); - } - - if (map->pagesneeded != 0 && run_filter(dmat, curaddr, sgsize)) - curaddr = add_bounce_page(dmat, map, vaddr, sgsize); - - /* - * Insert chunk into a segment, coalescing with - * previous segment if possible. - */ - if (first) { + /* + * Insert chunk into a segment, coalescing with + * previous segment if possible. + */ + seg = *segp; + if (seg == -1) { + seg = 0; + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } else { + if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && + (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && + (dmat->boundary == 0 || + (segs[seg].ds_addr & bmask) == (curaddr & bmask))) + segs[seg].ds_len += sgsize; + else { + if (++seg >= dmat->nsegments) + return (0); segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; - first = 0; - } else { - if (curaddr == lastaddr && - (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && - (dmat->boundary == 0 || - (segs[seg].ds_addr & bmask) == (curaddr & bmask))) - segs[seg].ds_len += sgsize; - else { - if (++seg >= dmat->nsegments) - break; - segs[seg].ds_addr = curaddr; - segs[seg].ds_len = sgsize; - } } - - lastaddr = curaddr + sgsize; - vaddr += sgsize; - buflen -= sgsize; } - *segp = seg; - *lastaddrp = lastaddr; - - /* - * Did we fit? - */ - return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ + return (sgsize); } /* - * Map the buffer buf into bus space using the dmamap map. + * Utility function to load a physical buffer. segp contains + * the starting segment on entrace, and the ending segment on exit. 
*/ int -bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, - bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg, - int flags) +_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, + vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, + int *segp) { - bus_addr_t lastaddr = 0; - int error, nsegs = 0; + bus_addr_t curaddr; + bus_size_t sgsize; + int error; - if (map != NULL) { - flags |= BUS_DMA_WAITOK; - map->callback = callback; - map->callback_arg = callback_arg; - } + if (map == NULL) + map = &nobounce_dmamap; - error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags, - &lastaddr, dmat->segments, &nsegs, 1); + if (segs == NULL) + segs = dmat->segments; - if (error == EINPROGRESS) - return (error); + if (map != &nobounce_dmamap) { + _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); + } + } - if (error) - (*callback)(callback_arg, dmat->segments, 0, error); - else - (*callback)(callback_arg, dmat->segments, nsegs + 1, 0); + while (buflen > 0) { + curaddr = buf; + sgsize = MIN(buflen, dmat->maxsegsz); + if (map->pagesneeded != 0 && + run_filter(dmat, curaddr, sgsize)) { + sgsize = MIN(sgsize, PAGE_SIZE); + curaddr = add_bounce_page(dmat, map, 0, curaddr, + sgsize); + } + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; + buf += sgsize; + buflen -= sgsize; + } - return (0); + /* + * Did we fit? + */ + return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } - /* - * Like _bus_dmamap_load(), but for mbufs. + * Utility function to load a linear buffer. segp contains + * the starting segment on entrace, and the ending segment on exit. 
*/ int -bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, - bus_dmamap_callback2_t *callback, void *callback_arg, int flags) +_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, + bus_size_t buflen, pmap_t pmap, int flags, + bus_dma_segment_t *segs, int *segp) { - int nsegs, error; + bus_size_t sgsize; + bus_addr_t curaddr; + vm_offset_t vaddr; + int error; - M_ASSERTPKTHDR(m0); + if (map == NULL) + map = &nobounce_dmamap; - flags |= BUS_DMA_NOWAIT; - nsegs = 0; - error = 0; - if (m0->m_pkthdr.len <= dmat->maxsize) { - int first = 1; - bus_addr_t lastaddr = 0; - struct mbuf *m; + if (segs == NULL) + segs = dmat->segments; - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len > 0) { - error = _bus_dmamap_load_buffer(dmat, map, - m->m_data, m->m_len, NULL, flags, - &lastaddr, dmat->segments, &nsegs, first); - first = 0; - } + if (map != &nobounce_dmamap) { + _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); } - } else { - error = EINVAL; } - if (error) { - /* force "no valid mappings" in callback */ - (*callback)(callback_arg, dmat->segments, 0, 0, error); - } else { - (*callback)(callback_arg, dmat->segments, nsegs + 1, - m0->m_pkthdr.len, error); - } - return (error); -} + vaddr = (vm_offset_t)buf; -int -bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, - bus_dma_segment_t *segs, int *nsegs, int flags) -{ - int error; + while (buflen > 0) { + /* + * Get the physical address for this segment. + */ + if (pmap == kernel_pmap) + curaddr = pmap_kextract(vaddr); + else + curaddr = pmap_extract(pmap, vaddr); - M_ASSERTPKTHDR(m0); + /* + * Compute the segment size, and adjust counts. 
+ */ + sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); + if (sgsize > dmat->maxsegsz) + sgsize = dmat->maxsegsz; + if (buflen < sgsize) + sgsize = buflen; - flags |= BUS_DMA_NOWAIT; - *nsegs = 0; - error = 0; - if (m0->m_pkthdr.len <= dmat->maxsize) { - int first = 1; - bus_addr_t lastaddr = 0; - struct mbuf *m; + if (map->pagesneeded != 0 && run_filter(dmat, curaddr, sgsize)) + curaddr = add_bounce_page(dmat, map, vaddr, curaddr, + sgsize); - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len > 0) { - error = _bus_dmamap_load_buffer(dmat, map, - m->m_data, m->m_len, NULL, flags, - &lastaddr, segs, nsegs, first); - first = 0; - } - } - ++*nsegs; - } else { - error = EINVAL; + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; + + vaddr += sgsize; + buflen -= sgsize; } - return (error); + /* + * Did we fit? + */ + return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } -/* - * Like _bus_dmamap_load(), but for uios. - */ -int -bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, - bus_dmamap_callback2_t *callback, void *callback_arg, int flags) -{ - bus_addr_t lastaddr; - int nsegs, error, first, i; - bus_size_t resid; - struct iovec *iov; - struct thread *td = NULL; - flags |= BUS_DMA_NOWAIT; - resid = uio->uio_resid; - iov = uio->uio_iov; - - if (uio->uio_segflg == UIO_USERSPACE) { - td = uio->uio_td; - KASSERT(td != NULL, - ("bus_dmamap_load_uio: USERSPACE but no proc")); +void +__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_memory_t mem, bus_dmamap_callback_t *callback, void *callback_arg) +{ + if (map != NULL) { + map->dmat = dmat; + map->mem = mem; + map->callback = callback; + map->callback_arg = callback_arg; } +} - nsegs = 0; - error = 0; - first = 1; - for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { - /* - * Now at the first iovec to load. Load each iovec - * until we have exhausted the residual count. 
- */ - bus_size_t minlen = - resid < iov[i].iov_len ? resid : iov[i].iov_len; - caddr_t addr = (caddr_t) iov[i].iov_base; +bus_dma_segment_t * +_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_segment_t *segs, int nsegs, int error) +{ - if (minlen > 0) { - error = _bus_dmamap_load_buffer(dmat, map, addr, - minlen, td, flags, &lastaddr, dmat->segments, - &nsegs, first); - first = 0; - - resid -= minlen; - } - } - - if (error) { - /* force "no valid mappings" in callback */ - (*callback)(callback_arg, dmat->segments, 0, 0, error); - } else { - (*callback)(callback_arg, dmat->segments, nsegs + 1, - uio->uio_resid, error); - } - return (error); + if (segs == NULL) + segs = dmat->segments; + return (segs); } /* @@ -810,8 +776,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t if (op & BUS_DMASYNC_PREWRITE) { while (bpage != NULL) { - bcopy((void *)bpage->datavaddr, - (void *)bpage->vaddr, bpage->datacount); + if (bpage->datavaddr != 0) + bcopy((void *)bpage->datavaddr, + (void *)bpage->vaddr, + bpage->datacount); + else + physcopyout(bpage->dataaddr, + (void *)bpage->vaddr, + bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } total_bounced++; @@ -819,8 +791,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t if (op & BUS_DMASYNC_POSTREAD) { while (bpage != NULL) { - bcopy((void *)bpage->vaddr, - (void *)bpage->datavaddr, bpage->datacount); + if (bpage->datavaddr != 0) + bcopy((void *)bpage->vaddr, + (void *)bpage->datavaddr, + bpage->datacount); + else + physcopyin((void *)bpage->vaddr, + bpage->dataaddr, + bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } total_bounced++; @@ -893,7 +871,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmama static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, - bus_size_t size) + bus_addr_t addr, bus_size_t size) { struct bounce_page *bpage; @@ -924,6 +902,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t m bpage->busaddr |= vaddr & PAGE_MASK; } 
bpage->datavaddr = vaddr; + bpage->dataaddr = addr; bpage->datacount = size; STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); return (bpage->busaddr); @@ -975,8 +954,8 @@ busdma_swi(void) mtx_unlock(&bounce_lock); dmat = map->dmat; (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); - bus_dmamap_load(map->dmat, map, map->buf, map->buflen, - map->callback, map->callback_arg, /*flags*/0); + bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, + map->callback_arg, BUS_DMA_WAITOK); (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); mtx_lock(&bounce_lock); } Index: mips/mips/busdma_machdep.c =================================================================== --- mips/mips/busdma_machdep.c (.../head/sys) (revision 244874) +++ mips/mips/busdma_machdep.c (.../projects/physbio/sys) (revision 244874) @@ -41,11 +41,10 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include -#include #include #include #include +#include #include #include @@ -89,10 +88,17 @@ struct bounce_page { vm_offset_t vaddr_nocache; /* kva of bounce buffer uncached */ bus_addr_t busaddr; /* Physical address */ vm_offset_t datavaddr; /* kva of client data */ + bus_addr_t dataaddr; /* client physical address */ bus_size_t datacount; /* client data count */ STAILQ_ENTRY(bounce_page) links; }; +struct sync_list { + vm_offset_t vaddr; /* kva of bounce buffer */ + bus_addr_t busaddr; /* Physical address */ + bus_size_t datacount; /* client data count */ +}; + int busdma_swi_pending; struct bounce_zone { @@ -122,10 +128,6 @@ static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_ SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, "Total bounce pages"); -#define DMAMAP_LINEAR 0x1 -#define DMAMAP_MBUF 0x2 -#define DMAMAP_UIO 0x4 -#define DMAMAP_TYPE_MASK (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO) #define DMAMAP_UNCACHEABLE 0x8 #define DMAMAP_ALLOCATED 0x10 #define DMAMAP_MALLOCUSED 0x20 @@ -135,16 +137,16 @@ struct bus_dmamap { int pagesneeded; int pagesreserved; bus_dma_tag_t 
dmat; + bus_dma_memory_t mem; int flags; - void *buffer; void *origbuffer; void *allocbuffer; TAILQ_ENTRY(bus_dmamap) freelist; - int len; STAILQ_ENTRY(bus_dmamap) links; bus_dmamap_callback_t *callback; void *callback_arg; - + int sync_count; + struct sync_list *slist; }; static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; @@ -166,7 +168,8 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit); static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, - vm_offset_t vaddr, bus_size_t size); + vm_offset_t vaddr, bus_addr_t addr, + bus_size_t size); static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); /* Default tag, as most drivers provide no parent tag. */ @@ -215,11 +218,6 @@ SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmam */ static __inline int -bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs, - bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap, - int flags, vm_offset_t *lastaddrp, int *segp); - -static __inline int _bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr) { int i; @@ -273,10 +271,14 @@ dflt_lock(void *arg, bus_dma_lock_op_t op) } static __inline bus_dmamap_t -_busdma_alloc_dmamap(void) +_busdma_alloc_dmamap(bus_dma_tag_t dmat) { + struct sync_list *slist; bus_dmamap_t map; + slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT); + if (slist == NULL) + return (NULL); mtx_lock(&busdma_mtx); map = TAILQ_FIRST(&dmamap_freelist); if (map) @@ -288,13 +290,18 @@ static __inline bus_dmamap_t map->flags = DMAMAP_ALLOCATED; } else map->flags = 0; - STAILQ_INIT(&map->bpages); + if (map != NULL) { + STAILQ_INIT(&map->bpages); + map->slist = slist; + } else + free(slist, M_DEVBUF); return (map); } static __inline void _busdma_free_dmamap(bus_dmamap_t map) { + free(map->slist, M_DEVBUF); if (map->flags & DMAMAP_ALLOCATED) free(map, M_DEVBUF); else { @@ -477,7 +484,7 @@ 
bus_dmamap_create(bus_dma_tag_t dmat, int flags, b } } - newmap = _busdma_alloc_dmamap(); + newmap = _busdma_alloc_dmamap(dmat); if (newmap == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); return (ENOMEM); @@ -485,6 +492,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, b *mapp = newmap; newmap->dmat = dmat; newmap->allocbuffer = NULL; + newmap->sync_count = 0; dmat->map_count++; /* @@ -549,7 +557,7 @@ int bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) { - if (STAILQ_FIRST(&map->bpages) != NULL) { + if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY); return (EBUSY); @@ -592,7 +600,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, if (flags & BUS_DMA_ZERO) mflags |= M_ZERO; - newmap = _busdma_alloc_dmamap(); + newmap = _busdma_alloc_dmamap(dmat); if (newmap == NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->flags, ENOMEM); @@ -601,6 +609,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, dmat->map_count++; *mapp = newmap; newmap->dmat = dmat; + newmap->sync_count = 0; /* * If all the memory is coherent with DMA then we don't need to @@ -684,7 +693,37 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, b CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); } -static int +static void +_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags) +{ + bus_addr_t curaddr; + bus_size_t sgsize; + + if ((map->pagesneeded == 0)) { + CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d", + dmat->lowaddr, dmat->boundary, dmat->alignment); + CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", + map, map->pagesneeded); + /* + * Count the number of bounce pages + * needed in order to complete this transfer + */ + curaddr = buf; + while (buflen != 0) { + sgsize = MIN(buflen, dmat->maxsegsz); + if (run_filter(dmat, curaddr) != 0) { + sgsize = 
MIN(sgsize, PAGE_SIZE); + map->pagesneeded++; + } + curaddr += sgsize; + buflen -= sgsize; + } + CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); + } +} + +static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, void *buf, bus_size_t buflen, int flags) { @@ -719,60 +758,157 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dm } CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); } +} +static int +_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,int flags) +{ + /* Reserve Necessary Bounce Pages */ - if (map->pagesneeded != 0) { - mtx_lock(&bounce_lock); - if (flags & BUS_DMA_NOWAIT) { - if (reserve_bounce_pages(dmat, map, 0) != 0) { - mtx_unlock(&bounce_lock); - return (ENOMEM); - } - } else { - if (reserve_bounce_pages(dmat, map, 1) != 0) { - /* Queue us for resources */ - STAILQ_INSERT_TAIL(&bounce_map_waitinglist, - map, links); - mtx_unlock(&bounce_lock); - return (EINPROGRESS); - } + mtx_lock(&bounce_lock); + if (flags & BUS_DMA_NOWAIT) { + if (reserve_bounce_pages(dmat, map, 0) != 0) { + mtx_unlock(&bounce_lock); + return (ENOMEM); } - mtx_unlock(&bounce_lock); + } else { + if (reserve_bounce_pages(dmat, map, 1) != 0) { + /* Queue us for resources */ + STAILQ_INSERT_TAIL(&bounce_map_waitinglist, + map, links); + mtx_unlock(&bounce_lock); + return (EINPROGRESS); + } } + mtx_unlock(&bounce_lock); return (0); } /* - * Utility function to load a linear buffer. lastaddrp holds state - * between invocations (for multiple-buffer loads). segp contains + * Add a single contiguous physical range to the segment list. + */ +static int +_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, + bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) +{ + bus_addr_t baddr, bmask; + int seg; + + /* + * Make sure we don't cross any boundaries. 
+ */ + bmask = ~(dmat->boundary - 1); + if (dmat->boundary > 0) { + baddr = (curaddr + dmat->boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } + /* + * Insert chunk into a segment, coalescing with + * the previous segment if possible. + */ + seg = *segp; + if (seg >= 0 && + curaddr == segs[seg].ds_addr + segs[seg].ds_len && + (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && + (dmat->boundary == 0 || + (segs[seg].ds_addr & bmask) == (curaddr & bmask))) { + segs[seg].ds_len += sgsize; + } else { + if (++seg >= dmat->nsegments) + return (0); + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } + *segp = seg; + return (sgsize); +} + +/* + * Utility function to load a physical buffer. segp contains + * the starting segment on entrace, and the ending segment on exit. + */ +int +_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, + vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, + int *segp) +{ + bus_addr_t curaddr; + bus_size_t sgsize; + int error; + + if (segs == NULL) + segs = dmat->segments; + + if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { + _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); + } + } + + while (buflen > 0) { + curaddr = buf; + sgsize = MIN(buflen, dmat->maxsegsz); + if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && + map->pagesneeded != 0 && run_filter(dmat, curaddr)) { + sgsize = MIN(sgsize, PAGE_SIZE); + curaddr = add_bounce_page(dmat, map, 0, curaddr, + sgsize); + } + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; + buf += sgsize; + buflen -= sgsize; + } + + /* + * Did we fit? + */ + if (buflen != 0) { + _bus_dmamap_unload(dmat, map); + return (EFBIG); /* XXX better return value here? */ + } + return (0); +} + +/* + * Utility function to load a linear buffer. 
segp contains * the starting segment on entrance, and the ending segment on exit. * first indicates if this is the first invocation of this function. */ -static __inline int -bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs, - bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap, - int flags, vm_offset_t *lastaddrp, int *segp) +int +_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, + bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs, + int *segp) { bus_size_t sgsize; - bus_addr_t curaddr, lastaddr, baddr, bmask; + bus_addr_t curaddr; + struct sync_list *sl; vm_offset_t vaddr = (vm_offset_t)buf; - int seg; int error = 0; - lastaddr = *lastaddrp; - bmask = ~(dmat->boundary - 1); + if (segs == NULL) + segs = dmat->segments; + if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { - error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, - flags); - if (error) - return (error); + _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); + } } CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, " "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment); - for (seg = *segp; buflen > 0 ; ) { + while (buflen > 0) { /* * Get the physical address for this segment. * @@ -791,240 +927,65 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dm if (buflen < sgsize) sgsize = buflen; - /* - * Make sure we don't cross any boundaries. - */ - if (dmat->boundary > 0) { - baddr = (curaddr + dmat->boundary) & bmask; - if (sgsize > (baddr - curaddr)) - sgsize = (baddr - curaddr); - } if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && map->pagesneeded != 0 && run_filter(dmat, curaddr)) { - curaddr = add_bounce_page(dmat, map, vaddr, sgsize); - } - - /* - * Insert chunk into a segment, coalescing with - * the previous segment if possible. 
- */ - if (seg >= 0 && curaddr == lastaddr && - (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && - (dmat->boundary == 0 || - (segs[seg].ds_addr & bmask) == - (curaddr & bmask))) { - segs[seg].ds_len += sgsize; - goto segdone; + curaddr = add_bounce_page(dmat, map, vaddr, curaddr, + sgsize); } else { - if (++seg >= dmat->nsegments) - break; - segs[seg].ds_addr = curaddr; - segs[seg].ds_len = sgsize; + sl = &map->slist[map->sync_count - 1]; + if (map->sync_count == 0 || + vaddr != sl->vaddr + sl->datacount) { + if (++map->sync_count > dmat->nsegments) + goto cleanup; + sl++; + sl->vaddr = vaddr; + sl->datacount = sgsize; + sl->busaddr = curaddr; + } else + sl->datacount += sgsize; } - if (error) + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) break; -segdone: - lastaddr = curaddr + sgsize; vaddr += sgsize; buflen -= sgsize; } - *segp = seg; - *lastaddrp = lastaddr; - +cleanup: /* * Did we fit? */ - if (buflen != 0) + if (buflen != 0) { + _bus_dmamap_unload(dmat, map); error = EFBIG; /* XXX better return value here? */ + } return (error); } -/* - * Map the buffer buf into bus space using the dmamap map. 
- */ -int -bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, - bus_size_t buflen, bus_dmamap_callback_t *callback, - void *callback_arg, int flags) +void +__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_memory_t mem, bus_dmamap_callback_t *callback, void *callback_arg) { - vm_offset_t lastaddr = 0; - int error, nsegs = -1; KASSERT(dmat != NULL, ("dmatag is NULL")); KASSERT(map != NULL, ("dmamap is NULL")); + map->mem = mem; map->callback = callback; map->callback_arg = callback_arg; - map->flags &= ~DMAMAP_TYPE_MASK; - map->flags |= DMAMAP_LINEAR; - map->buffer = buf; - map->len = buflen; - error = bus_dmamap_load_buffer(dmat, - dmat->segments, map, buf, buflen, kernel_pmap, - flags, &lastaddr, &nsegs); - if (error == EINPROGRESS) - return (error); - if (error) - (*callback)(callback_arg, NULL, 0, error); - else - (*callback)(callback_arg, dmat->segments, nsegs + 1, error); - - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, nsegs + 1, error); - - return (error); } -/* - * Like bus_dmamap_load(), but for mbufs. - */ -int -bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, - bus_dmamap_callback2_t *callback, void *callback_arg, - int flags) +bus_dma_segment_t * +_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_segment_t *segs, int nsegs, int error) { - int nsegs = -1, error = 0; - M_ASSERTPKTHDR(m0); - - map->flags &= ~DMAMAP_TYPE_MASK; - map->flags |= DMAMAP_MBUF; - map->buffer = m0; - map->len = 0; - if (m0->m_pkthdr.len <= dmat->maxsize) { - vm_offset_t lastaddr = 0; - struct mbuf *m; - - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len > 0) { - error = bus_dmamap_load_buffer(dmat, - dmat->segments, map, m->m_data, m->m_len, - kernel_pmap, flags, &lastaddr, &nsegs); - map->len += m->m_len; - } - } - } else { - error = EINVAL; - } - - if (error) { - /* - * force "no valid mappings" on error in callback. 
- */ - (*callback)(callback_arg, dmat->segments, 0, 0, error); - } else { - (*callback)(callback_arg, dmat->segments, nsegs + 1, - m0->m_pkthdr.len, error); - } - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, nsegs + 1); - - return (error); + if (segs == NULL) + segs = dmat->segments; + return (segs); } -int -bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, - struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, - int flags) -{ - int error = 0; - M_ASSERTPKTHDR(m0); - - flags |= BUS_DMA_NOWAIT; - *nsegs = -1; - map->flags &= ~DMAMAP_TYPE_MASK; - map->flags |= DMAMAP_MBUF; - map->buffer = m0; - map->len = 0; - if (m0->m_pkthdr.len <= dmat->maxsize) { - vm_offset_t lastaddr = 0; - struct mbuf *m; - - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len > 0) { - error = bus_dmamap_load_buffer(dmat, segs, map, - m->m_data, m->m_len, - kernel_pmap, flags, &lastaddr, - nsegs); - map->len += m->m_len; - } - } - } else { - error = EINVAL; - } - - /* XXX FIXME: Having to increment nsegs is really annoying */ - ++*nsegs; - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, *nsegs); - return (error); -} - /* - * Like bus_dmamap_load(), but for uios. 
- */ -int -bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, - bus_dmamap_callback2_t *callback, void *callback_arg, - int flags) -{ - vm_offset_t lastaddr = 0; - int nsegs, i, error; - bus_size_t resid; - struct iovec *iov; - struct pmap *pmap; - - resid = uio->uio_resid; - iov = uio->uio_iov; - map->flags &= ~DMAMAP_TYPE_MASK; - map->flags |= DMAMAP_UIO; - map->buffer = uio; - map->len = 0; - - if (uio->uio_segflg == UIO_USERSPACE) { - KASSERT(uio->uio_td != NULL, - ("bus_dmamap_load_uio: USERSPACE but no proc")); - /* XXX: pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); */ - panic("can't do it yet"); - } else - pmap = kernel_pmap; - - error = 0; - nsegs = -1; - for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { - /* - * Now at the first iovec to load. Load each iovec - * until we have exhausted the residual count. - */ - bus_size_t minlen = - resid < iov[i].iov_len ? resid : iov[i].iov_len; - caddr_t addr = (caddr_t) iov[i].iov_base; - - if (minlen > 0) { - error = bus_dmamap_load_buffer(dmat, dmat->segments, - map, addr, minlen, pmap, flags, &lastaddr, &nsegs); - - map->len += minlen; - resid -= minlen; - } - } - - if (error) { - /* - * force "no valid mappings" on error in callback. - */ - (*callback)(callback_arg, dmat->segments, 0, 0, error); - } else { - (*callback)(callback_arg, dmat->segments, nsegs+1, - uio->uio_resid, error); - } - - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, nsegs + 1); - return (error); -} - -/* * Release the mapping held by map. 
*/ void @@ -1032,16 +993,16 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_ { struct bounce_page *bpage; - map->flags &= ~DMAMAP_TYPE_MASK; while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { STAILQ_REMOVE_HEAD(&map->bpages, links); free_bounce_page(dmat, bpage); } + map->sync_count = 0; return; } static void -bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op) +bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op) { char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize]; vm_offset_t buf_cl, buf_clend; @@ -1055,9 +1016,9 @@ static void * prevent a data loss we save these chunks in temporary buffer * before invalidation and restore them afer it */ - buf_cl = (vm_offset_t)buf & ~cache_linesize_mask; - size_cl = (vm_offset_t)buf & cache_linesize_mask; - buf_clend = (vm_offset_t)buf + len; + buf_cl = buf & ~cache_linesize_mask; + size_cl = buf & cache_linesize_mask; + buf_clend = buf + len; size_clend = (mips_pdcache_linesize - (buf_clend & cache_linesize_mask)) & cache_linesize_mask; @@ -1072,7 +1033,7 @@ static void memcpy (tmp_cl, (void*)buf_cl, size_cl); if (size_clend) memcpy (tmp_clend, (void*)buf_clend, size_clend); - mips_dcache_inv_range((vm_offset_t)buf, len); + mips_dcache_inv_range(buf, len); /* * Restore them */ @@ -1087,15 +1048,14 @@ static void * necessary. 
*/ if (size_cl) - mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl); + mips_dcache_wbinv_range(buf_cl, size_cl); if (size_clend && (size_cl == 0 || buf_clend - buf_cl > mips_pdcache_linesize)) - mips_dcache_wbinv_range((vm_offset_t)buf_clend, - size_clend); + mips_dcache_wbinv_range(buf_clend, size_clend); break; case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE: - mips_dcache_wbinv_range((vm_offset_t)buf_cl, len); + mips_dcache_wbinv_range(buf_cl, len); break; case BUS_DMASYNC_PREREAD: @@ -1106,7 +1066,7 @@ static void memcpy (tmp_cl, (void *)buf_cl, size_cl); if (size_clend) memcpy (tmp_clend, (void *)buf_clend, size_clend); - mips_dcache_inv_range((vm_offset_t)buf, len); + mips_dcache_inv_range(buf, len); /* * Restore them */ @@ -1121,15 +1081,14 @@ static void * necessary. */ if (size_cl) - mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl); + mips_dcache_wbinv_range(buf_cl, size_cl); if (size_clend && (size_cl == 0 || buf_clend - buf_cl > mips_pdcache_linesize)) - mips_dcache_wbinv_range((vm_offset_t)buf_clend, - size_clend); + mips_dcache_wbinv_range(buf_clend, size_clend); break; case BUS_DMASYNC_PREWRITE: - mips_dcache_wb_range((vm_offset_t)buf, len); + mips_dcache_wb_range(buf, len); break; } } @@ -1141,10 +1100,18 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap STAILQ_FOREACH(bpage, &map->bpages, links) { if (op & BUS_DMASYNC_PREWRITE) { - bcopy((void *)bpage->datavaddr, - (void *)(bpage->vaddr_nocache != 0 ? - bpage->vaddr_nocache : bpage->vaddr), - bpage->datacount); + if (bpage->datavaddr != 0) + bcopy((void *)bpage->datavaddr, + (void *)(bpage->vaddr_nocache != 0 ? + bpage->vaddr_nocache : + bpage->vaddr), + bpage->datacount); + else + physcopyout(bpage->dataaddr, + (void *)(bpage->vaddr_nocache != 0 ? 
+ bpage->vaddr_nocache : + bpage->vaddr), + bpage->datacount); if (bpage->vaddr_nocache == 0) { mips_dcache_wb_range(bpage->vaddr, bpage->datacount); @@ -1156,36 +1123,23 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap mips_dcache_inv_range(bpage->vaddr, bpage->datacount); } - bcopy((void *)(bpage->vaddr_nocache != 0 ? - bpage->vaddr_nocache : bpage->vaddr), - (void *)bpage->datavaddr, bpage->datacount); + if (bpage->datavaddr != 0) + bcopy((void *)(bpage->vaddr_nocache != 0 ? + bpage->vaddr_nocache : bpage->vaddr), + (void *)bpage->datavaddr, bpage->datacount); + else + physcopyin((void *)(bpage->vaddr_nocache != 0 ? + bpage->vaddr_nocache : bpage->vaddr), + bpage->dataaddr, bpage->datacount); dmat->bounce_zone->total_bounced++; } } } -static __inline int -_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len) -{ - struct bounce_page *bpage; - - STAILQ_FOREACH(bpage, &map->bpages, links) { - if ((vm_offset_t)buf >= bpage->datavaddr && - (vm_offset_t)buf + len <= bpage->datavaddr + - bpage->datacount) - return (1); - } - return (0); - -} - void _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { - struct mbuf *m; - struct uio *uio; - int resid; - struct iovec *iov; + struct sync_list *sl, *end; if (op == BUS_DMASYNC_POSTWRITE) return; @@ -1199,38 +1153,10 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t return; CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags); - switch(map->flags & DMAMAP_TYPE_MASK) { - case DMAMAP_LINEAR: - if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len))) - bus_dmamap_sync_buf(map->buffer, map->len, op); - break; - case DMAMAP_MBUF: - m = map->buffer; - while (m) { - if (m->m_len > 0 && - !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len))) - bus_dmamap_sync_buf(m->m_data, m->m_len, op); - m = m->m_next; - } - break; - case DMAMAP_UIO: - uio = map->buffer; - iov = uio->uio_iov; - resid = uio->uio_resid; - for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) { - bus_size_t minlen = 
resid < iov[i].iov_len ? resid : - iov[i].iov_len; - if (minlen > 0) { - if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base, - minlen)) - bus_dmamap_sync_buf(iov[i].iov_base, - minlen, op); - resid -= minlen; - } - } - break; - default: - break; + if (map->sync_count) { + end = &map->slist[map->sync_count]; + for (sl = &map->slist[0]; sl != end; sl++) + bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op); } } @@ -1393,7 +1319,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmama static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, - bus_size_t size) + bus_addr_t addr, bus_size_t size) { struct bounce_zone *bz; struct bounce_page *bpage; @@ -1426,6 +1352,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t m bpage->busaddr |= vaddr & PAGE_MASK; } bpage->datavaddr = vaddr; + bpage->dataaddr = addr; bpage->datacount = size; STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); return (bpage->busaddr); @@ -1479,8 +1406,8 @@ busdma_swi(void) mtx_unlock(&bounce_lock); dmat = map->dmat; (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); - bus_dmamap_load(map->dmat, map, map->buffer, map->len, - map->callback, map->callback_arg, /*flags*/0); + bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, + map->callback_arg, BUS_DMA_WAITOK); (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); mtx_lock(&bounce_lock); } Property changes on: . ___________________________________________________________________ Modified: svn:mergeinfo Merged /head/sys:r243873-244873