Index: sys/dev/drm/r600_blit.c
===================================================================
--- sys/dev/drm/r600_blit.c	(revision 203846)
+++ sys/dev/drm/r600_blit.c	(working copy)
@@ -1290,8 +1290,8 @@
 DRM_DEBUG("\n");
 /* load shaders */
- vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset);
- ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
+ vs = (u32 *) ((char *)dev->agp_buffer_map->virtual + dev_priv->blit_vb->offset);
+ ps = (u32 *) ((char *)dev->agp_buffer_map->virtual + dev_priv->blit_vb->offset + 256);
 shader_size = sizeof(r6xx_vs) / 4;
 for (i= 0; i < shader_size; i++)
@@ -1718,11 +1718,10 @@
 u64 vb_addr;
 u32 *vb;
- vb = (u32 *) ((char *)dev->agp_buffer_map->handle +
+ vb = (u32 *) ((char *)dev->agp_buffer_map->virtual +
 dev_priv->blit_vb->offset + dev_priv->blit_vb->used);
- DRM_DEBUG("src=0x%016llx, dst=0x%016llx, size=%d\n",
- (unsigned long long)src_gpu_addr,
- (unsigned long long)dst_gpu_addr, size_bytes);
+ DRM_DEBUG("src=0x%016jx, dst=0x%016jx, size=%d\n",
+ src_gpu_addr, dst_gpu_addr, size_bytes);
 if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
 max_bytes = 8192;
@@ -1759,7 +1758,7 @@
 if (!dev_priv->blit_vb)
 return;
 set_shaders(dev);
- vb = (u32 *) ((char *)dev->agp_buffer_map->handle +
+ vb = (u32 *) ((char *)dev->agp_buffer_map->virtual +
 dev_priv->blit_vb->offset + dev_priv->blit_vb->used);
 }
@@ -1849,7 +1848,7 @@
 if (!dev_priv->blit_vb)
 return;
 set_shaders(dev);
- vb = (u32 *) ((char *)dev->agp_buffer_map->handle +
+ vb = (u32 *) ((char *)dev->agp_buffer_map->virtual +
 dev_priv->blit_vb->offset + dev_priv->blit_vb->used);
 }
@@ -1928,7 +1927,7 @@
 return;
 set_shaders(dev);
 }
- vb = (u32 *) ((char *)dev->agp_buffer_map->handle +
+ vb = (u32 *) ((char *)dev->agp_buffer_map->virtual +
 dev_priv->blit_vb->offset + dev_priv->blit_vb->used);
 sx2 = sx + w;
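Note on the r600_blit.c hunks above: the CPU-side pointers into the blit vertex buffer now go through the map's kernel virtual address (map->virtual), and the debug printf switches from "%llx" plus (unsigned long long) casts to "%jx". A minimal, illustrative sketch (not part of the patch; the helper name is made up) of the portable pattern for printing fixed-width 64-bit values with "%jx", which formats an (u)intmax_t:

    #include <stdint.h>
    #include <stdio.h>

    static void
    print_gpu_range(uint64_t src_gpu_addr, uint64_t dst_gpu_addr, int size_bytes)
    {
        /* Cast to uintmax_t so the argument width always matches %jx. */
        printf("src=0x%016jx, dst=0x%016jx, size=%d\n",
            (uintmax_t)src_gpu_addr, (uintmax_t)dst_gpu_addr, size_bytes);
    }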
Index: sys/dev/drm/drm_bufs.c
===================================================================
--- sys/dev/drm/drm_bufs.c	(revision 203846)
+++ sys/dev/drm/drm_bufs.c	(working copy)
@@ -156,10 +156,12 @@
 map->size = size;
 map->type = type;
 map->flags = flags;
+ map->handle = (void *)((unsigned long)alloc_unr(dev->map_unrhdr) <<
+ DRM_MAP_HANDLE_SHIFT);
 switch (map->type) {
 case _DRM_REGISTERS:
- map->handle = drm_ioremap(dev, map);
+ map->virtual = drm_ioremap(dev, map);
 if (!(map->flags & _DRM_WRITE_COMBINING))
 break;
 /* FALLTHROUGH */
@@ -168,25 +170,25 @@
 map->mtrr = 1;
 break;
 case _DRM_SHM:
- map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
+ map->virtual = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
 DRM_DEBUG("%lu %d %p\n",
- map->size, drm_order(map->size), map->handle);
- if (!map->handle) {
+ map->size, drm_order(map->size), map->virtual);
+ if (!map->virtual) {
 free(map, DRM_MEM_MAPS);
 DRM_LOCK();
 return ENOMEM;
 }
- map->offset = (unsigned long)map->handle;
+ map->offset = (unsigned long)map->virtual;
 if (map->flags & _DRM_CONTAINS_LOCK) {
 /* Prevent a 2nd X Server from creating a 2nd lock */
 DRM_LOCK();
 if (dev->lock.hw_lock != NULL) {
 DRM_UNLOCK();
- free(map->handle, DRM_MEM_MAPS);
+ free(map->virtual, DRM_MEM_MAPS);
 free(map, DRM_MEM_MAPS);
 return EBUSY;
 }
- dev->lock.hw_lock = map->handle; /* Pointer to lock */
+ dev->lock.hw_lock = map->virtual; /* Pointer to lock */
 DRM_UNLOCK();
 }
 break;
@@ -224,7 +226,8 @@
 DRM_LOCK();
 return EINVAL;
 }
- map->offset += dev->sg->handle;
+ map->virtual = (void *)(dev->sg->vaddr + offset);
+ map->offset = dev->sg->vaddr + offset;
 break;
 case _DRM_CONSISTENT:
 /* Unfortunately, we don't get any alignment specification from
@@ -242,7 +245,7 @@
 DRM_LOCK();
 return ENOMEM;
 }
- map->handle = map->dmah->vaddr;
+ map->virtual = map->dmah->vaddr;
 map->offset = map->dmah->busaddr;
 break;
 default:
@@ -291,12 +294,8 @@
 request->type = map->type;
 request->flags = map->flags;
 request->mtrr = map->mtrr;
- request->handle = map->handle;
+ request->handle = (void *)map->handle;
- if (request->type != _DRM_SHM) {
- request->handle = (void *)request->offset;
- }
-
 return 0;
 }
@@ -324,7 +323,7 @@
 }
 break;
 case _DRM_SHM:
- free(map->handle, DRM_MEM_MAPS);
+ free(map->virtual, DRM_MEM_MAPS);
 break;
 case _DRM_AGP:
 case _DRM_SCATTER_GATHER:
@@ -342,6 +341,12 @@
 map->bsr);
 }
+ DRM_UNLOCK();
+ if (map->handle)
+ free_unr(dev->map_unrhdr, (unsigned long)map->handle >>
+ DRM_MAP_HANDLE_SHIFT);
+ DRM_LOCK();
+
 free(map, DRM_MEM_MAPS);
 }
@@ -739,7 +744,7 @@
 buf->offset = (dma->byte_count + offset);
 buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset + dev->sg->handle);
+ buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
 buf->next = NULL;
 buf->pending = 0;
 buf->file_priv = NULL;
@@ -1054,7 +1059,7 @@
 goto done;
 }
 size = round_page(map->size);
- foff = map->offset;
+ foff = (unsigned long)map->handle;
 } else {
 size = round_page(dma->byte_count),
 foff = 0;
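The drm_bufs.c hunks above are where the new opaque map handles are minted: every map gets a small unit number from the per-device unrhdr allocator, shifted into the top DRM_MAP_HANDLE_BITS of the token handed to user space, and the unit is returned on map destruction. A condensed sketch of that encode/decode round trip, restating the patch's own logic (the helper names are illustrative only; alloc_unr/free_unr are the stock FreeBSD unit-number allocator, and DRM_MAP_HANDLE_SHIFT is defined in the drmP.h section later in this patch):

    /* Unit numbers start at 1, so a valid handle is never NULL. */
    static void *
    drm_map_handle_alloc(struct drm_device *dev)
    {
        return ((void *)((unsigned long)alloc_unr(dev->map_unrhdr) <<
            DRM_MAP_HANDLE_SHIFT));
    }

    static void
    drm_map_handle_free(struct drm_device *dev, void *handle)
    {
        if (handle != NULL)
            free_unr(dev->map_unrhdr,
                (unsigned long)handle >> DRM_MAP_HANDLE_SHIFT);
    }

The point of the encoding is that the handle no longer aliases a physical or kernel-virtual address; the low DRM_MAP_HANDLE_SHIFT bits of an mmap offset remain free to carry the byte offset within the map.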
Index: sys/dev/drm/drm_scatter.c
===================================================================
--- sys/dev/drm/drm_scatter.c	(revision 203846)
+++ sys/dev/drm/drm_scatter.c	(working copy)
@@ -1,5 +1,5 @@
 /*-
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009 Robert C. Noland III
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -20,11 +20,6 @@
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Gareth Hughes
- * Eric Anholt
- *
 */
 #include
@@ -32,96 +27,85 @@
 /** @file drm_scatter.c
 * Allocation of memory for scatter-gather mappings by the graphics chip.
- *
 * The memory allocated here is then made into an aperture in the card
- * by drm_ati_pcigart_init().
+ * by mapping the pages into the GART.
 */
 #include "dev/drm/drmP.h"
-static void drm_sg_alloc_cb(void *arg, bus_dma_segment_t *segs,
- int nsegs, int error);
-
 int
 drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather *request)
 {
 struct drm_sg_mem *entry;
- struct drm_dma_handle *dmah;
- int ret;
+ struct thread *td = curthread;
+ vm_page_t m;
+ vm_offset_t va;
+ vm_size_t size;
+ vm_pindex_t pindex;
 if (dev->sg)
 return EINVAL;
- entry = malloc(sizeof(*entry), DRM_MEM_SGLISTS, M_WAITOK | M_ZERO);
- entry->pages = round_page(request->size) / PAGE_SIZE;
- DRM_DEBUG("sg size=%ld pages=%d\n", request->size, entry->pages);
+ DRM_DEBUG("request size=%ld\n", request->size);
+ entry = malloc(sizeof(*entry), DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
+
+ size = round_page(request->size);
+ entry->pages = OFF_TO_IDX(size);
 entry->busaddr = malloc(entry->pages * sizeof(*entry->busaddr),
- DRM_MEM_PAGES, M_WAITOK | M_ZERO);
- dmah = malloc(sizeof(struct drm_dma_handle), DRM_MEM_DMA,
- M_WAITOK | M_ZERO);
- entry->dmah = dmah;
+ DRM_MEM_SGLISTS, M_WAITOK | M_ZERO);
- ret = bus_dma_tag_create(NULL, PAGE_SIZE, 0, /* tag, align, boundary */
- BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
- NULL, NULL, /* filtfunc, filtfuncargs */
- request->size, entry->pages, /* maxsize, nsegs */
- PAGE_SIZE, 0, /* maxsegsize, flags */
- NULL, NULL, /* lockfunc, lockfuncargs */
- &dmah->tag);
- if (ret != 0) {
- drm_sg_cleanup(entry);
- return ENOMEM;
+ entry->obj = vm_pager_allocate(OBJT_PHYS, NULL, size,
+ VM_PROT_RW, 0, td->td_ucred);
+ VM_OBJECT_LOCK(entry->obj);
+ vm_object_set_memattr(entry->obj, VM_MEMATTR_WRITE_COMBINING);
+ for(pindex = 0; pindex < entry->pages; pindex++) {
+ m = vm_phys_alloc_contig(1, 0, BUS_SPACE_MAXADDR_32BIT,
+ PAGE_SIZE, 0);
+ if (m == NULL) {
+ VM_OBJECT_UNLOCK(entry->obj);
+ goto error;
+ }
+ if ((m->flags & PG_ZERO) == 0)
+ pmap_zero_page(m);
+ pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
+ m->valid = VM_PAGE_BITS_ALL;
+ vm_page_insert(m, entry->obj, pindex);
+ entry->busaddr[pindex] = VM_PAGE_TO_PHYS(m);
 }
+ vm_object_reference_locked(entry->obj);
+ VM_OBJECT_UNLOCK(entry->obj);
- ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr,
- BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_NOCACHE, &dmah->map);
- if (ret != 0) {
- drm_sg_cleanup(entry);
- return ENOMEM;
- }
+ va = vm_map_min(kernel_map);
+ if (vm_map_find(kernel_map, entry->obj, 0, &va, size, TRUE,
+ VM_PROT_RW, VM_PROT_RW, 0) != KERN_SUCCESS)
+ goto error;
- entry->handle = (unsigned long)dmah->vaddr;
- entry->virtual = dmah->vaddr;
+ entry->vaddr = va;
- ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr,
- request->size, drm_sg_alloc_cb, entry, BUS_DMA_NOWAIT);
- if (ret != 0) {
- drm_sg_cleanup(entry);
- return ENOMEM;
- }
+ if (vm_map_wire(kernel_map, va, va + size,
+ VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES) != KERN_SUCCESS)
+ goto error;
 DRM_LOCK();
 if (dev->sg) {
 DRM_UNLOCK();
 drm_sg_cleanup(entry);
- return EINVAL;
+ return (EINVAL);
 }
 dev->sg = entry;
 DRM_UNLOCK();
- DRM_DEBUG("handle=%08lx, kva=%p, contents=%08lx\n", entry->handle,
- entry->virtual, *(unsigned long *)entry->virtual);
+ request->handle = entry->vaddr;
- request->handle = entry->handle;
+ DRM_DEBUG("allocated %ld pages @ 0x%08lx, contents=%08lx\n",
+ entry->pages, entry->vaddr, *(unsigned long *)entry->vaddr);
- return 0;
-}
+ return (0);
-static void
-drm_sg_alloc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
-{
- struct drm_sg_mem *entry = arg;
- int i;
-
- if (error != 0)
- return;
-
- for(i = 0 ; i < nsegs ; i++) {
- entry->busaddr[i] = segs[i].ds_addr;
- DRM_DEBUG("segment %d @ 0x%016lx\n", i,
- (unsigned long)segs[i].ds_addr);
- }
+error:
+ drm_sg_cleanup(entry);
+ return (ENOMEM);
 }
 int
@@ -132,23 +116,26 @@
 DRM_DEBUG("\n");
- return drm_sg_alloc(dev, request);
+ return (drm_sg_alloc(dev, request));
 }
 void
 drm_sg_cleanup(struct drm_sg_mem *entry)
 {
- struct drm_dma_handle *dmah = entry->dmah;
+ if (entry == NULL)
+ return;
- if (dmah->map != NULL)
- bus_dmamap_unload(dmah->tag, dmah->map);
- if (dmah->vaddr != NULL)
- bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
- if (dmah->tag != NULL)
- bus_dma_tag_destroy(dmah->tag);
- free(dmah, DRM_MEM_DMA);
- free(entry->busaddr, DRM_MEM_PAGES);
- free(entry, DRM_MEM_SGLISTS);
+ if (entry->vaddr != 0)
+ vm_map_remove(kernel_map, entry->vaddr,
+ entry->vaddr + IDX_TO_OFF(entry->pages));
+
+ if (entry->obj != NULL)
+ vm_object_deallocate(entry->obj);
+
+ free(entry->busaddr, DRM_MEM_SGLISTS);
+ free(entry, DRM_MEM_DRIVER);
+
+ return;
 }
 int
@@ -162,12 +149,12 @@
 dev->sg = NULL;
 DRM_UNLOCK();
- if (!entry || entry->handle != request->handle)
- return EINVAL;
+ if (!entry || entry->vaddr != request->handle)
+ return (EINVAL);
- DRM_DEBUG("sg free virtual = 0x%lx\n", entry->handle);
+ DRM_DEBUG("free 0x%lx\n", entry->vaddr);
 drm_sg_cleanup(entry);
- return 0;
+ return (0);
 }
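The rewritten drm_sg_alloc() above builds the scatter-gather backing store page by page: an OBJT_PHYS VM object collects write-combined pages allocated below 4GB (so a 32-bit GART can reach them), the object is mapped and wired into kernel_map, and busaddr[] records each page's physical address for the GART setup code. A sketch of how a consumer walks the resulting drm_sg_mem; gart_write_pte() is a hypothetical stand-in for the hardware-specific PTE write, not a function in this patch:

    static void
    gart_bind_sg(struct drm_sg_mem *entry)
    {
        vm_pindex_t i;

        /* One PAGE_SIZE page per index; busaddr[] holds bus/physical addresses. */
        for (i = 0; i < entry->pages; i++)
            gart_write_pte(i, entry->busaddr[i]);
    }

Teardown in drm_sg_cleanup() mirrors the setup order: the kernel_map mapping is removed first, then the object reference is dropped, then the bookkeeping arrays are freed.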
Index: sys/dev/drm/savage_bci.c
===================================================================
--- sys/dev/drm/savage_bci.c	(revision 203846)
+++ sys/dev/drm/savage_bci.c	(working copy)
@@ -376,7 +376,7 @@
 cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);
 if (cur + nr_pages < dev_priv->nr_dma_pages) {
- dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
+ dma_ptr = (uint32_t *)dev_priv->cmd_dma->virtual +
 cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
 if (n < rest)
 rest = n;
@@ -392,7 +392,7 @@
 dev_priv->dma_pages[i].used = 0;
 dev_priv->dma_pages[i].flushed = 0;
 }
- dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle;
+ dma_ptr = (uint32_t *)dev_priv->cmd_dma->virtual;
 dev_priv->first_dma_page = cur = 0;
 }
 for (i = cur; nr_pages > 0; ++i, --nr_pages) {
@@ -443,7 +443,7 @@
 /* pad with noops */
 if (pad) {
- uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
+ uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->virtual +
 cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
 dev_priv->dma_pages[cur].used += pad;
 while (pad != 0) {
@@ -517,7 +517,7 @@
 for (i = dev_priv->first_dma_page;
 i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
 ++i) {
- uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
+ uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->virtual +
 i * SAVAGE_DMA_PAGE_SIZE;
 #if SAVAGE_DMA_DEBUG
 /* Sanity check: all pages except the last one must be full.
 */
@@ -784,7 +784,7 @@
 return -EINVAL;
 }
 drm_core_ioremap(dev_priv->cmd_dma, dev);
- if (!dev_priv->cmd_dma->handle) {
+ if (!dev_priv->cmd_dma->virtual) {
 DRM_ERROR("failed to ioremap command "
 "DMA region!\n");
 savage_do_cleanup_bci(dev);
@@ -806,9 +806,9 @@
 dev_priv->fake_dma.offset = 0;
 dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
 dev_priv->fake_dma.type = _DRM_SHM;
- dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
+ dev_priv->fake_dma.virtual = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
 DRM_MEM_DRIVER);
- if (!dev_priv->fake_dma.handle) {
+ if (!dev_priv->fake_dma.virtual) {
 DRM_ERROR("could not allocate faked DMA buffer!\n");
 savage_do_cleanup_bci(dev);
 return -ENOMEM;
@@ -818,7 +818,7 @@
 }
 dev_priv->sarea_priv =
- (drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle +
+ (drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->virtual +
 init->sarea_priv_offset);
 /* setup bitmap descriptors */
@@ -857,7 +857,7 @@
 dev_priv->event_counter = 0;
 dev_priv->event_wrap = 0;
 dev_priv->bci_ptr = (volatile uint32_t *)
- ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
+ ((uint8_t *)dev_priv->mmio->virtual + SAVAGE_BCI_OFFSET);
 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
 dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
 } else {
@@ -865,7 +865,7 @@
 }
 if (dev_priv->status != NULL) {
 dev_priv->status_ptr =
- (volatile uint32_t *)dev_priv->status->handle;
+ (volatile uint32_t *)dev_priv->status->virtual;
 dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
 dev_priv->wait_evnt = savage_bci_wait_event_shadow;
 dev_priv->status_ptr[1023] = dev_priv->event_counter;
@@ -905,16 +905,16 @@
 drm_savage_private_t *dev_priv = dev->dev_private;
 if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
- if (dev_priv->fake_dma.handle)
- drm_free(dev_priv->fake_dma.handle,
+ if (dev_priv->fake_dma.virtual)
+ drm_free(dev_priv->fake_dma.virtual,
 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
- } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
+ } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->virtual &&
 dev_priv->cmd_dma->type == _DRM_AGP &&
 dev_priv->dma_type == SAVAGE_DMA_AGP)
 drm_core_ioremapfree(dev_priv->cmd_dma, dev);
 if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
- dev->agp_buffer_map && dev->agp_buffer_map->handle) {
+ dev->agp_buffer_map && dev->agp_buffer_map->virtual) {
 drm_core_ioremapfree(dev->agp_buffer_map, dev);
 /* make sure the next instance (which may be running
 * in PCI mode) doesn't try to use an old
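The savage hunks above show the pattern repeated across every driver in this patch: drm_core_ioremap() fills in map->virtual with the kernel mapping, and CPU-side pointers into the region are formed by adding a byte offset to that base. A minimal sketch of the arithmetic, assuming a drm_local_map as redefined in the drmP.h section of this patch (the helper itself is illustrative):

    #include <stdint.h>

    static inline uint32_t *
    map_ptr32(struct drm_local_map *map, unsigned long offset)
    {
        /* map->virtual is the kernel virtual base established by ioremap. */
        return ((uint32_t *)((char *)map->virtual + offset));
    }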
Index: sys/dev/drm/radeon_cp.c
===================================================================
--- sys/dev/drm/radeon_cp.c	(revision 203846)
+++ sys/dev/drm/radeon_cp.c	(working copy)
@@ -53,7 +53,7 @@
 val = DRM_READ32(dev_priv->ring_rptr, off);
 } else {
 val = *(((volatile u32 *)
- dev_priv->ring_rptr->handle) +
+ dev_priv->ring_rptr->virtual) +
 (off / sizeof(u32)));
 val = le32_to_cpu(val);
 }
@@ -77,7 +77,7 @@
 if (dev_priv->flags & RADEON_IS_AGP)
 DRM_WRITE32(dev_priv->ring_rptr, off, val);
 else
- *(((volatile u32 *) dev_priv->ring_rptr->handle) +
+ *(((volatile u32 *) dev_priv->ring_rptr->virtual) +
 (off / sizeof(u32))) = cpu_to_le32(val);
 }
@@ -720,9 +720,8 @@
 + dev_priv->gart_vm_start);
 } else
 #endif
- ring_start = (dev_priv->cp_ring->offset
- - (unsigned long)dev->sg->virtual
- + dev_priv->gart_vm_start);
+ ring_start = (dev_priv->cp_ring->offset - dev->sg->vaddr
+ + dev_priv->gart_vm_start);
 RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);
@@ -744,9 +743,8 @@
 #endif
 {
 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
- dev_priv->ring_rptr->offset
- - ((unsigned long) dev->sg->virtual)
- + dev_priv->gart_vm_start);
+ dev_priv->ring_rptr->offset - dev->sg->vaddr +
+ dev_priv->gart_vm_start);
 }
 /* Set ring buffer size */
@@ -1278,7 +1276,7 @@
 }
 dev_priv->sarea_priv =
- (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+ (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->virtual +
 init->sarea_priv_offset);
 #if __OS_HAS_AGP
@@ -1286,9 +1284,9 @@
 drm_core_ioremap_wc(dev_priv->cp_ring, dev);
 drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
 drm_core_ioremap_wc(dev->agp_buffer_map, dev);
- if (!dev_priv->cp_ring->handle ||
- !dev_priv->ring_rptr->handle ||
- !dev->agp_buffer_map->handle) {
+ if (!dev_priv->cp_ring->virtual ||
+ !dev_priv->ring_rptr->virtual ||
+ !dev->agp_buffer_map->virtual) {
 DRM_ERROR("could not find ioremap agp regions!\n");
 radeon_do_cleanup_cp(dev);
 return -EINVAL;
@@ -1296,19 +1294,19 @@
 } else
 #endif
 {
- dev_priv->cp_ring->handle =
+ dev_priv->cp_ring->virtual =
 (void *)(unsigned long)dev_priv->cp_ring->offset;
- dev_priv->ring_rptr->handle =
+ dev_priv->ring_rptr->virtual =
 (void *)(unsigned long)dev_priv->ring_rptr->offset;
- dev->agp_buffer_map->handle =
+ dev->agp_buffer_map->virtual =
 (void *)(unsigned long)dev->agp_buffer_map->offset;
- DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
- dev_priv->cp_ring->handle);
- DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
- dev_priv->ring_rptr->handle);
- DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
- dev->agp_buffer_map->handle);
+ DRM_DEBUG("dev_priv->cp_ring->virtual %p\n",
+ dev_priv->cp_ring->virtual);
+ DRM_DEBUG("dev_priv->ring_rptr->virtual %p\n",
+ dev_priv->ring_rptr->virtual);
+ DRM_DEBUG("dev->agp_buffer_map->virtual %p\n",
+ dev->agp_buffer_map->virtual);
 }
 dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
@@ -1377,17 +1375,16 @@
 + dev_priv->gart_vm_start);
 else
 #endif
- dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
- - (unsigned long)dev->sg->virtual
- + dev_priv->gart_vm_start);
+ dev_priv->gart_buffers_offset = dev->agp_buffer_map->offset -
+ dev->sg->vaddr + dev_priv->gart_vm_start;
 DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
 DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
 DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
 dev_priv->gart_buffers_offset);
- dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
- dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ dev_priv->ring.start = (u32 *) dev_priv->cp_ring->virtual;
+ dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->virtual
 + init->ring_size / sizeof(u32));
 dev_priv->ring.size = init->ring_size;
 dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
@@ -1423,7 +1420,7 @@
 drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
 dev_priv->gart_info.addr =
- dev_priv->gart_info.mapping.handle;
+ dev_priv->gart_info.mapping.virtual;
 if (dev_priv->flags & RADEON_IS_PCIE)
 dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
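In the non-AGP paths above the addresses programmed into the chip are GART-relative: a map whose offset lies inside the scatter-gather backing store is rebased from the SG area's kernel virtual address (now dev->sg->vaddr, previously the dev->sg->virtual pointer) to the card's GART aperture at gart_vm_start. A small sketch of that computation under the field names the patch uses; the wrapper function is illustrative only:

    static u32
    gart_bus_addr(struct drm_local_map *map, struct drm_sg_mem *sg,
        u32 gart_vm_start)
    {
        /* map->offset was set to sg->vaddr + offset when the map was added. */
        return (map->offset - sg->vaddr + gart_vm_start);
    }

The same expression recurs below for the r128 ring start, the r600 ring/read-pointer setup, and gart_buffers_offset.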
Index: sys/dev/drm/mach64_dma.c
===================================================================
--- sys/dev/drm/mach64_dma.c	(revision 203846)
+++ sys/dev/drm/mach64_dma.c	(working copy)
@@ -1078,11 +1078,11 @@
 }
 dev_priv->sarea_priv = (drm_mach64_sarea_t *)
- ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
+ ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);
 if (!dev_priv->is_pci) {
 drm_core_ioremap(dev_priv->ring_map, dev);
- if (!dev_priv->ring_map->handle) {
+ if (!dev_priv->ring_map->virtual) {
 DRM_ERROR("can not ioremap virtual address for"
 " descriptor ring\n");
 dev->dev_private = (void *)dev_priv;
@@ -1103,7 +1103,7 @@
 dev_priv->dev_buffers = dev->agp_buffer_map;
 drm_core_ioremap(dev->agp_buffer_map, dev);
- if (!dev->agp_buffer_map->handle) {
+ if (!dev->agp_buffer_map->virtual) {
 DRM_ERROR("can not ioremap virtual address for"
 " dma buffer\n");
 dev->dev_private = (void *)dev_priv;
@@ -1147,7 +1147,7 @@
 }
 dev_priv->ring.size = 0x4000; /* 16KB */
- dev_priv->ring.start = dev_priv->ring_map->handle;
+ dev_priv->ring.start = dev_priv->ring_map->virtual;
 dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset;
 memset(dev_priv->ring.start, 0, dev_priv->ring.size);
Index: sys/dev/drm/r128_cce.c
===================================================================
--- sys/dev/drm/r128_cce.c	(revision 203846)
+++ sys/dev/drm/r128_cce.c	(working copy)
@@ -327,8 +327,7 @@
 ring_start = dev_priv->cce_ring->offset - dev->agp->base;
 else
 #endif
- ring_start = dev_priv->cce_ring->offset -
- (unsigned long)dev->sg->virtual;
+ ring_start = dev_priv->cce_ring->offset - dev->sg->vaddr;
 R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET);
@@ -509,7 +508,7 @@
 }
 dev_priv->sarea_priv =
- (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+ (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->virtual +
 init->sarea_priv_offset);
 #if __OS_HAS_AGP
@@ -517,9 +516,9 @@
 drm_core_ioremap(dev_priv->cce_ring, dev);
 drm_core_ioremap(dev_priv->ring_rptr, dev);
 drm_core_ioremap(dev->agp_buffer_map, dev);
- if (!dev_priv->cce_ring->handle ||
- !dev_priv->ring_rptr->handle ||
- !dev->agp_buffer_map->handle) {
+ if (!dev_priv->cce_ring->virtual ||
+ !dev_priv->ring_rptr->virtual ||
+ !dev->agp_buffer_map->virtual) {
 DRM_ERROR("Could not ioremap agp regions!\n");
 dev->dev_private = (void *)dev_priv;
 r128_do_cleanup_cce(dev);
@@ -528,10 +527,11 @@
 } else
 #endif
 {
- dev_priv->cce_ring->handle = (void *)dev_priv->cce_ring->offset;
- dev_priv->ring_rptr->handle =
+ dev_priv->cce_ring->virtual =
+ (void *)dev_priv->cce_ring->offset;
+ dev_priv->ring_rptr->virtual =
 (void *)dev_priv->ring_rptr->offset;
- dev->agp_buffer_map->handle =
+ dev->agp_buffer_map->virtual =
 (void *)dev->agp_buffer_map->offset;
 }
@@ -540,10 +540,10 @@
 dev_priv->cce_buffers_offset = dev->agp->base;
 else
 #endif
- dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;
+ dev_priv->cce_buffers_offset = dev->sg->vaddr;
- dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle;
- dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
+ dev_priv->ring.start = (u32 *) dev_priv->cce_ring->virtual;
+ dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->virtual
 + init->ring_size / sizeof(u32));
 dev_priv->ring.size = init->ring_size;
 dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
Index: sys/dev/drm/i915_dma.c
===================================================================
--- sys/dev/drm/i915_dma.c	(revision 203846)
+++ sys/dev/drm/i915_dma.c	(working copy)
@@ -151,7 +151,7 @@
 if (dev_priv->ring.virtual_start) {
 drm_core_ioremapfree(&dev_priv->ring.map, dev);
 dev_priv->ring.virtual_start = NULL;
- dev_priv->ring.map.handle = NULL;
+ dev_priv->ring.map.virtual = NULL;
 dev_priv->ring.map.size = 0;
 }
@@ -174,7 +174,7 @@
 }
 dev_priv->sarea_priv = (drm_i915_sarea_t *)
- ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
+ ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);
 if (init->ring_size != 0) {
 if (dev_priv->ring.ring_obj != NULL) {
@@ -195,7 +195,7 @@
 drm_core_ioremap_wc(&dev_priv->ring.map, dev);
- if (dev_priv->ring.map.handle == NULL) {
+ if (dev_priv->ring.map.virtual == NULL) {
 i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); @@ -203,7 +203,7 @@ } } - dev_priv->ring.virtual_start = dev_priv->ring.map.handle; + dev_priv->ring.virtual_start = dev_priv->ring.map.virtual; dev_priv->cpp = init->cpp; dev_priv->back_offset = init->back_offset; @@ -229,7 +229,7 @@ return -EINVAL; } - if (dev_priv->ring.map.handle == NULL) { + if (dev_priv->ring.map.virtual == NULL) { DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); return -ENOMEM; @@ -823,14 +823,14 @@ dev_priv->hws_map.mtrr = 0; drm_core_ioremap_wc(&dev_priv->hws_map, dev); - if (dev_priv->hws_map.handle == NULL) { + if (dev_priv->hws_map.virtual == NULL) { i915_dma_cleanup(dev); dev_priv->status_gfx_addr = 0; DRM_ERROR("can not ioremap virtual address for" " G33 hw status page\n"); return -ENOMEM; } - dev_priv->hw_status_page = dev_priv->hws_map.handle; + dev_priv->hw_status_page = dev_priv->hws_map.virtual; memset(dev_priv->hw_status_page, 0, PAGE_SIZE); I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); Index: sys/dev/drm/r128_state.c =================================================================== --- sys/dev/drm/r128_state.c (revision 203846) +++ sys/dev/drm/r128_state.c (working copy) @@ -657,7 +657,7 @@ */ if (dwords & 1) { u32 *data = (u32 *) - ((char *)dev->agp_buffer_map->handle + ((char *)dev->agp_buffer_map->virtual + buf->offset + start); data[dwords++] = cpu_to_le32(R128_CCE_PACKET2); } @@ -722,7 +722,7 @@ dwords = (end - start + 3) / sizeof(u32); - data = (u32 *) ((char *)dev->agp_buffer_map->handle + data = (u32 *) ((char *)dev->agp_buffer_map->virtual + buf->offset + start); data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, Index: sys/dev/drm/drmP.h =================================================================== --- sys/dev/drm/drmP.h (revision 203846) +++ sys/dev/drm/drmP.h (working copy) @@ -49,6 +49,7 @@ #include #include #include +#include #include #if __FreeBSD_version >= 700000 #include @@ -66,8 +67,13 @@ #include #include #include +#include #include +#include +#include +#include #include +#include #include #include #include @@ -228,22 +234,22 @@ #define DRM_MEMORYBARRIER() mb() #define DRM_READ8(map, offset) \ - *(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \ + *(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset)) #define DRM_READ16(map, offset) \ - *(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \ + *(volatile u_int16_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset)) #define DRM_READ32(map, offset) \ - *(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \ + *(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset)) #define DRM_WRITE8(map, offset, val) \ - *(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \ + *(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset)) = val #define DRM_WRITE16(map, offset, val) \ - *(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \ + *(volatile u_int16_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset)) = val #define DRM_WRITE32(map, offset, val) \ - *(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \ + *(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset)) = val #define DRM_VERIFYAREA_READ( uaddr, size ) \ @@ -463,25 +469,27 @@ } drm_agp_head_t; typedef struct drm_sg_mem { - unsigned long handle; - void *virtual; - int pages; - dma_addr_t *busaddr; - struct drm_dma_handle *dmah; /* Handle to PCI memory */ + vm_offset_t vaddr; + vm_paddr_t *busaddr; + vm_pindex_t 
Index: sys/dev/drm/drmP.h
===================================================================
--- sys/dev/drm/drmP.h	(revision 203846)
+++ sys/dev/drm/drmP.h	(working copy)
@@ -49,6 +49,7 @@
 #include
 #include
 #include
+#include
 #include
 #if __FreeBSD_version >= 700000
 #include
@@ -66,8 +67,13 @@
 #include
 #include
 #include
+#include
 #include
+#include
+#include
+#include
 #include
+#include
 #include
 #include
 #include
@@ -228,22 +234,22 @@
 #define DRM_MEMORYBARRIER() mb()
 #define DRM_READ8(map, offset) \
- *(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \
+ *(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) + \
 (vm_offset_t)(offset))
 #define DRM_READ16(map, offset) \
- *(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \
+ *(volatile u_int16_t *)(((vm_offset_t)(map)->virtual) + \
 (vm_offset_t)(offset))
 #define DRM_READ32(map, offset) \
- *(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \
+ *(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) + \
 (vm_offset_t)(offset))
 #define DRM_WRITE8(map, offset, val) \
- *(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \
+ *(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) + \
 (vm_offset_t)(offset)) = val
 #define DRM_WRITE16(map, offset, val) \
- *(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \
+ *(volatile u_int16_t *)(((vm_offset_t)(map)->virtual) + \
 (vm_offset_t)(offset)) = val
 #define DRM_WRITE32(map, offset, val) \
- *(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \
+ *(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) + \
 (vm_offset_t)(offset)) = val
 #define DRM_VERIFYAREA_READ( uaddr, size ) \
@@ -463,25 +469,27 @@
 } drm_agp_head_t;
 typedef struct drm_sg_mem {
- unsigned long handle;
- void *virtual;
- int pages;
- dma_addr_t *busaddr;
- struct drm_dma_handle *dmah; /* Handle to PCI memory */
+ vm_offset_t vaddr;
+ vm_paddr_t *busaddr;
+ vm_pindex_t pages;
+ vm_object_t obj;
 } drm_sg_mem_t;
+#define DRM_MAP_HANDLE_BITS (sizeof(void *) == 4 ? 4 : 24)
+#define DRM_MAP_HANDLE_SHIFT (sizeof(void *) * 8 - DRM_MAP_HANDLE_BITS)
 typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;
 typedef struct drm_local_map {
- unsigned long offset; /* Physical address (0 for SAREA)*/
- unsigned long size; /* Physical size (bytes) */
- enum drm_map_type type; /* Type of memory mapped */
- enum drm_map_flags flags; /* Flags */
- void *handle; /* User-space: "Handle" to pass to mmap */
- /* Kernel-space: kernel-virtual address */
- int mtrr; /* Boolean: MTRR used */
- /* Private data */
- int rid; /* PCI resource ID for bus_space */
+ unsigned long offset; /* Physical address (0 for SAREA) */
+ unsigned long size; /* Physical size (bytes) */
+ enum drm_map_type type; /* Type of memory mapped */
+ enum drm_map_flags flags; /* Flags */
+ void *handle; /* User-space: "Handle" to pass to mmap */
+ /* Kernel-space: kernel-virtual address */
+ int mtrr; /* Boolean: MTRR used */
+ /* Private data */
+ int rid; /* PCI resource ID for bus_space */
+ void *virtual; /* Kernel-space: kernel-virtual address */
 struct resource *bsr;
 bus_space_tag_t bst;
 bus_space_handle_t bsh;
@@ -632,6 +640,7 @@
 /* Linked list of mappable regions. Protected by dev_lock */
 drm_map_list_t maplist;
+ struct unrhdr *map_unrhdr;
 drm_local_map_t **context_sareas;
 int max_context;
@@ -710,7 +719,7 @@
 d_open_t drm_open;
 d_read_t drm_read;
 d_poll_t drm_poll;
-d_mmap_t drm_mmap;
+d_mmap2_t drm_mmap;
 extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
 /* File operations helpers (drm_fops.c) */
@@ -962,17 +971,17 @@
 static __inline__ void
 drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
 {
- map->handle = drm_ioremap_wc(dev, map);
+ map->virtual = drm_ioremap_wc(dev, map);
 }
 static __inline__ void
 drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
 {
- map->handle = drm_ioremap(dev, map);
+ map->virtual = drm_ioremap(dev, map);
 }
 static __inline__ void
 drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
 {
- if ( map->handle && map->size )
+ if ( map->virtual && map->size )
 drm_ioremapfree(map);
 }
@@ -983,7 +992,7 @@
 DRM_SPINLOCK_ASSERT(&dev->dev_lock);
 TAILQ_FOREACH(map, &dev->maplist, link) {
- if (map->offset == offset)
+ if (offset == (unsigned long)map->handle)
 return map;
 }
 return NULL;
 }
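The DRM_MAP_HANDLE_BITS / DRM_MAP_HANDLE_SHIFT definitions above fix the layout of the opaque handles: the top bits carry the unit number, the remaining low bits carry the byte offset within the map. A worked illustration of the resulting budget (the EX_* macros are illustrative, not part of the header); on LP64 the shift is 64 - 24 = 40, so handles are multiples of 1UL << 40 and up to (1 << 24) - 1 maps fit per device, while on 32-bit platforms the shift is 28 and at most 15 maps fit:

    /* Assumes DRM_MAP_HANDLE_SHIFT from drmP.h; unsigned long is the
     * pointer-sized type on both ILP32 and LP64 FreeBSD. */
    #define EX_HANDLE(unr)   ((unsigned long)(unr) << DRM_MAP_HANDLE_SHIFT)
    #define EX_UNR(off)      ((unsigned long)(off) >> DRM_MAP_HANDLE_SHIFT)
    #define EX_OFFSET(off)   ((off) & ((1UL << DRM_MAP_HANDLE_SHIFT) - 1))

drm_core_findmap() above is adjusted to match a user-visible offset against map->handle rather than map->offset, which is what makes the opaque tokens usable as lookup keys.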
Index: sys/dev/drm/radeon_state.c
===================================================================
--- sys/dev/drm/radeon_state.c	(revision 203846)
+++ sys/dev/drm/radeon_state.c	(working copy)
@@ -1420,7 +1420,7 @@
 static void radeon_cp_dispatch_flip(struct drm_device *dev)
 {
 drm_radeon_private_t *dev_priv = dev->dev_private;
- struct drm_sarea *sarea = (struct drm_sarea *)dev_priv->sarea->handle;
+ struct drm_sarea *sarea = (struct drm_sarea *)dev_priv->sarea->virtual;
 int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
 ? dev_priv->front_offset : dev_priv->back_offset;
 RING_LOCALS;
@@ -1582,7 +1582,7 @@
 */
 if (dwords & 1) {
 u32 *data = (u32 *)
- ((char *)dev->agp_buffer_map->handle
+ ((char *)dev->agp_buffer_map->virtual
 + buf->offset + start);
 data[dwords++] = RADEON_CP_PACKET2;
 }
@@ -1629,7 +1629,7 @@
 dwords = (prim->finish - prim->start + 3) / sizeof(u32);
- data = (u32 *) ((char *)dev->agp_buffer_map->handle +
+ data = (u32 *) ((char *)dev->agp_buffer_map->virtual +
 elt_buf->offset + prim->start);
 data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2);
@@ -1781,7 +1781,7 @@
 /* Dispatch the indirect buffer.
 */
 buffer =
- (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
+ (u32 *) ((char *)dev->agp_buffer_map->virtual + buf->offset);
 dwords = size / 4;
 #define RADEON_COPY_MT(_buf, _data, _width) \
Index: sys/dev/drm/radeon_cs.c
===================================================================
--- sys/dev/drm/radeon_cs.c	(revision 203846)
+++ sys/dev/drm/radeon_cs.c	(working copy)
@@ -821,7 +821,7 @@
 }
 buf->file_priv = parser->file_priv;
 dev_priv->cs_buf = buf;
- parser->ib = (void *)((vm_offset_t)dev->agp_buffer_map->handle +
+ parser->ib = (void *)((vm_offset_t)dev->agp_buffer_map->virtual +
 buf->offset);
 return 0;
Index: sys/dev/drm/drm_drv.c
===================================================================
--- sys/dev/drm/drm_drv.c	(revision 203846)
+++ sys/dev/drm/drm_drv.c	(working copy)
@@ -126,9 +126,9 @@
 .d_read = drm_read,
 .d_ioctl = drm_ioctl,
 .d_poll = drm_poll,
- .d_mmap = drm_mmap,
+ .d_mmap2 = drm_mmap,
 .d_name = "drm",
- .d_flags = D_TRACKCLOSE
+ .d_flags = D_TRACKCLOSE | D_MMAP2
 };
 static int drm_msi = 1; /* Enable by default. */
@@ -434,7 +434,13 @@
 DRM_DEBUG("\n");
 TAILQ_INIT(&dev->maplist);
+ dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL);
+ if (dev->map_unrhdr == NULL) {
+ DRM_ERROR("Couldn't allocate map number allocator\n");
+ return EINVAL;
+ }
+
 drm_mem_init();
 drm_sysctl_init(dev);
 TAILQ_INIT(&dev->files);
@@ -565,6 +571,7 @@
 }
 delete_unrhdr(dev->drw_unrhdr);
+ delete_unrhdr(dev->map_unrhdr);
 drm_mem_uninit();
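The drm_drv.c hunks above register the mmap handler through the d_mmap2 cdevsw slot (with D_MMAP2 in d_flags) so the handler can report a memory attribute back to the VM system, and create/destroy the per-device unit-number allocator at load/unload time. A sketch of the handler shape this relies on, assuming the same prototype the drm_vm.c hunk below uses for drm_mmap (the body is a placeholder):

    static int
    example_mmap2(struct cdev *kdev, vm_offset_t offset, vm_paddr_t *paddr,
        int prot, vm_memattr_t *memattr)
    {
        *paddr = 0;                               /* physical page backing "offset" */
        *memattr = VM_MEMATTR_WRITE_COMBINING;    /* optional caching override */
        return (0);
    }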
Index: sys/dev/drm/r600_cp.c
===================================================================
--- sys/dev/drm/r600_cp.c	(revision 203846)
+++ sys/dev/drm/r600_cp.c	(working copy)
@@ -180,7 +180,7 @@
 entry_addr = entry->busaddr[i];
 for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
 page_base = (u64) entry_addr & ATI_PCIGART_PAGE_MASK;
- page_base |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+ page_base |= R600_PTE_VALID | R600_PTE_SYSTEM;
 page_base |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
 *pci_gart = page_base;
@@ -1670,9 +1670,8 @@
 } else
 #endif
 {
- rptr_addr = dev_priv->ring_rptr->offset
- - ((unsigned long) dev->sg->virtual)
- + dev_priv->gart_vm_start;
+ rptr_addr = dev_priv->ring_rptr->offset - dev->sg->vaddr +
+ dev_priv->gart_vm_start;
 }
 RADEON_WRITE(R600_CP_RB_RPTR_ADDR, rptr_addr & 0xffffffff);
@@ -1706,9 +1705,8 @@
 + dev_priv->gart_vm_start);
 } else
 #endif
- ring_start = (dev_priv->cp_ring->offset
- - (unsigned long)dev->sg->virtual
- + dev_priv->gart_vm_start);
+ ring_start = dev_priv->cp_ring->offset - dev->sg->vaddr +
+ dev_priv->gart_vm_start;
 RADEON_WRITE(R600_CP_RB_BASE, ring_start >> 8);
@@ -1914,7 +1912,7 @@
 }
 dev_priv->sarea_priv =
- (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+ (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->virtual +
 init->sarea_priv_offset);
 #if __OS_HAS_AGP
@@ -1923,9 +1921,9 @@
 drm_core_ioremap_wc(dev_priv->cp_ring, dev);
 drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
 drm_core_ioremap_wc(dev->agp_buffer_map, dev);
- if (!dev_priv->cp_ring->handle ||
- !dev_priv->ring_rptr->handle ||
- !dev->agp_buffer_map->handle) {
+ if (!dev_priv->cp_ring->virtual ||
+ !dev_priv->ring_rptr->virtual ||
+ !dev->agp_buffer_map->virtual) {
 DRM_ERROR("could not find ioremap agp regions!\n");
 r600_do_cleanup_cp(dev);
 return -EINVAL;
@@ -1933,18 +1931,19 @@
 } else
 #endif
 {
- dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset;
- dev_priv->ring_rptr->handle =
+ dev_priv->cp_ring->virtual =
+ (void *)dev_priv->cp_ring->offset;
+ dev_priv->ring_rptr->virtual =
 (void *)dev_priv->ring_rptr->offset;
- dev->agp_buffer_map->handle =
+ dev->agp_buffer_map->virtual =
 (void *)dev->agp_buffer_map->offset;
- DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
- dev_priv->cp_ring->handle);
- DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
- dev_priv->ring_rptr->handle);
- DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
- dev->agp_buffer_map->handle);
+ DRM_DEBUG("dev_priv->cp_ring->virtual %p\n",
+ dev_priv->cp_ring->virtual);
+ DRM_DEBUG("dev_priv->ring_rptr->virtual %p\n",
+ dev_priv->ring_rptr->virtual);
+ DRM_DEBUG("dev->agp_buffer_map->virtual %p\n",
+ dev->agp_buffer_map->virtual);
 }
 dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 24;
@@ -2011,9 +2010,8 @@
 + dev_priv->gart_vm_start);
 else
 #endif
- dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
- - (unsigned long)dev->sg->virtual
- + dev_priv->gart_vm_start);
+ dev_priv->gart_buffers_offset = dev->agp_buffer_map->offset -
+ dev->sg->vaddr + dev_priv->gart_vm_start;
 DRM_DEBUG("fb 0x%08x size %d\n",
 (unsigned int) dev_priv->fb_location,
@@ -2024,8 +2022,8 @@
 DRM_DEBUG("dev_priv->gart_buffers_offset 0x%08lx\n",
 dev_priv->gart_buffers_offset);
- dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
- dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ dev_priv->ring.start = (u32 *) dev_priv->cp_ring->virtual;
+ dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->virtual
 + init->ring_size / sizeof(u32));
 dev_priv->ring.size = init->ring_size;
 dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
@@ -2064,14 +2062,14 @@
 dev_priv->gart_info.table_size;
 drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
- if (!dev_priv->gart_info.mapping.handle) {
+ if (!dev_priv->gart_info.mapping.virtual) {
 DRM_ERROR("ioremap failed.\n");
 r600_do_cleanup_cp(dev);
 return -EINVAL;
 }
 dev_priv->gart_info.addr =
- dev_priv->gart_info.mapping.handle;
+ dev_priv->gart_info.mapping.virtual;
 DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
 dev_priv->gart_info.addr,
@@ -2219,7 +2217,7 @@
 */
 while (dwords & 0xf) {
 u32 *data = (u32 *)
- ((char *)dev->agp_buffer_map->handle
+ ((char *)dev->agp_buffer_map->virtual
 + buf->offset + start);
 data[dwords++] = RADEON_CP_PACKET2;
 }
@@ -2343,7 +2341,8 @@
 /* Dispatch the indirect buffer.
 */
 buffer =
- (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
+ (u32 *) ((char *)dev->agp_buffer_map->virtual +
+ buf->offset);
 if (DRM_COPY_FROM_USER(buffer, data, pass_size)) {
 DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size);
Index: sys/dev/drm/drm_memory.c
===================================================================
--- sys/dev/drm/drm_memory.c	(revision 203846)
+++ sys/dev/drm/drm_memory.c	(working copy)
@@ -81,7 +81,7 @@
 void
 drm_ioremapfree(drm_local_map_t *map)
 {
- pmap_unmapdev((vm_offset_t) map->handle, map->size);
+ pmap_unmapdev((vm_offset_t) map->virtual, map->size);
 }
 int
Index: sys/dev/drm/drm_sysctl.c
===================================================================
--- sys/dev/drm/drm_sysctl.c	(revision 203846)
+++ sys/dev/drm/drm_sysctl.c	(working copy)
@@ -188,7 +188,7 @@
 DRM_UNLOCK();
 DRM_SYSCTL_PRINT("\nslot offset size "
- "type flags address mtrr\n");
+ "type flags address handle mtrr\n");
 for (i = 0; i < mapcount; i++) {
 map = &tempmaps[i];
@@ -204,9 +204,11 @@
 yesno = "yes";
 DRM_SYSCTL_PRINT(
- "%4d 0x%016lx 0x%08lx %4.4s 0x%02x 0x%016lx %s\n", i,
- map->offset, map->size, type, map->flags,
- (unsigned long)map->handle, yesno);
+ "%4d 0x%016lx 0x%08lx %4.4s 0x%02x 0x%016lx %6d %s\n",
+ i, map->offset, map->size, type, map->flags,
+ (unsigned long)map->virtual,
+ (unsigned int)((unsigned long)map->handle >>
+ DRM_MAP_HANDLE_SHIFT), yesno);
 }
 SYSCTL_OUT(req, "", 1);
Index: sys/dev/drm/drm_vm.c
===================================================================
--- sys/dev/drm/drm_vm.c	(revision 203846)
+++ sys/dev/drm/drm_vm.c	(working copy)
@@ -32,7 +32,7 @@
 #include "dev/drm/drm.h"
 int drm_mmap(struct cdev *kdev, vm_offset_t offset, vm_paddr_t *paddr,
- int prot)
+ int prot, vm_memattr_t *memattr)
 {
 struct drm_device *dev = drm_get_device_from_kdev(kdev);
 struct drm_file *file_priv = NULL;
@@ -54,6 +54,7 @@
 if (file_priv && !file_priv->authenticated)
 return EACCES;
+ DRM_DEBUG("called with offset %016zx\n", offset);
 if (dev->dma && offset < ptoa(dev->dma->page_count)) {
 drm_device_dma_t *dma = dev->dma;
@@ -72,31 +73,31 @@
 }
 }
- /* A sequential search of a linked list is
- fine here because: 1) there will only be
- about 5-10 entries in the list and, 2) a
- DRI client only has to do this mapping
- once, so it doesn't have to be optimized
- for performance, even if the list was a
- bit longer. */
+ /* A sequential search of a linked list is
+ fine here because: 1) there will only be
+ about 5-10 entries in the list and, 2) a
+ DRI client only has to do this mapping
+ once, so it doesn't have to be optimized
+ for performance, even if the list was a
+ bit longer.
+ */
 DRM_LOCK();
 TAILQ_FOREACH(map, &dev->maplist, link) {
- if (offset >= map->offset && offset < map->offset + map->size)
+ if (offset >> DRM_MAP_HANDLE_SHIFT ==
+ (unsigned long)map->handle >> DRM_MAP_HANDLE_SHIFT)
 break;
 }
 if (map == NULL) {
- DRM_DEBUG("Can't find map, requested offset = %016lx\n",
- (unsigned long)offset);
+ DRM_DEBUG("Can't find map, request offset = %016zx\n", offset);
 TAILQ_FOREACH(map, &dev->maplist, link) {
- DRM_DEBUG("map offset = %016lx, handle = %016lx\n",
- (unsigned long)map->offset,
- (unsigned long)map->handle);
+ DRM_DEBUG("map offset = %016zx, handle = %016lx\n",
+ map->offset, (unsigned long)map->handle);
 }
 DRM_UNLOCK();
 return -1;
 }
- if (((map->flags&_DRM_RESTRICTED) && !DRM_SUSER(DRM_CURPROC))) {
+ if (((map->flags & _DRM_RESTRICTED) && !DRM_SUSER(DRM_CURPROC))) {
 DRM_UNLOCK();
 DRM_DEBUG("restricted map\n");
 return -1;
@@ -104,18 +105,22 @@
 type = map->type;
 DRM_UNLOCK();
+ offset = offset & ((1ULL << DRM_MAP_HANDLE_SHIFT) - 1);
+
 switch (type) {
 case _DRM_FRAME_BUFFER:
+ case _DRM_AGP:
+ *memattr = VM_MEMATTR_WRITE_COMBINING;
+ /* FALLTHROUGH */
 case _DRM_REGISTERS:
- case _DRM_AGP:
- phys = offset;
+ phys = map->offset + offset;
 break;
+ case _DRM_SCATTER_GATHER:
+ *memattr = VM_MEMATTR_WRITE_COMBINING;
+ /* FALLTHROUGH */
 case _DRM_CONSISTENT:
- phys = vtophys((char *)map->handle + (offset - map->offset));
- break;
- case _DRM_SCATTER_GATHER:
 case _DRM_SHM:
- phys = vtophys(offset);
+ phys = vtophys((char *)map->virtual + offset);
 break;
 default:
 DRM_ERROR("bad map type %d\n", type);
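The drm_mmap() rework above is the consumer side of the new handles: the top DRM_MAP_HANDLE_BITS of the mmap offset select the map, the low bits are the byte offset within it, and the memattr out-parameter requests write-combining for frame buffer, AGP and scatter-gather ranges. A condensed restatement of the lookup step, using the same fields and constants (the function wrapper is illustrative):

    static struct drm_local_map *
    example_find_map(struct drm_device *dev, vm_offset_t offset,
        vm_offset_t *map_off)
    {
        struct drm_local_map *map;

        TAILQ_FOREACH(map, &dev->maplist, link) {
            /* Match only the handle bits; the rest is the byte offset. */
            if (offset >> DRM_MAP_HANDLE_SHIFT ==
                (unsigned long)map->handle >> DRM_MAP_HANDLE_SHIFT) {
                *map_off = offset & ((1ULL << DRM_MAP_HANDLE_SHIFT) - 1);
                return (map);
            }
        }
        return (NULL);
    }

For register, frame buffer and AGP maps the physical address is then map->offset plus that byte offset; for SHM, consistent and scatter-gather maps it is vtophys() of map->virtual plus the offset.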
Index: sys/dev/drm/mga_dma.c
===================================================================
--- sys/dev/drm/mga_dma.c	(revision 203846)
+++ sys/dev/drm/mga_dma.c	(working copy)
@@ -585,11 +585,11 @@
 drm_core_ioremap(dev_priv->primary, dev);
 drm_core_ioremap(dev->agp_buffer_map, dev);
- if (!dev_priv->warp->handle ||
- !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
+ if (!dev_priv->warp->virtual ||
+ !dev_priv->primary->virtual || !dev->agp_buffer_map->virtual) {
 DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
- dev_priv->warp->handle, dev_priv->primary->handle,
- dev->agp_buffer_map->handle);
+ dev_priv->warp->virtual, dev_priv->primary->virtual,
+ dev->agp_buffer_map->virtual);
 return -ENOMEM;
 }
@@ -878,14 +878,14 @@
 }
 dev_priv->sarea_priv =
- (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+ (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->virtual +
 init->sarea_priv_offset);
- if (!dev_priv->warp->handle ||
- !dev_priv->primary->handle ||
+ if (!dev_priv->warp->virtual ||
+ !dev_priv->primary->virtual ||
 ((dev_priv->dma_access != 0) &&
 ((dev->agp_buffer_map == NULL) ||
- (dev->agp_buffer_map->handle == NULL)))) {
+ (dev->agp_buffer_map->virtual == NULL)))) {
 DRM_ERROR("failed to ioremap agp regions!\n");
 return -ENOMEM;
 }
@@ -902,7 +902,7 @@
 return ret;
 }
- dev_priv->prim.status = (u32 *) dev_priv->status->handle;
+ dev_priv->prim.status = (u32 *) dev_priv->status->virtual;
 mga_do_wait_for_idle(dev_priv);
@@ -910,8 +910,8 @@
 */
 MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
- dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
- dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
+ dev_priv->prim.start = (u8 *) dev_priv->primary->virtual;
+ dev_priv->prim.end = ((u8 *) dev_priv->primary->virtual
 + dev_priv->primary->size);
 dev_priv->prim.size = dev_priv->primary->size;
Index: sys/dev/drm/drm_context.c
===================================================================
--- sys/dev/drm/drm_context.c	(revision 203846)
+++ sys/dev/drm/drm_context.c	(working copy)
@@ -147,7 +147,7 @@
 map = dev->context_sareas[request->ctx_id];
 DRM_UNLOCK();
- request->handle = map->handle;
+ request->handle = (void *)map->handle;
 return 0;
 }
Index: sys/dev/drm/ati_pcigart.c
===================================================================
--- sys/dev/drm/ati_pcigart.c	(revision 203846)
+++ sys/dev/drm/ati_pcigart.c	(working copy)
@@ -39,8 +39,9 @@
 #define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */
 #define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1))
-#define ATI_PCIE_WRITE 0x4
-#define ATI_PCIE_READ 0x8
+#define ATI_GART_NOSNOOP 0x1
+#define ATI_GART_WRITE 0x4
+#define ATI_GART_READ 0x8
 static void
 drm_ati_alloc_pcigart_table_cb(void *arg, bus_dma_segment_t *segs,
@@ -196,13 +197,15 @@
 case DRM_ATI_GART_IGP:
 page_base |= (upper_32_bits(entry_addr) & 0xff) << 4;
- page_base |= 0xc;
+ page_base |= ATI_GART_READ | ATI_GART_WRITE;
+ page_base |= ATI_GART_NOSNOOP;
 break;
 case DRM_ATI_GART_PCIE:
 page_base >>= 8;
 page_base |= (upper_32_bits(entry_addr) & 0xff) << 24;
- page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE;
+ page_base |= ATI_GART_READ | ATI_GART_WRITE;
+ page_base |= ATI_GART_NOSNOOP;
 break;
 default:
 case DRM_ATI_GART_PCI:
Index: sys/dev/drm/mga_warp.c
===================================================================
--- sys/dev/drm/mga_warp.c	(revision 203846)
+++ sys/dev/drm/mga_warp.c	(working copy)
@@ -96,7 +96,7 @@
 static int mga_warp_install_g400_microcode(drm_mga_private_t * dev_priv)
 {
- unsigned char *vcbase = dev_priv->warp->handle;
+ unsigned char *vcbase = dev_priv->warp->virtual;
 unsigned long pcbase = dev_priv->warp->offset;
 memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
@@ -124,7 +124,7 @@
 static int mga_warp_install_g200_microcode(drm_mga_private_t * dev_priv)
 {
- unsigned char *vcbase = dev_priv->warp->handle;
+ unsigned char *vcbase = dev_priv->warp->virtual;
 unsigned long pcbase = dev_priv->warp->offset;
 memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
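The ati_pcigart.c hunks rename the PTE flag macros and set the no-snoop bit for both the IGP and PCIe GART formats, which pairs with the r600_cp.c change earlier that stops setting R600_PTE_SNOOPED: the backing pages are now mapped write-combining, so cache snooping is not needed. A sketch of how a PCIe-format entry is assembled from a page's bus address, mirroring the DRM_ATI_GART_PCIE case above (the wrapper function is illustrative; the macros and upper_32_bits() are the driver's own):

    static u32
    gart_pcie_entry(u64 entry_addr)
    {
        u32 page_base;

        page_base = (u32)entry_addr & ATI_PCIGART_PAGE_MASK;
        page_base >>= 8;                                  /* PCIe entries store addr >> 8 */
        page_base |= (upper_32_bits(entry_addr) & 0xff) << 24;
        page_base |= ATI_GART_READ | ATI_GART_WRITE;
        page_base |= ATI_GART_NOSNOOP;
        return (page_base);
    }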