diff --git a/sys/dev/drm2/radeon/evergreen_cs.c b/sys/dev/drm2/radeon/evergreen_cs.c index 6a37910..f63714f 100644 --- a/sys/dev/drm2/radeon/evergreen_cs.c +++ b/sys/dev/drm2/radeon/evergreen_cs.c @@ -952,8 +952,8 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p) u64 offset = (u64)track->vgt_strmout_bo_offset[i] + (u64)track->vgt_strmout_size[i]; if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) { - DRM_ERROR("streamout %d bo too small: 0x%lx, 0x%lx\n", - i, offset, + DRM_ERROR("streamout %d bo too small: 0x%jx, 0x%lx\n", + i, (uintmax_t)offset, radeon_bo_size(track->vgt_strmout_bo[i])); return -EINVAL; } @@ -2298,8 +2298,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, offset = reloc->lobj.gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { - dev_warn(p->dev, "CP DMA src buffer too small (%lu %lu)\n", - tmp + size, radeon_bo_size(reloc->robj)); + dev_warn(p->dev, "CP DMA src buffer too small (%ju %lu)\n", + (uintmax_t)tmp + size, radeon_bo_size(reloc->robj)); return -EINVAL; } @@ -2336,8 +2336,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, offset = reloc->lobj.gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { - dev_warn(p->dev, "CP DMA dst buffer too small (%lu %lu)\n", - tmp + size, radeon_bo_size(reloc->robj)); + dev_warn(p->dev, "CP DMA dst buffer too small (%ju %lu)\n", + (uintmax_t)tmp + size, radeon_bo_size(reloc->robj)); return -EINVAL; } @@ -2630,8 +2630,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%lx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%jx, 0x%lx\n", + (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += 
reloc->lobj.gpu_offset; @@ -2649,8 +2649,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, offset = radeon_get_ib_value(p, idx+3); offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%lx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%jx, 0x%lx\n", + (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; @@ -2678,8 +2678,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } if ((offset + 8) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad MEM_WRITE bo too small: 0x%lx, 0x%lx\n", - offset + 8, radeon_bo_size(reloc->robj)); + DRM_ERROR("bad MEM_WRITE bo too small: 0x%jx, 0x%lx\n", + (uintmax_t)offset + 8, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; @@ -2703,8 +2703,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad COPY_DW src bo too small: 0x%lx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + DRM_ERROR("bad COPY_DW src bo too small: 0x%jx, 0x%lx\n", + (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; @@ -2727,8 +2727,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, offset = radeon_get_ib_value(p, idx+3); offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad COPY_DW dst bo too small: 0x%lx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + DRM_ERROR("bad COPY_DW dst bo too small: 0x%jx, 0x%lx\n", + (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; 
@@ -2927,8 +2927,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) p->idx += count + 3; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA write buffer too small (%lu %lu)\n", - dst_offset, radeon_bo_size(dst_reloc->robj)); + dev_warn(p->dev, "DMA write buffer too small (%ju %lu)\n", + (uintmax_t)dst_offset, radeon_bo_size(dst_reloc->robj)); return -EINVAL; } break; @@ -2965,18 +2965,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) src_offset = ib[idx+8]; src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%lu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%ju %lu)\n", + (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%lu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%ju %lu)\n", + (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%lu %lu)\n", - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); + dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%ju %lu)\n", + (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); @@ -3025,18 +3025,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) src_offset = ib[idx+8]; src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA 
L2T, broadcast src buffer too small (%lu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%ju %lu)\n", + (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%lu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%ju %lu)\n", + (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%lu %lu)\n", - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); + dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%ju %lu)\n", + (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); @@ -3070,13 +3070,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); } if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%lu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%ju %lu)\n", + (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%lu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%ju %lu)\n", + (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } p->idx += 9; @@ -3109,18 +3109,18 @@ int evergreen_dma_cs_parse(struct 
radeon_cs_parser *p) src_offset = ib[idx+8]; src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%lu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%ju %lu)\n", + (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%lu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%ju %lu)\n", + (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%lu %lu)\n", - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); + dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%ju %lu)\n", + (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); @@ -3159,13 +3159,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); } if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%lu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%ju %lu)\n", + (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%lu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn(p->dev, "DMA L2T, 
broadcast dst buffer too small (%ju %lu)\n", + (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } p->idx += 9; @@ -3185,13 +3185,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst_offset = ib[idx+1]; dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, byte src buffer too small (%lu %lu)\n", - src_offset + count, radeon_bo_size(src_reloc->robj)); + dev_warn(p->dev, "DMA L2L, byte src buffer too small (%ju %lu)\n", + (uintmax_t)src_offset + count, radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%lu %lu)\n", - dst_offset + count, radeon_bo_size(dst_reloc->robj)); + dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%ju %lu)\n", + (uintmax_t)dst_offset + count, radeon_bo_size(dst_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff); @@ -3227,18 +3227,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) src_offset = ib[idx+3]; src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%lu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%ju %lu)\n", + (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%lu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%ju %lu)\n", + (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > 
radeon_bo_size(dst2_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%lu %lu)\n", - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); + dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%ju %lu)\n", + (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); @@ -3260,13 +3260,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst_offset = ib[idx+1]; dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, dw src buffer too small (%lu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn(p->dev, "DMA L2L, dw src buffer too small (%ju %lu)\n", + (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%lu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%ju %lu)\n", + (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); @@ -3286,8 +3286,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst_offset = ib[idx+1]; dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16; if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA constant fill buffer too small (%lu %lu)\n", - dst_offset, radeon_bo_size(dst_reloc->robj)); + dev_warn(p->dev, "DMA constant fill buffer too small (%ju %lu)\n", + (uintmax_t)dst_offset, radeon_bo_size(dst_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); diff --git a/sys/dev/drm2/radeon/r600.c b/sys/dev/drm2/radeon/r600.c index 2648dd7..03e905f 100644 --- 
a/sys/dev/drm2/radeon/r600.c +++ b/sys/dev/drm2/radeon/r600.c @@ -1142,9 +1142,9 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc mc->vram_start = mc->gtt_end + 1; } mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; - dev_info(rdev->dev, "VRAM: %luM 0x%08lX - 0x%08lX (%luM used)\n", - mc->mc_vram_size >> 20, mc->vram_start, - mc->vram_end, mc->real_vram_size >> 20); + dev_info(rdev->dev, "VRAM: %juM 0x%08jX - 0x%08jX (%juM used)\n", + (uintmax_t)mc->mc_vram_size >> 20, (uintmax_t)mc->vram_start, + (uintmax_t)mc->vram_end, (uintmax_t)mc->real_vram_size >> 20); } else { u64 base = 0; if (rdev->flags & RADEON_IS_IGP) { diff --git a/sys/dev/drm2/radeon/r600_blit_kms.c b/sys/dev/drm2/radeon/r600_blit_kms.c index 2e16379..e2ace69 100644 --- a/sys/dev/drm2/radeon/r600_blit_kms.c +++ b/sys/dev/drm2/radeon/r600_blit_kms.c @@ -708,8 +708,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev, u64 vb_gpu_addr; u32 *vb_cpu_addr; - DRM_DEBUG("emitting copy %16lx %16lx %d\n", - src_gpu_addr, dst_gpu_addr, num_gpu_pages); + DRM_DEBUG("emitting copy %16jx %16jx %d\n", + (uintmax_t)src_gpu_addr, (uintmax_t)dst_gpu_addr, num_gpu_pages); vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb); vb_gpu_addr = radeon_sa_bo_gpu_addr(vb); diff --git a/sys/dev/drm2/radeon/r600_cs.c b/sys/dev/drm2/radeon/r600_cs.c index 791672d..74135a0 100644 --- a/sys/dev/drm2/radeon/r600_cs.c +++ b/sys/dev/drm2/radeon/r600_cs.c @@ -430,8 +430,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) return -EINVAL; } if (!IS_ALIGNED(base_offset, base_align)) { - dev_warn(p->dev, "%s offset[%d] 0x%lx 0x%lx, %d not aligned\n", __func__, i, - base_offset, base_align, array_mode); + dev_warn(p->dev, "%s offset[%d] 0x%jx 0x%jx, %d not aligned\n", __func__, i, + (uintmax_t)base_offset, (uintmax_t)base_align, array_mode); return -EINVAL; } @@ -458,9 +458,9 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) * broken userspace. 
*/ } else { - dev_warn(p->dev, "%s offset[%d] %d %lu %d %lu too big (%d %d) (%d %d %d)\n", + dev_warn(p->dev, "%s offset[%d] %d %ju %d %lu too big (%d %d) (%d %d %d)\n", __func__, i, array_mode, - track->cb_color_bo_offset[i], tmp, + (uintmax_t)track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]), pitch, height, r600_fmt_get_nblocksx(format, pitch), r600_fmt_get_nblocksy(format, height), @@ -490,9 +490,9 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) if (bytes + track->cb_color_frag_offset[i] > radeon_bo_size(track->cb_color_frag_bo[i])) { dev_warn(p->dev, "%s FMASK_TILE_MAX too large " - "(tile_max=%u, bytes=%u, offset=%lu, bo_size=%lu)\n", + "(tile_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n", __func__, tile_max, bytes, - track->cb_color_frag_offset[i], + (uintmax_t)track->cb_color_frag_offset[i], radeon_bo_size(track->cb_color_frag_bo[i])); return -EINVAL; } @@ -508,9 +508,9 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) if (bytes + track->cb_color_tile_offset[i] > radeon_bo_size(track->cb_color_tile_bo[i])) { dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large " - "(block_max=%u, bytes=%u, offset=%lu, bo_size=%lu)\n", + "(block_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n", __func__, block_max, bytes, - track->cb_color_tile_offset[i], + (uintmax_t)track->cb_color_tile_offset[i], radeon_bo_size(track->cb_color_tile_bo[i])); return -EINVAL; } @@ -621,8 +621,8 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p) return -EINVAL; } if (!IS_ALIGNED(base_offset, base_align)) { - dev_warn(p->dev, "%s offset 0x%lx, 0x%lx, %d not aligned\n", __func__, - base_offset, base_align, array_mode); + dev_warn(p->dev, "%s offset 0x%jx, 0x%jx, %d not aligned\n", __func__, + (uintmax_t)base_offset, (uintmax_t)base_align, array_mode); return -EINVAL; } @@ -730,8 +730,8 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) u64 offset = (u64)track->vgt_strmout_bo_offset[i] + 
(u64)track->vgt_strmout_size[i]; if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) { - DRM_ERROR("streamout %d bo too small: 0x%lx, 0x%lx\n", - i, offset, + DRM_ERROR("streamout %d bo too small: 0x%jx, 0x%lx\n", + i, (uintmax_t)offset, radeon_bo_size(track->vgt_strmout_bo[i])); return -EINVAL; } } @@ -1696,13 +1696,13 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, return -EINVAL; } if (!IS_ALIGNED(base_offset, base_align)) { - dev_warn(p->dev, "%s:%d tex base offset (0x%lx, 0x%lx, %d) invalid\n", - __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0)); + dev_warn(p->dev, "%s:%d tex base offset (0x%jx, 0x%jx, %d) invalid\n", + __func__, __LINE__, (uintmax_t)base_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0)); return -EINVAL; } if (!IS_ALIGNED(mip_offset, base_align)) { - dev_warn(p->dev, "%s:%d tex mip offset (0x%lx, 0x%lx, %d) invalid\n", - __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0)); + dev_warn(p->dev, "%s:%d tex mip offset (0x%jx, 0x%jx, %d) invalid\n", + __func__, __LINE__, (uintmax_t)mip_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0)); return -EINVAL; } @@ -1725,7 +1725,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, w0, h0, pitch_align, height_align, array_check.array_mode, format, word2, l0_size, radeon_bo_size(texture)); - dev_warn(p->dev, "alignments %d %d %d %ld\n", pitch, pitch_align, height_align, base_align); + dev_warn(p->dev, "alignments %d %d %d %ju\n", pitch, pitch_align, height_align, (uintmax_t)base_align); return -EINVAL; } /* using get ib will give us the offset into the mipmap bo */ @@ -1932,8 +1932,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, offset = reloc->lobj.gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { - dev_warn(p->dev, "CP DMA src buffer too small (%lu %lu)\n", - tmp + size, radeon_bo_size(reloc->robj)); + dev_warn(p->dev, "CP DMA src buffer too small (%ju %lu)\n", 
+ (uintmax_t)tmp + size, radeon_bo_size(reloc->robj)); return -EINVAL; } @@ -1962,8 +1962,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, offset = reloc->lobj.gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { - dev_warn(p->dev, "CP DMA dst buffer too small (%lu %lu)\n", - tmp + size, radeon_bo_size(reloc->robj)); + dev_warn(p->dev, "CP DMA dst buffer too small (%ju %lu)\n", + (uintmax_t)tmp + size, radeon_bo_size(reloc->robj)); return -EINVAL; } @@ -2232,14 +2232,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p, offset = radeon_get_ib_value(p, idx+1) << 8; if (offset != track->vgt_strmout_bo_offset[idx_value]) { - DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%lx, 0x%x\n", - offset, track->vgt_strmout_bo_offset[idx_value]); + DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%jx, 0x%x\n", + (uintmax_t)offset, track->vgt_strmout_bo_offset[idx_value]); return -EINVAL; } if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%lx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%jx, 0x%lx\n", + (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); @@ -2271,8 +2271,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%lx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%jx, 0x%lx\n", + (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; @@ -2290,8 +2290,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, offset = radeon_get_ib_value(p, idx+3); offset += 
((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%lx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%jx, 0x%lx\n", + (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; @@ -2319,8 +2319,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } if ((offset + 8) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad MEM_WRITE bo too small: 0x%lx, 0x%lx\n", - offset + 8, radeon_bo_size(reloc->robj)); + DRM_ERROR("bad MEM_WRITE bo too small: 0x%jx, 0x%lx\n", + (uintmax_t)offset + 8, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; @@ -2344,8 +2344,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad COPY_DW src bo too small: 0x%lx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + DRM_ERROR("bad COPY_DW src bo too small: 0x%jx, 0x%lx\n", + (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; @@ -2368,8 +2368,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, offset = radeon_get_ib_value(p, idx+3); offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad COPY_DW dst bo too small: 0x%lx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + DRM_ERROR("bad COPY_DW dst bo too small: 0x%jx, 0x%lx\n", + (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; @@ -2643,8 +2643,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) p->idx += count + 3; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - 
dev_warn(p->dev, "DMA write buffer too small (%lu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn(p->dev, "DMA write buffer too small (%ju %lu)\n", + (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } break; @@ -2710,13 +2710,13 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) } } if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA copy src buffer too small (%lu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn(p->dev, "DMA copy src buffer too small (%ju %lu)\n", + (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA write dst buffer too small (%lu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn(p->dev, "DMA write dst buffer too small (%ju %lu)\n", + (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } break; @@ -2733,8 +2733,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) dst_offset = ib[idx+1]; dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16; if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA constant fill buffer too small (%lu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn(p->dev, "DMA constant fill buffer too small (%ju %lu)\n", + (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); diff --git a/sys/dev/drm2/radeon/radeon_agp.c b/sys/dev/drm2/radeon/radeon_agp.c index 9fd2353..dec4045 100644 --- a/sys/dev/drm2/radeon/radeon_agp.c +++ b/sys/dev/drm2/radeon/radeon_agp.c @@ -249,8 +249,8 @@ int radeon_agp_init(struct radeon_device *rdev) rdev->mc.gtt_size = rdev->ddev->agp->info.ai_aperture_size << 20; rdev->mc.gtt_start = rdev->mc.agp_base; 
rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1; - dev_info(rdev->dev, "GTT: %luM 0x%08lX - 0x%08lX\n", - rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end); + dev_info(rdev->dev, "GTT: %juM 0x%08jX - 0x%08jX\n", + (uintmax_t)rdev->mc.gtt_size >> 20, (uintmax_t)rdev->mc.gtt_start, (uintmax_t)rdev->mc.gtt_end); /* workaround some hw issues */ if (rdev->family < CHIP_R200) { diff --git a/sys/dev/drm2/radeon/radeon_bios.c b/sys/dev/drm2/radeon/radeon_bios.c index 6a7fd78..b9ee4d1 100644 --- a/sys/dev/drm2/radeon/radeon_bios.c +++ b/sys/dev/drm2/radeon/radeon_bios.c @@ -62,7 +62,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) rdev->bios = NULL; vram_base = drm_get_resource_start(rdev->ddev, 0); - DRM_INFO("%s: VRAM base address: 0x%lx\n", __func__, vram_base); + DRM_INFO("%s: VRAM base address: 0x%jx\n", __func__, (uintmax_t)vram_base); bios_map.offset = vram_base; bios_map.size = size; @@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) } bios = bios_map.virtual; size = bios_map.size; - DRM_INFO("%s: Map address: %p (%lu bytes)\n", __func__, bios, size); + DRM_INFO("%s: Map address: %p (%ju bytes)\n", __func__, bios, (uintmax_t)size); if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { if (size == 0) { @@ -111,7 +111,7 @@ static bool radeon_read_bios(struct radeon_device *rdev) if (!bios) { return false; } - DRM_INFO("%s: Map address: %p (%lu bytes)\n", __func__, bios, size); + DRM_INFO("%s: Map address: %p (%zu bytes)\n", __func__, bios, size); if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { if (size == 0) { diff --git a/sys/dev/drm2/radeon/radeon_device.c b/sys/dev/drm2/radeon/radeon_device.c index 79ae817..24f66c6 100644 --- a/sys/dev/drm2/radeon/radeon_device.c +++ b/sys/dev/drm2/radeon/radeon_device.c @@ -373,9 +373,9 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; if (limit && limit < 
mc->real_vram_size) mc->real_vram_size = limit; - dev_info(rdev->dev, "VRAM: %luM 0x%016lX - 0x%016lX (%luM used)\n", - mc->mc_vram_size >> 20, mc->vram_start, - mc->vram_end, mc->real_vram_size >> 20); + dev_info(rdev->dev, "VRAM: %juM 0x%016jX - 0x%016jX (%juM used)\n", + (uintmax_t)mc->mc_vram_size >> 20, (uintmax_t)mc->vram_start, + (uintmax_t)mc->vram_end, (uintmax_t)mc->real_vram_size >> 20); } /** @@ -410,8 +410,8 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; } mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; - dev_info(rdev->dev, "GTT: %luM 0x%016lX - 0x%016lX\n", - mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); + dev_info(rdev->dev, "GTT: %juM 0x%016jX - 0x%016jX\n", + (uintmax_t)mc->gtt_size >> 20, (uintmax_t)mc->gtt_start, (uintmax_t)mc->gtt_end); } /* @@ -1153,17 +1153,17 @@ int radeon_device_init(struct radeon_device *rdev, return r; } - DRM_INFO("%s: Taking over the fictitious range 0x%lx-0x%lx\n", - __func__, rdev->mc.aper_base, - rdev->mc.aper_base + rdev->mc.visible_vram_size); + DRM_INFO("%s: Taking over the fictitious range 0x%jx-0x%jx\n", + __func__, (uintmax_t)rdev->mc.aper_base, + (uintmax_t)rdev->mc.aper_base + rdev->mc.visible_vram_size); r = vm_phys_fictitious_reg_range( rdev->mc.aper_base, rdev->mc.aper_base + rdev->mc.visible_vram_size, VM_MEMATTR_WRITE_COMBINING); if (r != 0) { DRM_ERROR("Failed to register fictitious range " - "0x%lx-0x%lx (%d).\n", rdev->mc.aper_base, - rdev->mc.aper_base + rdev->mc.visible_vram_size, r); + "0x%jx-0x%jx (%d).\n", (uintmax_t)rdev->mc.aper_base, + (uintmax_t)rdev->mc.aper_base + rdev->mc.visible_vram_size, r); return (-r); } rdev->fictitious_range_registered = true; diff --git a/sys/dev/drm2/radeon/radeon_display.c b/sys/dev/drm2/radeon/radeon_display.c index a2a7e2f..3a8e87c 100644 --- a/sys/dev/drm2/radeon/radeon_display.c +++ b/sys/dev/drm2/radeon/radeon_display.c @@ -901,7 +901,7 @@ void 
radeon_compute_pll_legacy(struct radeon_pll *pll, uint32_t post_div; u32 pll_out_min, pll_out_max; - DRM_DEBUG_KMS("PLL freq %lu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); + DRM_DEBUG_KMS("PLL freq %ju %u %u\n", (uintmax_t)freq, pll->min_ref_div, pll->max_ref_div); freq = freq * 1000; if (pll->flags & RADEON_PLL_IS_LCD) { diff --git a/sys/dev/drm2/radeon/radeon_fence.c b/sys/dev/drm2/radeon/radeon_fence.c index 75be1fd..07b1a64 100644 --- a/sys/dev/drm2/radeon/radeon_fence.c +++ b/sys/dev/drm2/radeon/radeon_fence.c @@ -155,7 +155,7 @@ void radeon_fence_process(struct radeon_device *rdev, int ring) * have temporarly set the last_seq not to the true real last * seq but to an older one. */ - last_seq = atomic_load_acq_long(&rdev->fence_drv[ring].last_seq); + last_seq = atomic_load_acq_64(&rdev->fence_drv[ring].last_seq); do { last_emitted = rdev->fence_drv[ring].sync_seq[ring]; seq = radeon_fence_read(rdev, ring); @@ -220,12 +220,12 @@ static void radeon_fence_destroy(struct radeon_fence *fence) static bool radeon_fence_seq_signaled(struct radeon_device *rdev, u64 seq, unsigned ring) { - if (atomic_load_acq_long(&rdev->fence_drv[ring].last_seq) >= seq) { + if (atomic_load_acq_64(&rdev->fence_drv[ring].last_seq) >= seq) { return true; } /* poll new last sequence at least once */ radeon_fence_process(rdev, ring); - if (atomic_load_acq_long(&rdev->fence_drv[ring].last_seq) >= seq) { + if (atomic_load_acq_64(&rdev->fence_drv[ring].last_seq) >= seq) { return true; } return false; @@ -281,7 +281,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq, bool signaled, fence_queue_locked; int r; - while (target_seq > atomic_load_acq_long(&rdev->fence_drv[ring].last_seq)) { + while (target_seq > atomic_load_acq_64(&rdev->fence_drv[ring].last_seq)) { if (!rdev->ring[ring].ready) { return -EBUSY; } @@ -296,7 +296,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq, */ timeout = 1; } - seq = 
atomic_load_acq_long(&rdev->fence_drv[ring].last_seq); + seq = atomic_load_acq_64(&rdev->fence_drv[ring].last_seq); /* Save current last activity valuee, used to check for GPU lockups */ last_activity = rdev->fence_drv[ring].last_activity; @@ -350,7 +350,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq, #endif /* check if sequence value has changed since last_activity */ - if (seq != atomic_load_acq_long(&rdev->fence_drv[ring].last_seq)) { + if (seq != atomic_load_acq_64(&rdev->fence_drv[ring].last_seq)) { continue; } @@ -368,8 +368,8 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq, if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) { /* good news we believe it's a lockup */ - dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016lx last fence id 0x%016lx)\n", - target_seq, seq); + dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx last fence id 0x%016jx)\n", + (uintmax_t)target_seq, (uintmax_t)seq); /* change last activity so nobody else think there is a lockup */ for (i = 0; i < RADEON_NUM_RINGS; ++i) { @@ -561,8 +561,8 @@ static int radeon_fence_wait_any_seq(struct radeon_device *rdev, if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) { /* good news we believe it's a lockup */ - dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016lx)\n", - target_seq[ring]); + dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx)\n", + (uintmax_t)target_seq[ring]); /* change last activity so nobody else think there is a lockup */ for (i = 0; i < RADEON_NUM_RINGS; ++i) { @@ -637,7 +637,7 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) { uint64_t seq; - seq = atomic_load_acq_long(&rdev->fence_drv[ring].last_seq) + 1ULL; + seq = atomic_load_acq_64(&rdev->fence_drv[ring].last_seq) + 1ULL; if (seq >= rdev->fence_drv[ring].sync_seq[ring]) { /* nothing to wait for, last_seq is already the last emited fence */ @@ -724,7 +724,7 @@ unsigned radeon_fence_count_emitted(struct 
radeon_device *rdev, int ring) */ radeon_fence_process(rdev, ring); emitted = rdev->fence_drv[ring].sync_seq[ring] - - atomic_load_acq_long(&rdev->fence_drv[ring].last_seq); + - atomic_load_acq_64(&rdev->fence_drv[ring].last_seq); /* to avoid 32bits warp around */ if (emitted > 0x10000000) { emitted = 0x10000000; @@ -830,10 +830,10 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) } rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4]; rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index; - radeon_fence_write(rdev, atomic_load_acq_long(&rdev->fence_drv[ring].last_seq), ring); + radeon_fence_write(rdev, atomic_load_acq_64(&rdev->fence_drv[ring].last_seq), ring); rdev->fence_drv[ring].initialized = true; - dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016lx and cpu addr 0x%p\n", - ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr); + dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016jx and cpu addr 0x%p\n", + ring, (uintmax_t)rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr); return 0; } @@ -856,7 +856,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring) rdev->fence_drv[ring].gpu_addr = 0; for (i = 0; i < RADEON_NUM_RINGS; ++i) rdev->fence_drv[ring].sync_seq[i] = 0; - atomic_store_rel_long(&rdev->fence_drv[ring].last_seq, 0); + atomic_store_rel_64(&rdev->fence_drv[ring].last_seq, 0); rdev->fence_drv[ring].last_activity = jiffies; rdev->fence_drv[ring].initialized = false; } @@ -955,7 +955,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data) seq_printf(m, "--- ring %d ---\n", i); seq_printf(m, "Last signaled fence 0x%016llx\n", - (unsigned long long)atomic_load_acq_long(&rdev->fence_drv[i].last_seq)); + (unsigned long long)atomic_load_acq_64(&rdev->fence_drv[i].last_seq)); seq_printf(m, "Last emitted 0x%016llx\n", rdev->fence_drv[i].sync_seq[i]); diff --git a/sys/dev/drm2/radeon/radeon_object.c 
b/sys/dev/drm2/radeon/radeon_object.c index 347232b..8a045c4 100644 --- a/sys/dev/drm2/radeon/radeon_object.c +++ b/sys/dev/drm2/radeon/radeon_object.c @@ -241,11 +241,11 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, domain_start = bo->rdev->mc.gtt_start; if (max_offset < (radeon_bo_gpu_offset(bo) - domain_start)) { DRM_ERROR("radeon_bo_pin_restricted: " - "max_offset(%lu) < " - "(radeon_bo_gpu_offset(%lu) - " - "domain_start(%lu)", - max_offset, radeon_bo_gpu_offset(bo), - domain_start); + "max_offset(%ju) < " + "(radeon_bo_gpu_offset(%ju) - " + "domain_start(%ju)", + (uintmax_t)max_offset, (uintmax_t)radeon_bo_gpu_offset(bo), + (uintmax_t)domain_start); } } @@ -338,9 +338,9 @@ int radeon_bo_init(struct radeon_device *rdev) /* Add an MTRR for the VRAM */ rdev->mc.vram_mtrr = drm_mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, DRM_MTRR_WC); - DRM_INFO("Detected VRAM RAM=%luM, BAR=%lluM\n", - rdev->mc.mc_vram_size >> 20, - (unsigned long long)rdev->mc.aper_size >> 20); + DRM_INFO("Detected VRAM RAM=%juM, BAR=%juM\n", + (uintmax_t)rdev->mc.mc_vram_size >> 20, + (uintmax_t)rdev->mc.aper_size >> 20); DRM_INFO("RAM width %dbits %cDR\n", rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S'); return radeon_ttm_init(rdev); diff --git a/sys/dev/drm2/radeon/radeon_state.c b/sys/dev/drm2/radeon/radeon_state.c index 97b7503..22bd010 100644 --- a/sys/dev/drm2/radeon/radeon_state.c +++ b/sys/dev/drm2/radeon/radeon_state.c @@ -3063,7 +3063,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil case RADEON_PARAM_STATUS_HANDLE: value = dev_priv->ring_rptr_offset; break; -#if !__LP64__ +#ifndef __LP64__ /* * This ioctl() doesn't work on 64-bit platforms because hw_lock is a * pointer which can't fit into an int-sized variable. 
According to diff --git a/sys/dev/drm2/radeon/radeon_test.c b/sys/dev/drm2/radeon/radeon_test.c index 0ae0638..7774699 100644 --- a/sys/dev/drm2/radeon/radeon_test.c +++ b/sys/dev/drm2/radeon/radeon_test.c @@ -216,8 +216,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) radeon_bo_kunmap(gtt_obj[i]); - DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%lx\n", - gtt_addr - rdev->mc.gtt_start); + DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%jx\n", + (uintmax_t)gtt_addr - rdev->mc.gtt_start); } out_cleanup: diff --git a/sys/dev/drm2/radeon/rv770.c b/sys/dev/drm2/radeon/rv770.c index 51fa14f..1c86d75 100644 --- a/sys/dev/drm2/radeon/rv770.c +++ b/sys/dev/drm2/radeon/rv770.c @@ -832,9 +832,9 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) mc->vram_start = mc->gtt_end + 1; } mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; - dev_info(rdev->dev, "VRAM: %luM 0x%08lX - 0x%08lX (%luM used)\n", - mc->mc_vram_size >> 20, mc->vram_start, - mc->vram_end, mc->real_vram_size >> 20); + dev_info(rdev->dev, "VRAM: %juM 0x%08jX - 0x%08jX (%juM used)\n", + (uintmax_t)mc->mc_vram_size >> 20, (uintmax_t)mc->vram_start, + (uintmax_t)mc->vram_end, (uintmax_t)mc->real_vram_size >> 20); } else { radeon_vram_location(rdev, &rdev->mc, 0); rdev->mc.gtt_base_align = 0; diff --git a/sys/dev/drm2/radeon/si.c b/sys/dev/drm2/radeon/si.c index 4b2fda7..f6d8a99 100644 --- a/sys/dev/drm2/radeon/si.c +++ b/sys/dev/drm2/radeon/si.c @@ -2298,9 +2298,9 @@ static void si_vram_location(struct radeon_device *rdev, mc->mc_vram_size = mc->aper_size; } mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; - dev_info(rdev->dev, "VRAM: %luM 0x%016lX - 0x%016lX (%luM used)\n", - mc->mc_vram_size >> 20, mc->vram_start, - mc->vram_end, mc->real_vram_size >> 20); + dev_info(rdev->dev, "VRAM: %juM 0x%016jX - 0x%016jX (%juM used)\n", + (uintmax_t)mc->mc_vram_size >> 20, (uintmax_t)mc->vram_start, + (uintmax_t)mc->vram_end, 
(uintmax_t)mc->real_vram_size >> 20); } static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) @@ -2323,8 +2323,8 @@ static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; } mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; - dev_info(rdev->dev, "GTT: %luM 0x%016lX - 0x%016lX\n", - mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); + dev_info(rdev->dev, "GTT: %juM 0x%016jX - 0x%016jX\n", + (uintmax_t)mc->gtt_size >> 20, (uintmax_t)mc->gtt_start, (uintmax_t)mc->gtt_end); } static void si_vram_gtt_location(struct radeon_device *rdev,