Diffstat (limited to 'drivers/gpu/drm/radeon/r600.c')
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 190
1 file changed, 106 insertions(+), 84 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2ffcf5a03551..c52290197292 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -353,23 +353,14 @@ void r600_hpd_fini(struct radeon_device *rdev)
/*
* R600 PCIE GART
*/
-int r600_gart_clear_page(struct radeon_device *rdev, int i)
-{
- void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
- u64 pte;
-
- if (i < 0 || i > rdev->gart.num_gpu_pages)
- return -EINVAL;
- pte = 0;
- writeq(pte, ((void __iomem *)ptr) + (i * 8));
- return 0;
-}
-
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
unsigned i;
u32 tmp;
+ /* flush hdp cache so updates hit vram */
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
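The flush added above matters because the GART page table lives in VRAM behind the HDP (Host Data Path) cache: a PTE the CPU writes through the PCI BAR can still be sitting in that cache when the VM TLB is invalidated. A minimal sketch of the intended ordering, with a hypothetical helper name and addr standing for an already-encoded PTE (only the call order reflects the patch):

	/* Illustrative only; the helper name is hypothetical. */
	static void r600_gart_set_page_sketch(struct radeon_device *rdev,
					      int i, u64 addr)
	{
		void __iomem *ptr = (void __iomem *)rdev->gart.table.vram.ptr;

		writeq(addr, ptr + (i * 8));    /* write lands in the HDP cache */
		r600_pcie_gart_tlb_flush(rdev); /* flushes HDP to VRAM first,
						 * then invalidates the VM TLB */
	}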
@@ -416,6 +407,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -619,6 +611,68 @@ static void r600_mc_program(struct radeon_device *rdev)
rv515_vga_render_disable(rdev);
}
+/**
+ * r600_vram_gtt_location - try to find VRAM & GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * This function tries to place VRAM at the same address in the GPU's
+ * address space as it occupies in the CPU (PCI) address space, as some
+ * GPUs seem to have issues when VRAM is reprogrammed to a different
+ * address space.
+ *
+ * If there is not enough space to fit the invisible VRAM after the
+ * aperture, then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP, then place VRAM adjacent to the AGP aperture;
+ * we need them to be contiguous from the GPU's point of view so that
+ * the GPU can be programmed to catch accesses outside both (weird GPU
+ * policy, see ??).
+ *
+ * This function never fails; in the worst case it limits VRAM or GTT.
+ *
+ * Note: GTT start, end, and size should be initialized before calling
+ * this function on AGP platforms.
+ */
+void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+ u64 size_bf, size_af;
+
+ if (mc->mc_vram_size > 0xE0000000) {
+ /* leave room for at least 512M GTT */
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = 0xE0000000;
+ mc->mc_vram_size = 0xE0000000;
+ }
+ if (rdev->flags & RADEON_IS_AGP) {
+ size_bf = mc->gtt_start;
+ size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+ if (size_bf > size_af) {
+ if (mc->mc_vram_size > size_bf) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_bf;
+ mc->mc_vram_size = size_bf;
+ }
+ mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+ } else {
+ if (mc->mc_vram_size > size_af) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_af;
+ mc->mc_vram_size = size_af;
+ }
+ mc->vram_start = mc->gtt_end;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+ } else {
+ u64 base = 0;
+ if (rdev->flags & RADEON_IS_IGP)
+ base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ radeon_gtt_location(rdev, mc);
+ }
+}
+
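To make the AGP branch of r600_vram_gtt_location() concrete: with a 256M AGP aperture at 0xD0000000 and 512M of VRAM, the space before the aperture (size_bf) is far larger than the space after it (size_af), so VRAM lands immediately below GTT at 0xB0000000. A standalone sketch of the same arithmetic (plain C with example numbers, not driver code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Example: 256M AGP aperture at 0xD0000000, 512M of VRAM. */
		uint64_t gtt_start = 0xD0000000ULL;
		uint64_t gtt_end   = 0xDFFFFFFFULL;
		uint64_t vram_size = 512ULL << 20;
		uint64_t size_bf = gtt_start;                   /* room below GTT */
		uint64_t size_af = 0xFFFFFFFFULL - gtt_end + 1; /* room above GTT */
		uint64_t vram_start;

		if (size_bf > size_af)
			vram_start = gtt_start - vram_size; /* just below GTT */
		else
			vram_start = gtt_end;               /* just above GTT */
		printf("VRAM at 0x%08llX - 0x%08llX\n",
		       (unsigned long long)vram_start,
		       (unsigned long long)(vram_start + vram_size - 1));
		return 0; /* prints: VRAM at 0xB0000000 - 0xCFFFFFFF */
	}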
int r600_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
@@ -658,75 +712,21 @@ int r600_mc_init(struct radeon_device *rdev)
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ /* FIXME remove this once we support unmappable VRAM */
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;
-
- if (rdev->flags & RADEON_IS_AGP) {
- /* gtt_size is setup by radeon_agp_init */
- rdev->mc.gtt_location = rdev->mc.agp_base;
- tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
- /* Try to put vram before or after AGP because we
- * we want SYSTEM_APERTURE to cover both VRAM and
- * AGP so that GPU can catch out of VRAM/AGP access
- */
- if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
- /* Enough place before */
- rdev->mc.vram_location = rdev->mc.gtt_location -
- rdev->mc.mc_vram_size;
- } else if (tmp > rdev->mc.mc_vram_size) {
- /* Enough place after */
- rdev->mc.vram_location = rdev->mc.gtt_location +
- rdev->mc.gtt_size;
- } else {
- /* Try to setup VRAM then AGP might not
- * not work on some card
- */
- rdev->mc.vram_location = 0x00000000UL;
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- }
- } else {
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
- 0xFFFF) << 24;
- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
- if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
- /* Enough place after vram */
- rdev->mc.gtt_location = tmp;
- } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
- /* Enough place before vram */
- rdev->mc.gtt_location = 0;
- } else {
- /* Not enough place after or before shrink
- * gart size
- */
- if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
- rdev->mc.gtt_location = 0;
- rdev->mc.gtt_size = rdev->mc.vram_location;
- } else {
- rdev->mc.gtt_location = tmp;
- rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
- }
- }
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
- rdev->mc.vram_start = rdev->mc.vram_location;
- rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
- rdev->mc.gtt_start = rdev->mc.gtt_location;
- rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ r600_vram_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-
if (rdev->flags & RADEON_IS_IGP)
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
-
return 0;
}
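On the clock setup kept at the end of r600_mc_init(): the BIOS reports default_sclk in 10 kHz units, and dividing by the 20.12 fixed-point constant 100 converts it to MHz for the later bandwidth math. A tiny standalone sketch of that conversion (the 20.12 helpers are reimplemented here for illustration; the driver uses rfixed_const()/rfixed_div()):

	#include <stdio.h>
	#include <stdint.h>

	/* 20.12 fixed point, same layout as the driver's fixed20_12 type */
	static uint32_t fx_const(uint32_t v) { return v << 12; }
	static uint32_t fx_div(uint32_t a, uint32_t b)
	{
		return (uint32_t)(((uint64_t)a << 12) / b);
	}

	int main(void)
	{
		uint32_t sclk = fx_div(fx_const(60000), fx_const(100));

		/* 60000 * 10 kHz = 600 MHz */
		printf("sclk = %u MHz\n", sclk >> 12); /* prints: sclk = 600 MHz */
		return 0;
	}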
@@ -981,6 +981,9 @@ void r600_gpu_init(struct radeon_device *rdev)
{
u32 tiling_config;
u32 ramcfg;
+ u32 backend_map;
+ u32 cc_rb_backend_disable;
+ u32 cc_gc_shader_pipe_config;
u32 tmp;
int i, j;
u32 sq_config;
@@ -1090,8 +1093,11 @@ void r600_gpu_init(struct radeon_device *rdev)
default:
break;
}
+ rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
+ rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= GROUP_SIZE(0);
+ rdev->config.r600.tiling_group_size = 256;
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
tiling_config |= ROW_TILING(3);
@@ -1101,24 +1107,33 @@ void r600_gpu_init(struct radeon_device *rdev)
tiling_config |= SAMPLE_SPLIT(tmp);
}
tiling_config |= BANK_SWAPS(1);
- tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
- rdev->config.r600.max_backends,
- (0xff << rdev->config.r600.max_backends) & 0xff);
- tiling_config |= BACKEND_MAP(tmp);
+
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ cc_rb_backend_disable |=
+ BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ cc_gc_shader_pipe_config |=
+ INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
+ cc_gc_shader_pipe_config |=
+ INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
+
+ backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
+ (R6XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable &
+ R6XX_MAX_BACKENDS_MASK) >> 16)),
+ (cc_rb_backend_disable >> 16));
+
+ tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
- tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
- WREG32(CC_RB_BACKEND_DISABLE, tmp);
-
/* Setup pipes */
- tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
- tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
- WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);
+ WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
+ tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
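The unit bookkeeping above works by population count over the disable masks read back from the chip: r600_count_pipe_bits() counts set bits, so live units = MAX - popcount(disabled bits). A standalone illustration with hypothetical fuse values (plain C):

	#include <stdio.h>
	#include <stdint.h>

	/* count set bits, same idea as r600_count_pipe_bits() */
	static unsigned count_pipe_bits(uint32_t val)
	{
		unsigned count = 0;

		for (; val; val >>= 1)
			count += val & 1;
		return count;
	}

	int main(void)
	{
		/* Hypothetical readback: INACTIVE_QD_PIPES (bits 8-15), 2 bits set */
		uint32_t cc_gc_shader_pipe_config = 0xC0 << 8;
		unsigned max_pipes = 8; /* R6XX_MAX_PIPES */
		unsigned live = max_pipes -
				count_pipe_bits((cc_gc_shader_pipe_config >> 8) & 0xFF);

		/* VGT_OUT_DEALLOC_CNTL gets live * 4, reuse depth live * 4 - 2 */
		printf("live pipes: %u, dealloc: %u, reuse: %u\n",
		       live, live * 4, live * 4 - 2);
		return 0; /* prints: live pipes: 6, dealloc: 24, reuse: 22 */
	}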
@@ -1783,12 +1798,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
/* Also consider EVENT_WRITE_EOP. It handles the interrupts + timestamps + events */
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
+ radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
+ /* wait for 3D idle clean */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
- radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
- radeon_ring_write(rdev, 1);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(rdev, RB_INT_STAT);
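For reference, the fence path now emits the sequence below; this is a summary of the ring writes above, not additional driver code:

	/*
	 * ring contents per fence (sketch):
	 *   PACKET3(EVENT_WRITE, 0);   CACHE_FLUSH_AND_INV_EVENT  -> flush/invalidate caches
	 *   PACKET3(SET_CONFIG_REG,1); WAIT_UNTIL = 3D idle+clean -> drain the pipeline
	 *   PACKET3(SET_CONFIG_REG,1); scratch_reg = fence->seq   -> publish the sequence
	 *   PACKET0(CP_INT_STATUS,0);  RB_INT_STAT                -> raise the fence IRQ
	 */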
@@ -2745,6 +2765,7 @@ restart_ih:
case 0: /* D1 vblank */
if (disp_int & LB_D1_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 0);
+ wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
@@ -2765,6 +2786,7 @@ restart_ih:
case 0: /* D2 vblank */
if (disp_int & LB_D2_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 1);
+ wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
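The wake_up() calls added to both vblank cases pair with sleepers on rdev->irq.vblank_queue elsewhere in the driver; drm_handle_vblank() has already bumped the counter before the wake, so a waiter can use the counter as its condition. A hedged sketch of what such a consumer looks like (illustrative helper, not the driver's actual code):

	/* Illustrative waiter: block until the next vblank on @crtc or 50 ms. */
	static int r600_wait_next_vblank_sketch(struct radeon_device *rdev, int crtc)
	{
		u32 start = drm_vblank_count(rdev->ddev, crtc);
		long r;

		r = wait_event_timeout(rdev->irq.vblank_queue,
				       drm_vblank_count(rdev->ddev, crtc) != start,
				       msecs_to_jiffies(50));
		return r ? 0 : -ETIMEDOUT;
	}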