Diffstat (limited to 'drivers/gpu/drm/radeon')
34 files changed, 3066 insertions, 214 deletions
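The Makefile hunk below wires reg_srcs/evergreen through mkregtable into evergreen_reg_safe.h, the per-register bitmap that the new evergreen_cs.c checker consults before letting a SET_*_REG write through. The following is a minimal sketch of that lookup scheme, not kernel code: the table size and contents are placeholders and the helper name is invented; the real logic lives in evergreen_cs_check_reg() further down in this patch.

/*
 * Sketch of the safe-register lookup used by the new evergreen_cs.c:
 * one bit per 4-byte register, 32 registers (128 bytes) per u32 word.
 * A clear bit lets the write through untouched; a set bit routes the
 * register to the switch of specially handled cases.
 */
#include <stdint.h>
#include <stdbool.h>

#define REG_SAFE_WORDS 1024                               /* placeholder size */
static uint32_t evergreen_reg_safe_bm[REG_SAFE_WORDS];    /* placeholder table */

/* Returns true when the register needs the special-case switch. */
static bool evergreen_reg_needs_check(uint32_t reg)
{
	uint32_t word = reg >> 7;                 /* reg / 128: which bitmap word */
	uint32_t bit  = 1u << ((reg >> 2) & 31);  /* which bit within that word  */

	if (word >= REG_SAFE_WORDS)
		return true;                      /* outside the table: the driver rejects it */
	return (evergreen_reg_safe_bm[word] & bit) != 0;
}

In the driver, a set bit sends the register into a switch of specially handled cases (ring-size registers are forced to zero, base-address registers get their relocation applied), while a clear bit means the value is passed through unchanged.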
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 3c91312dea9a..84b1f2729d43 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -33,6 +33,9 @@ $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable $(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable $(call if_changed,mkregtable) +$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable + $(call if_changed,mkregtable) + $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h $(obj)/r200.o: $(obj)/r200_reg_safe.h @@ -47,6 +50,8 @@ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h +$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h + radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ radeon_irq.o r300_cmdbuf.o r600_cp.o # add KMS driver @@ -60,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ - evergreen.o + evergreen.o evergreen_cs.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 2ebcb979dd7e..1bc72c3190a9 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h @@ -3780,7 +3780,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT UCHAR ucReserved[2]; }ATOM_ASIC_SS_ASSIGNMENT; -//Define ucClockIndication, SW uses the IDs below to search if the SS is requried/enabled on a clock branch/signal type. +//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type. //SS is not required or enabled if a match is not found. #define ASIC_INTERNAL_MEMORY_SS 1 #define ASIC_INTERNAL_ENGINE_SS 2 @@ -5967,7 +5967,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO UCHAR ucPadding; // For proper alignment and size. USHORT usVDDC; // For the 780, use: None, Low, High, Variable UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16} - UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement. + UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requirement. USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200). ULONG ulFlags; } ATOM_PPLIB_RS780_CLOCK_INFO; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index f3f2827017ef..8c2d6478a221 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -498,7 +498,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if ((rdev->family == CHIP_RS600) || (rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) - pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV | + pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ RADEON_PLL_PREFER_CLOSEST_LOWER); if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? 
*/ diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index a4745e49ecf1..057192acdd36 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -41,7 +41,18 @@ void evergreen_fini(struct radeon_device *rdev); void evergreen_pm_misc(struct radeon_device *rdev) { - + int req_ps_idx = rdev->pm.requested_power_state_index; + int req_cm_idx = rdev->pm.requested_clock_mode_index; + struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; + struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; + + if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { + if (voltage->voltage != rdev->pm.current_vddc) { + radeon_atom_set_voltage(rdev, voltage->voltage); + rdev->pm.current_vddc = voltage->voltage; + DRM_DEBUG("Setting: v: %d\n", voltage->voltage); + } + } } void evergreen_pm_prepare(struct radeon_device *rdev) @@ -596,7 +607,7 @@ static void evergreen_mc_program(struct radeon_device *rdev) WREG32(MC_VM_FB_LOCATION, tmp); WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); WREG32(HDP_NONSURFACE_INFO, (2 << 7)); - WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); + WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); if (rdev->flags & RADEON_IS_AGP) { WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); @@ -1211,11 +1222,11 @@ static void evergreen_gpu_init(struct radeon_device *rdev) ps_thread_count = 128; sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); - sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; - sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; - sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; - sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; - sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; + sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); + sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); + sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); + sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); + sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); @@ -1249,6 +1260,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev) WREG32(VGT_GS_VERTEX_REUSE, 16); WREG32(PA_SC_LINE_STIPPLE_STATE, 0); + WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); + WREG32(VGT_OUT_DEALLOC_CNTL, 16); + WREG32(CB_PERF_CTR0_SEL_0, 0); WREG32(CB_PERF_CTR0_SEL_1, 0); WREG32(CB_PERF_CTR1_SEL_0, 0); @@ -1258,6 +1272,26 @@ static void evergreen_gpu_init(struct radeon_device *rdev) WREG32(CB_PERF_CTR3_SEL_0, 0); WREG32(CB_PERF_CTR3_SEL_1, 0); + /* clear render buffer base addresses */ + WREG32(CB_COLOR0_BASE, 0); + WREG32(CB_COLOR1_BASE, 0); + WREG32(CB_COLOR2_BASE, 0); + WREG32(CB_COLOR3_BASE, 0); + WREG32(CB_COLOR4_BASE, 0); + 
WREG32(CB_COLOR5_BASE, 0); + WREG32(CB_COLOR6_BASE, 0); + WREG32(CB_COLOR7_BASE, 0); + WREG32(CB_COLOR8_BASE, 0); + WREG32(CB_COLOR9_BASE, 0); + WREG32(CB_COLOR10_BASE, 0); + WREG32(CB_COLOR11_BASE, 0); + + /* set the shader const cache sizes to 0 */ + for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4) + WREG32(i, 0); + for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4) + WREG32(i, 0); + hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); @@ -2148,7 +2182,7 @@ int evergreen_init(struct radeon_device *rdev) if (r) return r; - rdev->accel_working = false; + rdev->accel_working = true; r = evergreen_startup(rdev); if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c new file mode 100644 index 000000000000..010963d4570f --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -0,0 +1,1356 @@ +/* + * Copyright 2010 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#include "drmP.h" +#include "radeon.h" +#include "evergreend.h" +#include "evergreen_reg_safe.h" + +static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, + struct radeon_cs_reloc **cs_reloc); + +struct evergreen_cs_track { + u32 group_size; + u32 nbanks; + u32 npipes; + /* value we track */ + u32 nsamples; + u32 cb_color_base_last[12]; + struct radeon_bo *cb_color_bo[12]; + u32 cb_color_bo_offset[12]; + struct radeon_bo *cb_color_fmask_bo[8]; + struct radeon_bo *cb_color_cmask_bo[8]; + u32 cb_color_info[12]; + u32 cb_color_view[12]; + u32 cb_color_pitch_idx[12]; + u32 cb_color_slice_idx[12]; + u32 cb_color_dim_idx[12]; + u32 cb_color_dim[12]; + u32 cb_color_pitch[12]; + u32 cb_color_slice[12]; + u32 cb_color_cmask_slice[8]; + u32 cb_color_fmask_slice[8]; + u32 cb_target_mask; + u32 cb_shader_mask; + u32 vgt_strmout_config; + u32 vgt_strmout_buffer_config; + u32 db_depth_control; + u32 db_depth_view; + u32 db_depth_size; + u32 db_depth_size_idx; + u32 db_z_info; + u32 db_z_idx; + u32 db_z_read_offset; + u32 db_z_write_offset; + struct radeon_bo *db_z_read_bo; + struct radeon_bo *db_z_write_bo; + u32 db_s_info; + u32 db_s_idx; + u32 db_s_read_offset; + u32 db_s_write_offset; + struct radeon_bo *db_s_read_bo; + struct radeon_bo *db_s_write_bo; +}; + +static void evergreen_cs_track_init(struct evergreen_cs_track *track) +{ + int i; + + for (i = 0; i < 8; i++) { + track->cb_color_fmask_bo[i] = NULL; + track->cb_color_cmask_bo[i] = NULL; + track->cb_color_cmask_slice[i] = 0; + track->cb_color_fmask_slice[i] = 0; + } + + for (i = 0; i < 12; i++) { + track->cb_color_base_last[i] = 0; + track->cb_color_bo[i] = NULL; + track->cb_color_bo_offset[i] = 0xFFFFFFFF; + track->cb_color_info[i] = 0; + track->cb_color_view[i] = 0; + track->cb_color_pitch_idx[i] = 0; + track->cb_color_slice_idx[i] = 0; + track->cb_color_dim[i] = 0; + track->cb_color_pitch[i] = 0; + track->cb_color_slice[i] = 0; + track->cb_color_dim[i] = 0; + } + track->cb_target_mask = 0xFFFFFFFF; + track->cb_shader_mask = 0xFFFFFFFF; + + track->db_depth_view = 0xFFFFC000; + track->db_depth_size = 0xFFFFFFFF; + track->db_depth_size_idx = 0; + track->db_depth_control = 0xFFFFFFFF; + track->db_z_info = 0xFFFFFFFF; + track->db_z_idx = 0xFFFFFFFF; + track->db_z_read_offset = 0xFFFFFFFF; + track->db_z_write_offset = 0xFFFFFFFF; + track->db_z_read_bo = NULL; + track->db_z_write_bo = NULL; + track->db_s_info = 0xFFFFFFFF; + track->db_s_idx = 0xFFFFFFFF; + track->db_s_read_offset = 0xFFFFFFFF; + track->db_s_write_offset = 0xFFFFFFFF; + track->db_s_read_bo = NULL; + track->db_s_write_bo = NULL; +} + +static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i) +{ + /* XXX fill in */ + return 0; +} + +static int evergreen_cs_track_check(struct radeon_cs_parser *p) +{ + struct evergreen_cs_track *track = p->track; + + /* we don't support stream out buffer yet */ + if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) { + dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n"); + return -EINVAL; + } + + /* XXX fill in */ + return 0; +} + +/** + * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet + * @parser: parser structure holding parsing context. + * @pkt: where to store packet informations + * + * Assume that chunk_ib_index is properly set. Will return -EINVAL + * if packet is bigger than remaining ib size. or if packets is unknown. 
+ **/ +int evergreen_cs_packet_parse(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + unsigned idx) +{ + struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; + uint32_t header; + + if (idx >= ib_chunk->length_dw) { + DRM_ERROR("Can not parse packet at %d after CS end %d !\n", + idx, ib_chunk->length_dw); + return -EINVAL; + } + header = radeon_get_ib_value(p, idx); + pkt->idx = idx; + pkt->type = CP_PACKET_GET_TYPE(header); + pkt->count = CP_PACKET_GET_COUNT(header); + pkt->one_reg_wr = 0; + switch (pkt->type) { + case PACKET_TYPE0: + pkt->reg = CP_PACKET0_GET_REG(header); + break; + case PACKET_TYPE3: + pkt->opcode = CP_PACKET3_GET_OPCODE(header); + break; + case PACKET_TYPE2: + pkt->count = -1; + break; + default: + DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); + return -EINVAL; + } + if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { + DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", + pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); + return -EINVAL; + } + return 0; +} + +/** + * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3 + * @parser: parser structure holding parsing context. + * @data: pointer to relocation data + * @offset_start: starting offset + * @offset_mask: offset mask (to align start offset on) + * @reloc: reloc informations + * + * Check next packet is relocation packet3, do bo validation and compute + * GPU offset using the provided start. + **/ +static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, + struct radeon_cs_reloc **cs_reloc) +{ + struct radeon_cs_chunk *relocs_chunk; + struct radeon_cs_packet p3reloc; + unsigned idx; + int r; + + if (p->chunk_relocs_idx == -1) { + DRM_ERROR("No relocation chunk !\n"); + return -EINVAL; + } + *cs_reloc = NULL; + relocs_chunk = &p->chunks[p->chunk_relocs_idx]; + r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); + if (r) { + return r; + } + p->idx += p3reloc.count + 2; + if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { + DRM_ERROR("No packet3 for relocation for packet at %d.\n", + p3reloc.idx); + return -EINVAL; + } + idx = radeon_get_ib_value(p, p3reloc.idx + 1); + if (idx >= relocs_chunk->length_dw) { + DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", + idx, relocs_chunk->length_dw); + return -EINVAL; + } + /* FIXME: we assume reloc size is 4 dwords */ + *cs_reloc = p->relocs_ptr[(idx / 4)]; + return 0; +} + +/** + * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc + * @parser: parser structure holding parsing context. + * + * Check next packet is relocation packet3, do bo validation and compute + * GPU offset using the provided start. + **/ +static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) +{ + struct radeon_cs_packet p3reloc; + int r; + + r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); + if (r) { + return 0; + } + if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { + return 0; + } + return 1; +} + +/** + * evergreen_cs_packet_next_vline() - parse userspace VLINE packet + * @parser: parser structure holding parsing context. + * + * Userspace sends a special sequence for VLINE waits. + * PACKET0 - VLINE_START_END + value + * PACKET3 - WAIT_REG_MEM poll vline status reg + * RELOC (P3) - crtc_id in reloc. + * + * This function parses this and relocates the VLINE START END + * and WAIT_REG_MEM packets to the correct crtc. 
+ * It also detects a switched off crtc and nulls out the + * wait in that case. + */ +static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p) +{ + struct drm_mode_object *obj; + struct drm_crtc *crtc; + struct radeon_crtc *radeon_crtc; + struct radeon_cs_packet p3reloc, wait_reg_mem; + int crtc_id; + int r; + uint32_t header, h_idx, reg, wait_reg_mem_info; + volatile uint32_t *ib; + + ib = p->ib->ptr; + + /* parse the WAIT_REG_MEM */ + r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx); + if (r) + return r; + + /* check its a WAIT_REG_MEM */ + if (wait_reg_mem.type != PACKET_TYPE3 || + wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { + DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); + r = -EINVAL; + return r; + } + + wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); + /* bit 4 is reg (0) or mem (1) */ + if (wait_reg_mem_info & 0x10) { + DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); + r = -EINVAL; + return r; + } + /* waiting for value to be equal */ + if ((wait_reg_mem_info & 0x7) != 0x3) { + DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); + r = -EINVAL; + return r; + } + if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) { + DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); + r = -EINVAL; + return r; + } + + if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) { + DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); + r = -EINVAL; + return r; + } + + /* jump over the NOP */ + r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2); + if (r) + return r; + + h_idx = p->idx - 2; + p->idx += wait_reg_mem.count + 2; + p->idx += p3reloc.count + 2; + + header = radeon_get_ib_value(p, h_idx); + crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); + reg = CP_PACKET0_GET_REG(header); + mutex_lock(&p->rdev->ddev->mode_config.mutex); + obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); + if (!obj) { + DRM_ERROR("cannot find crtc %d\n", crtc_id); + r = -EINVAL; + goto out; + } + crtc = obj_to_crtc(obj); + radeon_crtc = to_radeon_crtc(crtc); + crtc_id = radeon_crtc->crtc_id; + + if (!crtc->enabled) { + /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */ + ib[h_idx + 2] = PACKET2(0); + ib[h_idx + 3] = PACKET2(0); + ib[h_idx + 4] = PACKET2(0); + ib[h_idx + 5] = PACKET2(0); + ib[h_idx + 6] = PACKET2(0); + ib[h_idx + 7] = PACKET2(0); + ib[h_idx + 8] = PACKET2(0); + } else { + switch (reg) { + case EVERGREEN_VLINE_START_END: + header &= ~R600_CP_PACKET0_REG_MASK; + header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2; + ib[h_idx] = header; + ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2; + break; + default: + DRM_ERROR("unknown crtc reloc\n"); + r = -EINVAL; + goto out; + } + } +out: + mutex_unlock(&p->rdev->ddev->mode_config.mutex); + return r; +} + +static int evergreen_packet0_check(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + unsigned idx, unsigned reg) +{ + int r; + + switch (reg) { + case EVERGREEN_VLINE_START_END: + r = evergreen_cs_packet_parse_vline(p); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + return r; + } + break; + default: + printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", + reg, idx); + return -EINVAL; + } + return 0; +} + +static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt) +{ + unsigned reg, i; + unsigned idx; + int r; + + idx = pkt->idx + 1; + reg = pkt->reg; + for (i = 0; i <= 
pkt->count; i++, idx++, reg += 4) { + r = evergreen_packet0_check(p, pkt, idx, reg); + if (r) { + return r; + } + } + return 0; +} + +/** + * evergreen_cs_check_reg() - check if register is authorized or not + * @parser: parser structure holding parsing context + * @reg: register we are testing + * @idx: index into the cs buffer + * + * This function will test against evergreen_reg_safe_bm and return 0 + * if register is safe. If register is not flag as safe this function + * will test it against a list of register needind special handling. + */ +static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) +{ + struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; + struct radeon_cs_reloc *reloc; + u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); + u32 m, i, tmp, *ib; + int r; + + i = (reg >> 7); + if (i > last_reg) { + dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); + return -EINVAL; + } + m = 1 << ((reg >> 2) & 31); + if (!(evergreen_reg_safe_bm[i] & m)) + return 0; + ib = p->ib->ptr; + switch (reg) { + /* force following reg to 0 in an attemp to disable out buffer + * which will need us to better understand how it works to perform + * security check on it (Jerome) + */ + case SQ_ESGS_RING_SIZE: + case SQ_GSVS_RING_SIZE: + case SQ_ESTMP_RING_SIZE: + case SQ_GSTMP_RING_SIZE: + case SQ_HSTMP_RING_SIZE: + case SQ_LSTMP_RING_SIZE: + case SQ_PSTMP_RING_SIZE: + case SQ_VSTMP_RING_SIZE: + case SQ_ESGS_RING_ITEMSIZE: + case SQ_ESTMP_RING_ITEMSIZE: + case SQ_GSTMP_RING_ITEMSIZE: + case SQ_GSVS_RING_ITEMSIZE: + case SQ_GS_VERT_ITEMSIZE: + case SQ_GS_VERT_ITEMSIZE_1: + case SQ_GS_VERT_ITEMSIZE_2: + case SQ_GS_VERT_ITEMSIZE_3: + case SQ_GSVS_RING_OFFSET_1: + case SQ_GSVS_RING_OFFSET_2: + case SQ_GSVS_RING_OFFSET_3: + case SQ_HSTMP_RING_ITEMSIZE: + case SQ_LSTMP_RING_ITEMSIZE: + case SQ_PSTMP_RING_ITEMSIZE: + case SQ_VSTMP_RING_ITEMSIZE: + case VGT_TF_RING_SIZE: + /* get value to populate the IB don't remove */ + tmp =radeon_get_ib_value(p, idx); + ib[idx] = 0; + break; + case DB_DEPTH_CONTROL: + track->db_depth_control = radeon_get_ib_value(p, idx); + break; + case DB_Z_INFO: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + track->db_z_info = radeon_get_ib_value(p, idx); + ib[idx] &= ~Z_ARRAY_MODE(0xf); + track->db_z_info &= ~Z_ARRAY_MODE(0xf); + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); + track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); + } else { + ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + } + break; + case DB_STENCIL_INFO: + track->db_s_info = radeon_get_ib_value(p, idx); + break; + case DB_DEPTH_VIEW: + track->db_depth_view = radeon_get_ib_value(p, idx); + break; + case DB_DEPTH_SIZE: + track->db_depth_size = radeon_get_ib_value(p, idx); + track->db_depth_size_idx = idx; + break; + case DB_Z_READ_BASE: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + track->db_z_read_offset = radeon_get_ib_value(p, idx); + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->db_z_read_bo = reloc->robj; + break; + case DB_Z_WRITE_BASE: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + track->db_z_write_offset = 
radeon_get_ib_value(p, idx); + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->db_z_write_bo = reloc->robj; + break; + case DB_STENCIL_READ_BASE: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + track->db_s_read_offset = radeon_get_ib_value(p, idx); + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->db_s_read_bo = reloc->robj; + break; + case DB_STENCIL_WRITE_BASE: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + track->db_s_write_offset = radeon_get_ib_value(p, idx); + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->db_s_write_bo = reloc->robj; + break; + case VGT_STRMOUT_CONFIG: + track->vgt_strmout_config = radeon_get_ib_value(p, idx); + break; + case VGT_STRMOUT_BUFFER_CONFIG: + track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx); + break; + case CB_TARGET_MASK: + track->cb_target_mask = radeon_get_ib_value(p, idx); + break; + case CB_SHADER_MASK: + track->cb_shader_mask = radeon_get_ib_value(p, idx); + break; + case PA_SC_AA_CONFIG: + tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK; + track->nsamples = 1 << tmp; + break; + case CB_COLOR0_VIEW: + case CB_COLOR1_VIEW: + case CB_COLOR2_VIEW: + case CB_COLOR3_VIEW: + case CB_COLOR4_VIEW: + case CB_COLOR5_VIEW: + case CB_COLOR6_VIEW: + case CB_COLOR7_VIEW: + tmp = (reg - CB_COLOR0_VIEW) / 0x3c; + track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); + break; + case CB_COLOR8_VIEW: + case CB_COLOR9_VIEW: + case CB_COLOR10_VIEW: + case CB_COLOR11_VIEW: + tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8; + track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); + break; + case CB_COLOR0_INFO: + case CB_COLOR1_INFO: + case CB_COLOR2_INFO: + case CB_COLOR3_INFO: + case CB_COLOR4_INFO: + case CB_COLOR5_INFO: + case CB_COLOR6_INFO: + case CB_COLOR7_INFO: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + tmp = (reg - CB_COLOR0_INFO) / 0x3c; + track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); + } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { + ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + } + break; + case CB_COLOR8_INFO: + case CB_COLOR9_INFO: + case CB_COLOR10_INFO: + case CB_COLOR11_INFO: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; + track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); + } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { + ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + } + break; + case CB_COLOR0_PITCH: + case CB_COLOR1_PITCH: + case CB_COLOR2_PITCH: + case CB_COLOR3_PITCH: + case CB_COLOR4_PITCH: + case CB_COLOR5_PITCH: + case CB_COLOR6_PITCH: + case CB_COLOR7_PITCH: + tmp = (reg - CB_COLOR0_PITCH) / 
0x3c; + track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); + track->cb_color_pitch_idx[tmp] = idx; + break; + case CB_COLOR8_PITCH: + case CB_COLOR9_PITCH: + case CB_COLOR10_PITCH: + case CB_COLOR11_PITCH: + tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8; + track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); + track->cb_color_pitch_idx[tmp] = idx; + break; + case CB_COLOR0_SLICE: + case CB_COLOR1_SLICE: + case CB_COLOR2_SLICE: + case CB_COLOR3_SLICE: + case CB_COLOR4_SLICE: + case CB_COLOR5_SLICE: + case CB_COLOR6_SLICE: + case CB_COLOR7_SLICE: + tmp = (reg - CB_COLOR0_SLICE) / 0x3c; + track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); + track->cb_color_slice_idx[tmp] = idx; + break; + case CB_COLOR8_SLICE: + case CB_COLOR9_SLICE: + case CB_COLOR10_SLICE: + case CB_COLOR11_SLICE: + tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; + track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); + track->cb_color_slice_idx[tmp] = idx; + break; + case CB_COLOR0_ATTRIB: + case CB_COLOR1_ATTRIB: + case CB_COLOR2_ATTRIB: + case CB_COLOR3_ATTRIB: + case CB_COLOR4_ATTRIB: + case CB_COLOR5_ATTRIB: + case CB_COLOR6_ATTRIB: + case CB_COLOR7_ATTRIB: + case CB_COLOR8_ATTRIB: + case CB_COLOR9_ATTRIB: + case CB_COLOR10_ATTRIB: + case CB_COLOR11_ATTRIB: + break; + case CB_COLOR0_DIM: + case CB_COLOR1_DIM: + case CB_COLOR2_DIM: + case CB_COLOR3_DIM: + case CB_COLOR4_DIM: + case CB_COLOR5_DIM: + case CB_COLOR6_DIM: + case CB_COLOR7_DIM: + tmp = (reg - CB_COLOR0_DIM) / 0x3c; + track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx); + track->cb_color_dim_idx[tmp] = idx; + break; + case CB_COLOR8_DIM: + case CB_COLOR9_DIM: + case CB_COLOR10_DIM: + case CB_COLOR11_DIM: + tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8; + track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx); + track->cb_color_dim_idx[tmp] = idx; + break; + case CB_COLOR0_FMASK: + case CB_COLOR1_FMASK: + case CB_COLOR2_FMASK: + case CB_COLOR3_FMASK: + case CB_COLOR4_FMASK: + case CB_COLOR5_FMASK: + case CB_COLOR6_FMASK: + case CB_COLOR7_FMASK: + tmp = (reg - CB_COLOR0_FMASK) / 0x3c; + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); + return -EINVAL; + } + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->cb_color_fmask_bo[tmp] = reloc->robj; + break; + case CB_COLOR0_CMASK: + case CB_COLOR1_CMASK: + case CB_COLOR2_CMASK: + case CB_COLOR3_CMASK: + case CB_COLOR4_CMASK: + case CB_COLOR5_CMASK: + case CB_COLOR6_CMASK: + case CB_COLOR7_CMASK: + tmp = (reg - CB_COLOR0_CMASK) / 0x3c; + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); + return -EINVAL; + } + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->cb_color_cmask_bo[tmp] = reloc->robj; + break; + case CB_COLOR0_FMASK_SLICE: + case CB_COLOR1_FMASK_SLICE: + case CB_COLOR2_FMASK_SLICE: + case CB_COLOR3_FMASK_SLICE: + case CB_COLOR4_FMASK_SLICE: + case CB_COLOR5_FMASK_SLICE: + case CB_COLOR6_FMASK_SLICE: + case CB_COLOR7_FMASK_SLICE: + tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c; + track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx); + break; + case CB_COLOR0_CMASK_SLICE: + case CB_COLOR1_CMASK_SLICE: + case CB_COLOR2_CMASK_SLICE: + case CB_COLOR3_CMASK_SLICE: + case CB_COLOR4_CMASK_SLICE: + case CB_COLOR5_CMASK_SLICE: + case CB_COLOR6_CMASK_SLICE: + case CB_COLOR7_CMASK_SLICE: + tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c; + track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx); + break; + case 
CB_COLOR0_BASE: + case CB_COLOR1_BASE: + case CB_COLOR2_BASE: + case CB_COLOR3_BASE: + case CB_COLOR4_BASE: + case CB_COLOR5_BASE: + case CB_COLOR6_BASE: + case CB_COLOR7_BASE: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + tmp = (reg - CB_COLOR0_BASE) / 0x3c; + track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->cb_color_base_last[tmp] = ib[idx]; + track->cb_color_bo[tmp] = reloc->robj; + break; + case CB_COLOR8_BASE: + case CB_COLOR9_BASE: + case CB_COLOR10_BASE: + case CB_COLOR11_BASE: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; + track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->cb_color_base_last[tmp] = ib[idx]; + track->cb_color_bo[tmp] = reloc->robj; + break; + case CB_IMMED0_BASE: + case CB_IMMED1_BASE: + case CB_IMMED2_BASE: + case CB_IMMED3_BASE: + case CB_IMMED4_BASE: + case CB_IMMED5_BASE: + case CB_IMMED6_BASE: + case CB_IMMED7_BASE: + case CB_IMMED8_BASE: + case CB_IMMED9_BASE: + case CB_IMMED10_BASE: + case CB_IMMED11_BASE: + case DB_HTILE_DATA_BASE: + case SQ_PGM_START_FS: + case SQ_PGM_START_ES: + case SQ_PGM_START_VS: + case SQ_PGM_START_GS: + case SQ_PGM_START_PS: + case SQ_PGM_START_HS: + case SQ_PGM_START_LS: + case GDS_ADDR_BASE: + case SQ_CONST_MEM_BASE: + case SQ_ALU_CONST_CACHE_GS_0: + case SQ_ALU_CONST_CACHE_GS_1: + case SQ_ALU_CONST_CACHE_GS_2: + case SQ_ALU_CONST_CACHE_GS_3: + case SQ_ALU_CONST_CACHE_GS_4: + case SQ_ALU_CONST_CACHE_GS_5: + case SQ_ALU_CONST_CACHE_GS_6: + case SQ_ALU_CONST_CACHE_GS_7: + case SQ_ALU_CONST_CACHE_GS_8: + case SQ_ALU_CONST_CACHE_GS_9: + case SQ_ALU_CONST_CACHE_GS_10: + case SQ_ALU_CONST_CACHE_GS_11: + case SQ_ALU_CONST_CACHE_GS_12: + case SQ_ALU_CONST_CACHE_GS_13: + case SQ_ALU_CONST_CACHE_GS_14: + case SQ_ALU_CONST_CACHE_GS_15: + case SQ_ALU_CONST_CACHE_PS_0: + case SQ_ALU_CONST_CACHE_PS_1: + case SQ_ALU_CONST_CACHE_PS_2: + case SQ_ALU_CONST_CACHE_PS_3: + case SQ_ALU_CONST_CACHE_PS_4: + case SQ_ALU_CONST_CACHE_PS_5: + case SQ_ALU_CONST_CACHE_PS_6: + case SQ_ALU_CONST_CACHE_PS_7: + case SQ_ALU_CONST_CACHE_PS_8: + case SQ_ALU_CONST_CACHE_PS_9: + case SQ_ALU_CONST_CACHE_PS_10: + case SQ_ALU_CONST_CACHE_PS_11: + case SQ_ALU_CONST_CACHE_PS_12: + case SQ_ALU_CONST_CACHE_PS_13: + case SQ_ALU_CONST_CACHE_PS_14: + case SQ_ALU_CONST_CACHE_PS_15: + case SQ_ALU_CONST_CACHE_VS_0: + case SQ_ALU_CONST_CACHE_VS_1: + case SQ_ALU_CONST_CACHE_VS_2: + case SQ_ALU_CONST_CACHE_VS_3: + case SQ_ALU_CONST_CACHE_VS_4: + case SQ_ALU_CONST_CACHE_VS_5: + case SQ_ALU_CONST_CACHE_VS_6: + case SQ_ALU_CONST_CACHE_VS_7: + case SQ_ALU_CONST_CACHE_VS_8: + case SQ_ALU_CONST_CACHE_VS_9: + case SQ_ALU_CONST_CACHE_VS_10: + case SQ_ALU_CONST_CACHE_VS_11: + case SQ_ALU_CONST_CACHE_VS_12: + case SQ_ALU_CONST_CACHE_VS_13: + case SQ_ALU_CONST_CACHE_VS_14: + case SQ_ALU_CONST_CACHE_VS_15: + case SQ_ALU_CONST_CACHE_HS_0: + case SQ_ALU_CONST_CACHE_HS_1: + case SQ_ALU_CONST_CACHE_HS_2: + case SQ_ALU_CONST_CACHE_HS_3: + case SQ_ALU_CONST_CACHE_HS_4: + case SQ_ALU_CONST_CACHE_HS_5: + case SQ_ALU_CONST_CACHE_HS_6: + case SQ_ALU_CONST_CACHE_HS_7: + case SQ_ALU_CONST_CACHE_HS_8: + case SQ_ALU_CONST_CACHE_HS_9: + case SQ_ALU_CONST_CACHE_HS_10: + case SQ_ALU_CONST_CACHE_HS_11: + case 
SQ_ALU_CONST_CACHE_HS_12: + case SQ_ALU_CONST_CACHE_HS_13: + case SQ_ALU_CONST_CACHE_HS_14: + case SQ_ALU_CONST_CACHE_HS_15: + case SQ_ALU_CONST_CACHE_LS_0: + case SQ_ALU_CONST_CACHE_LS_1: + case SQ_ALU_CONST_CACHE_LS_2: + case SQ_ALU_CONST_CACHE_LS_3: + case SQ_ALU_CONST_CACHE_LS_4: + case SQ_ALU_CONST_CACHE_LS_5: + case SQ_ALU_CONST_CACHE_LS_6: + case SQ_ALU_CONST_CACHE_LS_7: + case SQ_ALU_CONST_CACHE_LS_8: + case SQ_ALU_CONST_CACHE_LS_9: + case SQ_ALU_CONST_CACHE_LS_10: + case SQ_ALU_CONST_CACHE_LS_11: + case SQ_ALU_CONST_CACHE_LS_12: + case SQ_ALU_CONST_CACHE_LS_13: + case SQ_ALU_CONST_CACHE_LS_14: + case SQ_ALU_CONST_CACHE_LS_15: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + break; + default: + dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); + return -EINVAL; + } + return 0; +} + +/** + * evergreen_check_texture_resource() - check if register is authorized or not + * @p: parser structure holding parsing context + * @idx: index into the cs buffer + * @texture: texture's bo structure + * @mipmap: mipmap's bo structure + * + * This function will check that the resource has valid field and that + * the texture and mipmap bo object are big enough to cover this resource. + */ +static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx, + struct radeon_bo *texture, + struct radeon_bo *mipmap) +{ + /* XXX fill in */ + return 0; +} + +static int evergreen_packet3_check(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt) +{ + struct radeon_cs_reloc *reloc; + struct evergreen_cs_track *track; + volatile u32 *ib; + unsigned idx; + unsigned i; + unsigned start_reg, end_reg, reg; + int r; + u32 idx_value; + + track = (struct evergreen_cs_track *)p->track; + ib = p->ib->ptr; + idx = pkt->idx + 1; + idx_value = radeon_get_ib_value(p, idx); + + switch (pkt->opcode) { + case PACKET3_CONTEXT_CONTROL: + if (pkt->count != 1) { + DRM_ERROR("bad CONTEXT_CONTROL\n"); + return -EINVAL; + } + break; + case PACKET3_INDEX_TYPE: + case PACKET3_NUM_INSTANCES: + case PACKET3_CLEAR_STATE: + if (pkt->count) { + DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); + return -EINVAL; + } + break; + case PACKET3_INDEX_BASE: + if (pkt->count != 1) { + DRM_ERROR("bad INDEX_BASE\n"); + return -EINVAL; + } + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad INDEX_BASE\n"); + return -EINVAL; + } + ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); + ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + return r; + } + break; + case PACKET3_DRAW_INDEX: + if (pkt->count != 3) { + DRM_ERROR("bad DRAW_INDEX\n"); + return -EINVAL; + } + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad DRAW_INDEX\n"); + return -EINVAL; + } + ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); + ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + return r; + } + break; + case PACKET3_DRAW_INDEX_2: + if (pkt->count != 4) { + DRM_ERROR("bad DRAW_INDEX_2\n"); + return -EINVAL; + } + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad DRAW_INDEX_2\n"); + return -EINVAL; + } + ib[idx+1] = 
idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); + ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + return r; + } + break; + case PACKET3_DRAW_INDEX_AUTO: + if (pkt->count != 1) { + DRM_ERROR("bad DRAW_INDEX_AUTO\n"); + return -EINVAL; + } + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); + return r; + } + break; + case PACKET3_DRAW_INDEX_MULTI_AUTO: + if (pkt->count != 2) { + DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n"); + return -EINVAL; + } + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); + return r; + } + break; + case PACKET3_DRAW_INDEX_IMMD: + if (pkt->count < 2) { + DRM_ERROR("bad DRAW_INDEX_IMMD\n"); + return -EINVAL; + } + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + return r; + } + break; + case PACKET3_DRAW_INDEX_OFFSET: + if (pkt->count != 2) { + DRM_ERROR("bad DRAW_INDEX_OFFSET\n"); + return -EINVAL; + } + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + return r; + } + break; + case PACKET3_DRAW_INDEX_OFFSET_2: + if (pkt->count != 3) { + DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n"); + return -EINVAL; + } + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + return r; + } + break; + case PACKET3_WAIT_REG_MEM: + if (pkt->count != 5) { + DRM_ERROR("bad WAIT_REG_MEM\n"); + return -EINVAL; + } + /* bit 4 is reg (0) or mem (1) */ + if (idx_value & 0x10) { + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad WAIT_REG_MEM\n"); + return -EINVAL; + } + ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); + ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + } + break; + case PACKET3_SURFACE_SYNC: + if (pkt->count != 3) { + DRM_ERROR("bad SURFACE_SYNC\n"); + return -EINVAL; + } + /* 0xffffffff/0x0 is flush all cache flag */ + if (radeon_get_ib_value(p, idx + 1) != 0xffffffff || + radeon_get_ib_value(p, idx + 2) != 0) { + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad SURFACE_SYNC\n"); + return -EINVAL; + } + ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + } + break; + case PACKET3_EVENT_WRITE: + if (pkt->count != 2 && pkt->count != 0) { + DRM_ERROR("bad EVENT_WRITE\n"); + return -EINVAL; + } + if (pkt->count) { + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad EVENT_WRITE\n"); + return -EINVAL; + } + ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); + ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + } + break; + case PACKET3_EVENT_WRITE_EOP: + if (pkt->count != 4) { + DRM_ERROR("bad EVENT_WRITE_EOP\n"); + return -EINVAL; + } + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad EVENT_WRITE_EOP\n"); + return -EINVAL; + } + ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); + ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + break; + case PACKET3_EVENT_WRITE_EOS: + if (pkt->count != 3) { + DRM_ERROR("bad EVENT_WRITE_EOS\n"); + return -EINVAL; + } + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad EVENT_WRITE_EOS\n"); + return -EINVAL; + } + ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); + ib[idx+2] += 
upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + break; + case PACKET3_SET_CONFIG_REG: + start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START; + end_reg = 4 * pkt->count + start_reg - 4; + if ((start_reg < PACKET3_SET_CONFIG_REG_START) || + (start_reg >= PACKET3_SET_CONFIG_REG_END) || + (end_reg >= PACKET3_SET_CONFIG_REG_END)) { + DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); + return -EINVAL; + } + for (i = 0; i < pkt->count; i++) { + reg = start_reg + (4 * i); + r = evergreen_cs_check_reg(p, reg, idx+1+i); + if (r) + return r; + } + break; + case PACKET3_SET_CONTEXT_REG: + start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START; + end_reg = 4 * pkt->count + start_reg - 4; + if ((start_reg < PACKET3_SET_CONTEXT_REG_START) || + (start_reg >= PACKET3_SET_CONTEXT_REG_END) || + (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { + DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); + return -EINVAL; + } + for (i = 0; i < pkt->count; i++) { + reg = start_reg + (4 * i); + r = evergreen_cs_check_reg(p, reg, idx+1+i); + if (r) + return r; + } + break; + case PACKET3_SET_RESOURCE: + if (pkt->count % 8) { + DRM_ERROR("bad SET_RESOURCE\n"); + return -EINVAL; + } + start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START; + end_reg = 4 * pkt->count + start_reg - 4; + if ((start_reg < PACKET3_SET_RESOURCE_START) || + (start_reg >= PACKET3_SET_RESOURCE_END) || + (end_reg >= PACKET3_SET_RESOURCE_END)) { + DRM_ERROR("bad SET_RESOURCE\n"); + return -EINVAL; + } + for (i = 0; i < (pkt->count / 8); i++) { + struct radeon_bo *texture, *mipmap; + u32 size, offset; + + switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) { + case SQ_TEX_VTX_VALID_TEXTURE: + /* tex base */ + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad SET_RESOURCE (tex)\n"); + return -EINVAL; + } + ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + texture = reloc->robj; + /* tex mip base */ + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad SET_RESOURCE (tex)\n"); + return -EINVAL; + } + ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + mipmap = reloc->robj; + r = evergreen_check_texture_resource(p, idx+1+(i*8), + texture, mipmap); + if (r) + return r; + break; + case SQ_TEX_VTX_VALID_BUFFER: + /* vtx base */ + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad SET_RESOURCE (vtx)\n"); + return -EINVAL; + } + offset = radeon_get_ib_value(p, idx+1+(i*8)+0); + size = radeon_get_ib_value(p, idx+1+(i*8)+1); + if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { + /* force size to size of the buffer */ + dev_warn(p->dev, "vbo resource seems too big for the bo\n"); + ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj); + } + ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); + ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + break; + case SQ_TEX_VTX_INVALID_TEXTURE: + case SQ_TEX_VTX_INVALID_BUFFER: + default: + DRM_ERROR("bad SET_RESOURCE\n"); + return -EINVAL; + } + } + break; + case PACKET3_SET_ALU_CONST: + /* XXX fix me ALU const buffers only */ + break; + case PACKET3_SET_BOOL_CONST: + start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START; + end_reg = 4 * pkt->count + start_reg - 4; + if ((start_reg < PACKET3_SET_BOOL_CONST_START) || 
+ (start_reg >= PACKET3_SET_BOOL_CONST_END) || + (end_reg >= PACKET3_SET_BOOL_CONST_END)) { + DRM_ERROR("bad SET_BOOL_CONST\n"); + return -EINVAL; + } + break; + case PACKET3_SET_LOOP_CONST: + start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START; + end_reg = 4 * pkt->count + start_reg - 4; + if ((start_reg < PACKET3_SET_LOOP_CONST_START) || + (start_reg >= PACKET3_SET_LOOP_CONST_END) || + (end_reg >= PACKET3_SET_LOOP_CONST_END)) { + DRM_ERROR("bad SET_LOOP_CONST\n"); + return -EINVAL; + } + break; + case PACKET3_SET_CTL_CONST: + start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START; + end_reg = 4 * pkt->count + start_reg - 4; + if ((start_reg < PACKET3_SET_CTL_CONST_START) || + (start_reg >= PACKET3_SET_CTL_CONST_END) || + (end_reg >= PACKET3_SET_CTL_CONST_END)) { + DRM_ERROR("bad SET_CTL_CONST\n"); + return -EINVAL; + } + break; + case PACKET3_SET_SAMPLER: + if (pkt->count % 3) { + DRM_ERROR("bad SET_SAMPLER\n"); + return -EINVAL; + } + start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START; + end_reg = 4 * pkt->count + start_reg - 4; + if ((start_reg < PACKET3_SET_SAMPLER_START) || + (start_reg >= PACKET3_SET_SAMPLER_END) || + (end_reg >= PACKET3_SET_SAMPLER_END)) { + DRM_ERROR("bad SET_SAMPLER\n"); + return -EINVAL; + } + break; + case PACKET3_NOP: + break; + default: + DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); + return -EINVAL; + } + return 0; +} + +int evergreen_cs_parse(struct radeon_cs_parser *p) +{ + struct radeon_cs_packet pkt; + struct evergreen_cs_track *track; + int r; + + if (p->track == NULL) { + /* initialize tracker, we are in kms */ + track = kzalloc(sizeof(*track), GFP_KERNEL); + if (track == NULL) + return -ENOMEM; + evergreen_cs_track_init(track); + track->npipes = p->rdev->config.evergreen.tiling_npipes; + track->nbanks = p->rdev->config.evergreen.tiling_nbanks; + track->group_size = p->rdev->config.evergreen.tiling_group_size; + p->track = track; + } + do { + r = evergreen_cs_packet_parse(p, &pkt, p->idx); + if (r) { + kfree(p->track); + p->track = NULL; + return r; + } + p->idx += pkt.count + 2; + switch (pkt.type) { + case PACKET_TYPE0: + r = evergreen_cs_parse_packet0(p, &pkt); + break; + case PACKET_TYPE2: + break; + case PACKET_TYPE3: + r = evergreen_packet3_check(p, &pkt); + break; + default: + DRM_ERROR("Unknown packet type %d !\n", pkt.type); + kfree(p->track); + p->track = NULL; + return -EINVAL; + } + if (r) { + kfree(p->track); + p->track = NULL; + return r; + } + } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); +#if 0 + for (r = 0; r < p->ib->length_dw; r++) { + printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]); + mdelay(1); + } +#endif + kfree(p->track); + p->track = NULL; + return 0; +} + diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index af86af836f13..e028c1cd9d9b 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -151,6 +151,9 @@ #define EVERGREEN_DATA_FORMAT 0x6b00 # define EVERGREEN_INTERLEAVE_EN (1 << 0) #define EVERGREEN_DESKTOP_HEIGHT 0x6b04 +#define EVERGREEN_VLINE_START_END 0x6b08 +#define EVERGREEN_VLINE_STATUS 0x6bb8 +# define EVERGREEN_VLINE_STAT (1 << 12) #define EVERGREEN_VIEWPORT_START 0x6d70 #define EVERGREEN_VIEWPORT_SIZE 0x6d74 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 93e9e17ad54a..a1cd621780e2 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -218,6 +218,8 @@ #define CLIP_VTX_REORDER_ENA (1 << 0) #define 
NUM_CLIP_SEQ(x) ((x) << 1) #define PA_SC_AA_CONFIG 0x28C04 +#define MSAA_NUM_SAMPLES_SHIFT 0 +#define MSAA_NUM_SAMPLES_MASK 0x3 #define PA_SC_CLIPRECT_RULE 0x2820C #define PA_SC_EDGERULE 0x28230 #define PA_SC_FIFO_SIZE 0x8BCC @@ -553,4 +555,469 @@ # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) # define DC_HPDx_EN (1 << 28) +/* + * PM4 + */ +#define PACKET_TYPE0 0 +#define PACKET_TYPE1 1 +#define PACKET_TYPE2 2 +#define PACKET_TYPE3 3 + +#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) +#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) +#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2) +#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) +#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ + (((reg) >> 2) & 0xFFFF) | \ + ((n) & 0x3FFF) << 16) +#define CP_PACKET2 0x80000000 +#define PACKET2_PAD_SHIFT 0 +#define PACKET2_PAD_MASK (0x3fffffff << 0) + +#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) + +#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ + (((op) & 0xFF) << 8) | \ + ((n) & 0x3FFF) << 16) + +/* Packet 3 types */ +#define PACKET3_NOP 0x10 +#define PACKET3_SET_BASE 0x11 +#define PACKET3_CLEAR_STATE 0x12 +#define PACKET3_INDIRECT_BUFFER_SIZE 0x13 +#define PACKET3_DISPATCH_DIRECT 0x15 +#define PACKET3_DISPATCH_INDIRECT 0x16 +#define PACKET3_INDIRECT_BUFFER_END 0x17 +#define PACKET3_SET_PREDICATION 0x20 +#define PACKET3_REG_RMW 0x21 +#define PACKET3_COND_EXEC 0x22 +#define PACKET3_PRED_EXEC 0x23 +#define PACKET3_DRAW_INDIRECT 0x24 +#define PACKET3_DRAW_INDEX_INDIRECT 0x25 +#define PACKET3_INDEX_BASE 0x26 +#define PACKET3_DRAW_INDEX_2 0x27 +#define PACKET3_CONTEXT_CONTROL 0x28 +#define PACKET3_DRAW_INDEX_OFFSET 0x29 +#define PACKET3_INDEX_TYPE 0x2A +#define PACKET3_DRAW_INDEX 0x2B +#define PACKET3_DRAW_INDEX_AUTO 0x2D +#define PACKET3_DRAW_INDEX_IMMD 0x2E +#define PACKET3_NUM_INSTANCES 0x2F +#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30 +#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34 +#define PACKET3_DRAW_INDEX_OFFSET_2 0x35 +#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36 +#define PACKET3_MEM_SEMAPHORE 0x39 +#define PACKET3_MPEG_INDEX 0x3A +#define PACKET3_WAIT_REG_MEM 0x3C +#define PACKET3_MEM_WRITE 0x3D +#define PACKET3_INDIRECT_BUFFER 0x32 +#define PACKET3_SURFACE_SYNC 0x43 +# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) +# define PACKET3_CB1_DEST_BASE_ENA (1 << 7) +# define PACKET3_CB2_DEST_BASE_ENA (1 << 8) +# define PACKET3_CB3_DEST_BASE_ENA (1 << 9) +# define PACKET3_CB4_DEST_BASE_ENA (1 << 10) +# define PACKET3_CB5_DEST_BASE_ENA (1 << 11) +# define PACKET3_CB6_DEST_BASE_ENA (1 << 12) +# define PACKET3_CB7_DEST_BASE_ENA (1 << 13) +# define PACKET3_DB_DEST_BASE_ENA (1 << 14) +# define PACKET3_CB8_DEST_BASE_ENA (1 << 15) +# define PACKET3_CB9_DEST_BASE_ENA (1 << 16) +# define PACKET3_CB10_DEST_BASE_ENA (1 << 17) +# define PACKET3_CB11_DEST_BASE_ENA (1 << 17) +# define PACKET3_FULL_CACHE_ENA (1 << 20) +# define PACKET3_TC_ACTION_ENA (1 << 23) +# define PACKET3_VC_ACTION_ENA (1 << 24) +# define PACKET3_CB_ACTION_ENA (1 << 25) +# define PACKET3_DB_ACTION_ENA (1 << 26) +# define PACKET3_SH_ACTION_ENA (1 << 27) +# define PACKET3_SMX_ACTION_ENA (1 << 28) +#define PACKET3_ME_INITIALIZE 0x44 +#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) +#define PACKET3_COND_WRITE 0x45 +#define PACKET3_EVENT_WRITE 0x46 +#define PACKET3_EVENT_WRITE_EOP 0x47 +#define PACKET3_EVENT_WRITE_EOS 0x48 +#define PACKET3_PREAMBLE_CNTL 0x4A +#define PACKET3_RB_OFFSET 0x4B +#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C +#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D +#define PACKET3_ALU_PS_CONST_UPDATE 0x4E 
+#define PACKET3_ALU_VS_CONST_UPDATE 0x4F +#define PACKET3_ONE_REG_WRITE 0x57 +#define PACKET3_SET_CONFIG_REG 0x68 +#define PACKET3_SET_CONFIG_REG_START 0x00008000 +#define PACKET3_SET_CONFIG_REG_END 0x0000ac00 +#define PACKET3_SET_CONTEXT_REG 0x69 +#define PACKET3_SET_CONTEXT_REG_START 0x00028000 +#define PACKET3_SET_CONTEXT_REG_END 0x00029000 +#define PACKET3_SET_ALU_CONST 0x6A +/* alu const buffers only; no reg file */ +#define PACKET3_SET_BOOL_CONST 0x6B +#define PACKET3_SET_BOOL_CONST_START 0x0003a500 +#define PACKET3_SET_BOOL_CONST_END 0x0003a518 +#define PACKET3_SET_LOOP_CONST 0x6C +#define PACKET3_SET_LOOP_CONST_START 0x0003a200 +#define PACKET3_SET_LOOP_CONST_END 0x0003a500 +#define PACKET3_SET_RESOURCE 0x6D +#define PACKET3_SET_RESOURCE_START 0x00030000 +#define PACKET3_SET_RESOURCE_END 0x00038000 +#define PACKET3_SET_SAMPLER 0x6E +#define PACKET3_SET_SAMPLER_START 0x0003c000 +#define PACKET3_SET_SAMPLER_END 0x0003c600 +#define PACKET3_SET_CTL_CONST 0x6F +#define PACKET3_SET_CTL_CONST_START 0x0003cff0 +#define PACKET3_SET_CTL_CONST_END 0x0003ff0c +#define PACKET3_SET_RESOURCE_OFFSET 0x70 +#define PACKET3_SET_ALU_CONST_VS 0x71 +#define PACKET3_SET_ALU_CONST_DI 0x72 +#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73 +#define PACKET3_SET_RESOURCE_INDIRECT 0x74 +#define PACKET3_SET_APPEND_CNT 0x75 + +#define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c +#define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30) +#define G__SQ_CONSTANT_TYPE(x) (((x) >> 30) & 3) +#define SQ_TEX_VTX_INVALID_TEXTURE 0x0 +#define SQ_TEX_VTX_INVALID_BUFFER 0x1 +#define SQ_TEX_VTX_VALID_TEXTURE 0x2 +#define SQ_TEX_VTX_VALID_BUFFER 0x3 + +#define SQ_CONST_MEM_BASE 0x8df8 + +#define SQ_ESGS_RING_SIZE 0x8c44 +#define SQ_GSVS_RING_SIZE 0x8c4c +#define SQ_ESTMP_RING_SIZE 0x8c54 +#define SQ_GSTMP_RING_SIZE 0x8c5c +#define SQ_VSTMP_RING_SIZE 0x8c64 +#define SQ_PSTMP_RING_SIZE 0x8c6c +#define SQ_LSTMP_RING_SIZE 0x8e14 +#define SQ_HSTMP_RING_SIZE 0x8e1c +#define VGT_TF_RING_SIZE 0x8988 + +#define SQ_ESGS_RING_ITEMSIZE 0x28900 +#define SQ_GSVS_RING_ITEMSIZE 0x28904 +#define SQ_ESTMP_RING_ITEMSIZE 0x28908 +#define SQ_GSTMP_RING_ITEMSIZE 0x2890c +#define SQ_VSTMP_RING_ITEMSIZE 0x28910 +#define SQ_PSTMP_RING_ITEMSIZE 0x28914 +#define SQ_LSTMP_RING_ITEMSIZE 0x28830 +#define SQ_HSTMP_RING_ITEMSIZE 0x28834 + +#define SQ_GS_VERT_ITEMSIZE 0x2891c +#define SQ_GS_VERT_ITEMSIZE_1 0x28920 +#define SQ_GS_VERT_ITEMSIZE_2 0x28924 +#define SQ_GS_VERT_ITEMSIZE_3 0x28928 +#define SQ_GSVS_RING_OFFSET_1 0x2892c +#define SQ_GSVS_RING_OFFSET_2 0x28930 +#define SQ_GSVS_RING_OFFSET_3 0x28934 + +#define SQ_ALU_CONST_BUFFER_SIZE_PS_0 0x28140 +#define SQ_ALU_CONST_BUFFER_SIZE_HS_0 0x28f80 + +#define SQ_ALU_CONST_CACHE_PS_0 0x28940 +#define SQ_ALU_CONST_CACHE_PS_1 0x28944 +#define SQ_ALU_CONST_CACHE_PS_2 0x28948 +#define SQ_ALU_CONST_CACHE_PS_3 0x2894c +#define SQ_ALU_CONST_CACHE_PS_4 0x28950 +#define SQ_ALU_CONST_CACHE_PS_5 0x28954 +#define SQ_ALU_CONST_CACHE_PS_6 0x28958 +#define SQ_ALU_CONST_CACHE_PS_7 0x2895c +#define SQ_ALU_CONST_CACHE_PS_8 0x28960 +#define SQ_ALU_CONST_CACHE_PS_9 0x28964 +#define SQ_ALU_CONST_CACHE_PS_10 0x28968 +#define SQ_ALU_CONST_CACHE_PS_11 0x2896c +#define SQ_ALU_CONST_CACHE_PS_12 0x28970 +#define SQ_ALU_CONST_CACHE_PS_13 0x28974 +#define SQ_ALU_CONST_CACHE_PS_14 0x28978 +#define SQ_ALU_CONST_CACHE_PS_15 0x2897c +#define SQ_ALU_CONST_CACHE_VS_0 0x28980 +#define SQ_ALU_CONST_CACHE_VS_1 0x28984 +#define SQ_ALU_CONST_CACHE_VS_2 0x28988 +#define SQ_ALU_CONST_CACHE_VS_3 0x2898c +#define SQ_ALU_CONST_CACHE_VS_4 0x28990 +#define 
SQ_ALU_CONST_CACHE_VS_5 0x28994 +#define SQ_ALU_CONST_CACHE_VS_6 0x28998 +#define SQ_ALU_CONST_CACHE_VS_7 0x2899c +#define SQ_ALU_CONST_CACHE_VS_8 0x289a0 +#define SQ_ALU_CONST_CACHE_VS_9 0x289a4 +#define SQ_ALU_CONST_CACHE_VS_10 0x289a8 +#define SQ_ALU_CONST_CACHE_VS_11 0x289ac +#define SQ_ALU_CONST_CACHE_VS_12 0x289b0 +#define SQ_ALU_CONST_CACHE_VS_13 0x289b4 +#define SQ_ALU_CONST_CACHE_VS_14 0x289b8 +#define SQ_ALU_CONST_CACHE_VS_15 0x289bc +#define SQ_ALU_CONST_CACHE_GS_0 0x289c0 +#define SQ_ALU_CONST_CACHE_GS_1 0x289c4 +#define SQ_ALU_CONST_CACHE_GS_2 0x289c8 +#define SQ_ALU_CONST_CACHE_GS_3 0x289cc +#define SQ_ALU_CONST_CACHE_GS_4 0x289d0 +#define SQ_ALU_CONST_CACHE_GS_5 0x289d4 +#define SQ_ALU_CONST_CACHE_GS_6 0x289d8 +#define SQ_ALU_CONST_CACHE_GS_7 0x289dc +#define SQ_ALU_CONST_CACHE_GS_8 0x289e0 +#define SQ_ALU_CONST_CACHE_GS_9 0x289e4 +#define SQ_ALU_CONST_CACHE_GS_10 0x289e8 +#define SQ_ALU_CONST_CACHE_GS_11 0x289ec +#define SQ_ALU_CONST_CACHE_GS_12 0x289f0 +#define SQ_ALU_CONST_CACHE_GS_13 0x289f4 +#define SQ_ALU_CONST_CACHE_GS_14 0x289f8 +#define SQ_ALU_CONST_CACHE_GS_15 0x289fc +#define SQ_ALU_CONST_CACHE_HS_0 0x28f00 +#define SQ_ALU_CONST_CACHE_HS_1 0x28f04 +#define SQ_ALU_CONST_CACHE_HS_2 0x28f08 +#define SQ_ALU_CONST_CACHE_HS_3 0x28f0c +#define SQ_ALU_CONST_CACHE_HS_4 0x28f10 +#define SQ_ALU_CONST_CACHE_HS_5 0x28f14 +#define SQ_ALU_CONST_CACHE_HS_6 0x28f18 +#define SQ_ALU_CONST_CACHE_HS_7 0x28f1c +#define SQ_ALU_CONST_CACHE_HS_8 0x28f20 +#define SQ_ALU_CONST_CACHE_HS_9 0x28f24 +#define SQ_ALU_CONST_CACHE_HS_10 0x28f28 +#define SQ_ALU_CONST_CACHE_HS_11 0x28f2c +#define SQ_ALU_CONST_CACHE_HS_12 0x28f30 +#define SQ_ALU_CONST_CACHE_HS_13 0x28f34 +#define SQ_ALU_CONST_CACHE_HS_14 0x28f38 +#define SQ_ALU_CONST_CACHE_HS_15 0x28f3c +#define SQ_ALU_CONST_CACHE_LS_0 0x28f40 +#define SQ_ALU_CONST_CACHE_LS_1 0x28f44 +#define SQ_ALU_CONST_CACHE_LS_2 0x28f48 +#define SQ_ALU_CONST_CACHE_LS_3 0x28f4c +#define SQ_ALU_CONST_CACHE_LS_4 0x28f50 +#define SQ_ALU_CONST_CACHE_LS_5 0x28f54 +#define SQ_ALU_CONST_CACHE_LS_6 0x28f58 +#define SQ_ALU_CONST_CACHE_LS_7 0x28f5c +#define SQ_ALU_CONST_CACHE_LS_8 0x28f60 +#define SQ_ALU_CONST_CACHE_LS_9 0x28f64 +#define SQ_ALU_CONST_CACHE_LS_10 0x28f68 +#define SQ_ALU_CONST_CACHE_LS_11 0x28f6c +#define SQ_ALU_CONST_CACHE_LS_12 0x28f70 +#define SQ_ALU_CONST_CACHE_LS_13 0x28f74 +#define SQ_ALU_CONST_CACHE_LS_14 0x28f78 +#define SQ_ALU_CONST_CACHE_LS_15 0x28f7c + +#define DB_DEPTH_CONTROL 0x28800 +#define DB_DEPTH_VIEW 0x28008 +#define DB_HTILE_DATA_BASE 0x28014 +#define DB_Z_INFO 0x28040 +# define Z_ARRAY_MODE(x) ((x) << 4) +#define DB_STENCIL_INFO 0x28044 +#define DB_Z_READ_BASE 0x28048 +#define DB_STENCIL_READ_BASE 0x2804c +#define DB_Z_WRITE_BASE 0x28050 +#define DB_STENCIL_WRITE_BASE 0x28054 +#define DB_DEPTH_SIZE 0x28058 + +#define SQ_PGM_START_PS 0x28840 +#define SQ_PGM_START_VS 0x2885c +#define SQ_PGM_START_GS 0x28874 +#define SQ_PGM_START_ES 0x2888c +#define SQ_PGM_START_FS 0x288a4 +#define SQ_PGM_START_HS 0x288b8 +#define SQ_PGM_START_LS 0x288d0 + +#define VGT_STRMOUT_CONFIG 0x28b94 +#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98 + +#define CB_TARGET_MASK 0x28238 +#define CB_SHADER_MASK 0x2823c + +#define GDS_ADDR_BASE 0x28720 + +#define CB_IMMED0_BASE 0x28b9c +#define CB_IMMED1_BASE 0x28ba0 +#define CB_IMMED2_BASE 0x28ba4 +#define CB_IMMED3_BASE 0x28ba8 +#define CB_IMMED4_BASE 0x28bac +#define CB_IMMED5_BASE 0x28bb0 +#define CB_IMMED6_BASE 0x28bb4 +#define CB_IMMED7_BASE 0x28bb8 +#define CB_IMMED8_BASE 0x28bbc +#define CB_IMMED9_BASE 0x28bc0 +#define 
CB_IMMED10_BASE 0x28bc4 +#define CB_IMMED11_BASE 0x28bc8 + +/* all 12 CB blocks have these regs */ +#define CB_COLOR0_BASE 0x28c60 +#define CB_COLOR0_PITCH 0x28c64 +#define CB_COLOR0_SLICE 0x28c68 +#define CB_COLOR0_VIEW 0x28c6c +#define CB_COLOR0_INFO 0x28c70 +# define CB_ARRAY_MODE(x) ((x) << 8) +# define ARRAY_LINEAR_GENERAL 0 +# define ARRAY_LINEAR_ALIGNED 1 +# define ARRAY_1D_TILED_THIN1 2 +# define ARRAY_2D_TILED_THIN1 4 +#define CB_COLOR0_ATTRIB 0x28c74 +#define CB_COLOR0_DIM 0x28c78 +/* only CB0-7 blocks have these regs */ +#define CB_COLOR0_CMASK 0x28c7c +#define CB_COLOR0_CMASK_SLICE 0x28c80 +#define CB_COLOR0_FMASK 0x28c84 +#define CB_COLOR0_FMASK_SLICE 0x28c88 +#define CB_COLOR0_CLEAR_WORD0 0x28c8c +#define CB_COLOR0_CLEAR_WORD1 0x28c90 +#define CB_COLOR0_CLEAR_WORD2 0x28c94 +#define CB_COLOR0_CLEAR_WORD3 0x28c98 + +#define CB_COLOR1_BASE 0x28c9c +#define CB_COLOR2_BASE 0x28cd8 +#define CB_COLOR3_BASE 0x28d14 +#define CB_COLOR4_BASE 0x28d50 +#define CB_COLOR5_BASE 0x28d8c +#define CB_COLOR6_BASE 0x28dc8 +#define CB_COLOR7_BASE 0x28e04 +#define CB_COLOR8_BASE 0x28e40 +#define CB_COLOR9_BASE 0x28e5c +#define CB_COLOR10_BASE 0x28e78 +#define CB_COLOR11_BASE 0x28e94 + +#define CB_COLOR1_PITCH 0x28ca0 +#define CB_COLOR2_PITCH 0x28cdc +#define CB_COLOR3_PITCH 0x28d18 +#define CB_COLOR4_PITCH 0x28d54 +#define CB_COLOR5_PITCH 0x28d90 +#define CB_COLOR6_PITCH 0x28dcc +#define CB_COLOR7_PITCH 0x28e08 +#define CB_COLOR8_PITCH 0x28e44 +#define CB_COLOR9_PITCH 0x28e60 +#define CB_COLOR10_PITCH 0x28e7c +#define CB_COLOR11_PITCH 0x28e98 + +#define CB_COLOR1_SLICE 0x28ca4 +#define CB_COLOR2_SLICE 0x28ce0 +#define CB_COLOR3_SLICE 0x28d1c +#define CB_COLOR4_SLICE 0x28d58 +#define CB_COLOR5_SLICE 0x28d94 +#define CB_COLOR6_SLICE 0x28dd0 +#define CB_COLOR7_SLICE 0x28e0c +#define CB_COLOR8_SLICE 0x28e48 +#define CB_COLOR9_SLICE 0x28e64 +#define CB_COLOR10_SLICE 0x28e80 +#define CB_COLOR11_SLICE 0x28e9c + +#define CB_COLOR1_VIEW 0x28ca8 +#define CB_COLOR2_VIEW 0x28ce4 +#define CB_COLOR3_VIEW 0x28d20 +#define CB_COLOR4_VIEW 0x28d5c +#define CB_COLOR5_VIEW 0x28d98 +#define CB_COLOR6_VIEW 0x28dd4 +#define CB_COLOR7_VIEW 0x28e10 +#define CB_COLOR8_VIEW 0x28e4c +#define CB_COLOR9_VIEW 0x28e68 +#define CB_COLOR10_VIEW 0x28e84 +#define CB_COLOR11_VIEW 0x28ea0 + +#define CB_COLOR1_INFO 0x28cac +#define CB_COLOR2_INFO 0x28ce8 +#define CB_COLOR3_INFO 0x28d24 +#define CB_COLOR4_INFO 0x28d60 +#define CB_COLOR5_INFO 0x28d9c +#define CB_COLOR6_INFO 0x28dd8 +#define CB_COLOR7_INFO 0x28e14 +#define CB_COLOR8_INFO 0x28e50 +#define CB_COLOR9_INFO 0x28e6c +#define CB_COLOR10_INFO 0x28e88 +#define CB_COLOR11_INFO 0x28ea4 + +#define CB_COLOR1_ATTRIB 0x28cb0 +#define CB_COLOR2_ATTRIB 0x28cec +#define CB_COLOR3_ATTRIB 0x28d28 +#define CB_COLOR4_ATTRIB 0x28d64 +#define CB_COLOR5_ATTRIB 0x28da0 +#define CB_COLOR6_ATTRIB 0x28ddc +#define CB_COLOR7_ATTRIB 0x28e18 +#define CB_COLOR8_ATTRIB 0x28e54 +#define CB_COLOR9_ATTRIB 0x28e70 +#define CB_COLOR10_ATTRIB 0x28e8c +#define CB_COLOR11_ATTRIB 0x28ea8 + +#define CB_COLOR1_DIM 0x28cb4 +#define CB_COLOR2_DIM 0x28cf0 +#define CB_COLOR3_DIM 0x28d2c +#define CB_COLOR4_DIM 0x28d68 +#define CB_COLOR5_DIM 0x28da4 +#define CB_COLOR6_DIM 0x28de0 +#define CB_COLOR7_DIM 0x28e1c +#define CB_COLOR8_DIM 0x28e58 +#define CB_COLOR9_DIM 0x28e74 +#define CB_COLOR10_DIM 0x28e90 +#define CB_COLOR11_DIM 0x28eac + +#define CB_COLOR1_CMASK 0x28cb8 +#define CB_COLOR2_CMASK 0x28cf4 +#define CB_COLOR3_CMASK 0x28d30 +#define CB_COLOR4_CMASK 0x28d6c +#define CB_COLOR5_CMASK 0x28da8 +#define CB_COLOR6_CMASK 
0x28de4 +#define CB_COLOR7_CMASK 0x28e20 + +#define CB_COLOR1_CMASK_SLICE 0x28cbc +#define CB_COLOR2_CMASK_SLICE 0x28cf8 +#define CB_COLOR3_CMASK_SLICE 0x28d34 +#define CB_COLOR4_CMASK_SLICE 0x28d70 +#define CB_COLOR5_CMASK_SLICE 0x28dac +#define CB_COLOR6_CMASK_SLICE 0x28de8 +#define CB_COLOR7_CMASK_SLICE 0x28e24 + +#define CB_COLOR1_FMASK 0x28cc0 +#define CB_COLOR2_FMASK 0x28cfc +#define CB_COLOR3_FMASK 0x28d38 +#define CB_COLOR4_FMASK 0x28d74 +#define CB_COLOR5_FMASK 0x28db0 +#define CB_COLOR6_FMASK 0x28dec +#define CB_COLOR7_FMASK 0x28e28 + +#define CB_COLOR1_FMASK_SLICE 0x28cc4 +#define CB_COLOR2_FMASK_SLICE 0x28d00 +#define CB_COLOR3_FMASK_SLICE 0x28d3c +#define CB_COLOR4_FMASK_SLICE 0x28d78 +#define CB_COLOR5_FMASK_SLICE 0x28db4 +#define CB_COLOR6_FMASK_SLICE 0x28df0 +#define CB_COLOR7_FMASK_SLICE 0x28e2c + +#define CB_COLOR1_CLEAR_WORD0 0x28cc8 +#define CB_COLOR2_CLEAR_WORD0 0x28d04 +#define CB_COLOR3_CLEAR_WORD0 0x28d40 +#define CB_COLOR4_CLEAR_WORD0 0x28d7c +#define CB_COLOR5_CLEAR_WORD0 0x28db8 +#define CB_COLOR6_CLEAR_WORD0 0x28df4 +#define CB_COLOR7_CLEAR_WORD0 0x28e30 + +#define CB_COLOR1_CLEAR_WORD1 0x28ccc +#define CB_COLOR2_CLEAR_WORD1 0x28d08 +#define CB_COLOR3_CLEAR_WORD1 0x28d44 +#define CB_COLOR4_CLEAR_WORD1 0x28d80 +#define CB_COLOR5_CLEAR_WORD1 0x28dbc +#define CB_COLOR6_CLEAR_WORD1 0x28df8 +#define CB_COLOR7_CLEAR_WORD1 0x28e34 + +#define CB_COLOR1_CLEAR_WORD2 0x28cd0 +#define CB_COLOR2_CLEAR_WORD2 0x28d0c +#define CB_COLOR3_CLEAR_WORD2 0x28d48 +#define CB_COLOR4_CLEAR_WORD2 0x28d84 +#define CB_COLOR5_CLEAR_WORD2 0x28dc0 +#define CB_COLOR6_CLEAR_WORD2 0x28dfc +#define CB_COLOR7_CLEAR_WORD2 0x28e38 + +#define CB_COLOR1_CLEAR_WORD3 0x28cd4 +#define CB_COLOR2_CLEAR_WORD3 0x28d10 +#define CB_COLOR3_CLEAR_WORD3 0x28d4c +#define CB_COLOR4_CLEAR_WORD3 0x28d88 +#define CB_COLOR5_CLEAR_WORD3 0x28dc4 +#define CB_COLOR6_CLEAR_WORD3 0x28e00 +#define CB_COLOR7_CLEAR_WORD3 0x28e3c + +#define SQ_TEX_RESOURCE_WORD0_0 0x30000 +#define SQ_TEX_RESOURCE_WORD1_0 0x30004 +# define TEX_ARRAY_MODE(x) ((x) << 28) +#define SQ_TEX_RESOURCE_WORD2_0 0x30008 +#define SQ_TEX_RESOURCE_WORD3_0 0x3000C +#define SQ_TEX_RESOURCE_WORD4_0 0x30010 +#define SQ_TEX_RESOURCE_WORD5_0 0x30014 +#define SQ_TEX_RESOURCE_WORD6_0 0x30018 +#define SQ_TEX_RESOURCE_WORD7_0 0x3001c + + #endif diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c485c2cec4da..ab37717a5d39 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -162,6 +162,11 @@ void r100_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + /* mid sh */ + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; /* high sh */ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; @@ -172,6 +177,11 @@ void r100_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + /* mid mh */ + 
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; /* high mh */ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; @@ -1618,6 +1628,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, case RADEON_TXFORMAT_RGB332: case RADEON_TXFORMAT_Y8: track->textures[i].cpp = 1; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case RADEON_TXFORMAT_AI88: case RADEON_TXFORMAT_ARGB1555: @@ -1629,12 +1640,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p, case RADEON_TXFORMAT_LDUDV655: case RADEON_TXFORMAT_DUDV88: track->textures[i].cpp = 2; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case RADEON_TXFORMAT_ARGB8888: case RADEON_TXFORMAT_RGBA8888: case RADEON_TXFORMAT_SHADOW32: case RADEON_TXFORMAT_LDUDUV8888: track->textures[i].cpp = 4; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case RADEON_TXFORMAT_DXT1: track->textures[i].cpp = 1; @@ -2594,12 +2607,6 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, int surf_index = reg * 16; int flags = 0; - /* r100/r200 divide by 16 */ - if (rdev->family < CHIP_R300) - flags = pitch / 16; - else - flags = pitch / 8; - if (rdev->family <= CHIP_RS200) { if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) @@ -2623,6 +2630,20 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, if (tiling_flags & RADEON_TILING_SWAP_32BIT) flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; + /* when we aren't tiling the pitch seems to need to be further divided down.
- tested on power5 + rn50 server */ + if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) { + if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO))) + if (ASIC_IS_RN50(rdev)) + pitch /= 16; + } + + /* r100/r200 divide by 16 */ + if (rdev->family < CHIP_R300) + flags |= pitch / 16; + else + flags |= pitch / 8; + + DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); WREG32(RADEON_SURFACE0_INFO + surf_index, flags); WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); @@ -3137,33 +3158,6 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t) DRM_ERROR("compress format %d\n", t->compress_format); } -static int r100_cs_track_cube(struct radeon_device *rdev, - struct r100_cs_track *track, unsigned idx) -{ - unsigned face, w, h; - struct radeon_bo *cube_robj; - unsigned long size; - - for (face = 0; face < 5; face++) { - cube_robj = track->textures[idx].cube_info[face].robj; - w = track->textures[idx].cube_info[face].width; - h = track->textures[idx].cube_info[face].height; - - size = w * h; - size *= track->textures[idx].cpp; - - size += track->textures[idx].cube_info[face].offset; - - if (size > radeon_bo_size(cube_robj)) { - DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", - size, radeon_bo_size(cube_robj)); - r100_cs_track_texture_print(&track->textures[idx]); - return -1; - } - } - return 0; -} - static int r100_track_compress_size(int compress_format, int w, int h) { int block_width, block_height, block_bytes; @@ -3194,6 +3188,37 @@ static int r100_track_compress_size(int compress_format, int w, int h) return sz; } +static int r100_cs_track_cube(struct radeon_device *rdev, + struct r100_cs_track *track, unsigned idx) +{ + unsigned face, w, h; + struct radeon_bo *cube_robj; + unsigned long size; + unsigned compress_format = track->textures[idx].compress_format; + + for (face = 0; face < 5; face++) { + cube_robj = track->textures[idx].cube_info[face].robj; + w = track->textures[idx].cube_info[face].width; + h = track->textures[idx].cube_info[face].height; + + if (compress_format) { + size = r100_track_compress_size(compress_format, w, h); + } else + size = w * h; + size *= track->textures[idx].cpp; + + size += track->textures[idx].cube_info[face].offset; + + if (size > radeon_bo_size(cube_robj)) { + DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", + size, radeon_bo_size(cube_robj)); + r100_cs_track_texture_print(&track->textures[idx]); + return -1; + } + } + return 0; +} + static int r100_cs_track_texture_check(struct radeon_device *rdev, struct r100_cs_track *track) { diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 85617c311212..0266d72e0a4c 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -415,6 +415,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, /* 2D, 3D, CUBE */ switch (tmp) { case 0: + case 3: + case 4: case 5: case 6: case 7: @@ -450,6 +452,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, case R200_TXFORMAT_RGB332: case R200_TXFORMAT_Y8: track->textures[i].cpp = 1; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R200_TXFORMAT_AI88: case R200_TXFORMAT_ARGB1555: @@ -461,6 +464,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, case R200_TXFORMAT_DVDU88: case R200_TXFORMAT_AVYU4444: track->textures[i].cpp = 2; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R200_TXFORMAT_ARGB8888: case R200_TXFORMAT_RGBA8888: @@ -468,6 
+472,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, case R200_TXFORMAT_BGR111110: case R200_TXFORMAT_LDVDU8888: track->textures[i].cpp = 4; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R200_TXFORMAT_DXT1: track->textures[i].cpp = 1; diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index b2f9efe2897c..7e81db5eb804 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -881,6 +881,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case R300_TX_FORMAT_Y4X4: case R300_TX_FORMAT_Z3Y3X2: track->textures[i].cpp = 1; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_X16: case R300_TX_FORMAT_Y8X8: @@ -892,6 +893,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case R300_TX_FORMAT_B8G8_B8G8: case R300_TX_FORMAT_G8R8_G8B8: track->textures[i].cpp = 2; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_Y16X16: case R300_TX_FORMAT_Z11Y11X10: @@ -902,14 +904,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case R300_TX_FORMAT_FL_I32: case 0x1e: track->textures[i].cpp = 4; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_W16Z16Y16X16: case R300_TX_FORMAT_FL_R16G16B16A16: case R300_TX_FORMAT_FL_I32A32: track->textures[i].cpp = 8; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_FL_R32G32B32A32: track->textures[i].cpp = 16; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_DXT1: track->textures[i].cpp = 1; diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 4415a5ee5871..e6c89142bb4d 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -45,9 +45,14 @@ void r420_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; /* low sh */ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + /* mid sh */ + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; /* high sh */ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; @@ -58,6 +63,11 @@ void r420_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + /* mid mh */ + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; /* high mh */ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; diff --git a/drivers/gpu/drm/radeon/r600.c 
b/drivers/gpu/drm/radeon/r600.c index 4959619f8851..a73a6e17588d 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -130,9 +130,14 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev) break; } } - } else - rdev->pm.requested_power_state_index = - rdev->pm.current_power_state_index - 1; + } else { + if (rdev->pm.current_power_state_index == 0) + rdev->pm.requested_power_state_index = + rdev->pm.num_power_states - 1; + else + rdev->pm.requested_power_state_index = + rdev->pm.current_power_state_index - 1; + } } rdev->pm.requested_clock_mode_index = 0; /* don't use the power state if crtcs are active and no display flag is set */ @@ -291,6 +296,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + /* mid sh */ + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; /* high sh */ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; @@ -301,6 +311,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + /* mid mh */ + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; /* high mh */ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1; @@ -317,6 +332,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + /* mid sh */ + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; /* high sh */ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2; @@ -327,6 +347,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + /* mid mh */ + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; /* high mh */ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1; rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; @@ -343,6 +368,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + /* mid sh */ + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; /* high sh */ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2; rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3; @@ -353,6 +383,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + /* mid mh */ + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; /* high mh */ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3; @@ -375,6 +410,11 @@ void r600_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + /* mid sh */ + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; /* high sh */ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; @@ -385,6 +425,11 @@ void r600_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + /* mid mh */ + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; /* high mh */ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; @@ -401,7 +446,12 @@ void r600_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + /* mid sh */ + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; /* high sh */ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; 
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; @@ -411,7 +461,12 @@ void r600_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + /* mid mh */ + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; /* high mh */ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; @@ -430,14 +485,30 @@ void r600_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; } else { rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + } + /* mid sh */ + if (rdev->flags & RADEON_IS_MOBILITY) { + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; + } else { + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; } /* high sh */ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = @@ -453,14 +524,30 @@ void r600_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; } else { rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + } + /* mid mh */ + if (rdev->flags & RADEON_IS_MOBILITY) { + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = + r600_pm_get_type_index(rdev,
POWER_STATE_TYPE_BATTERY, 1); + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; + } else { + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; } /* high mh */ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = @@ -475,7 +562,18 @@ void r600_pm_init_profile(struct radeon_device *rdev) void r600_pm_misc(struct radeon_device *rdev) { + int req_ps_idx = rdev->pm.requested_power_state_index; + int req_cm_idx = rdev->pm.requested_clock_mode_index; + struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; + struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; + if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { + if (voltage->voltage != rdev->pm.current_vddc) { + radeon_atom_set_voltage(rdev, voltage->voltage); + rdev->pm.current_vddc = voltage->voltage; + DRM_DEBUG("Setting: v: %d\n", voltage->voltage); + } + } } bool r600_gui_idle(struct radeon_device *rdev) @@ -1004,7 +1102,7 @@ static void r600_mc_program(struct radeon_device *rdev) WREG32(MC_VM_FB_LOCATION, tmp); WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); WREG32(HDP_NONSURFACE_INFO, (2 << 7)); - WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF); + WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); if (rdev->flags & RADEON_IS_AGP) { WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); @@ -1126,8 +1224,10 @@ int r600_mc_init(struct radeon_device *rdev) rdev->mc.visible_vram_size = rdev->mc.aper_size; r600_vram_gtt_location(rdev, &rdev->mc); - if (rdev->flags & RADEON_IS_IGP) + if (rdev->flags & RADEON_IS_IGP) { + rs690_pm_info(rdev); rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); + } radeon_update_bandwidth_info(rdev); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 669feb689bfc..ab61aaa887bb 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -176,6 +176,8 @@ void radeon_pm_suspend(struct radeon_device *rdev); void radeon_pm_resume(struct radeon_device *rdev); void radeon_combios_get_power_modes(struct radeon_device *rdev); void radeon_atombios_get_power_modes(struct radeon_device *rdev); +void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); +void rs690_pm_info(struct radeon_device *rdev); /* * Fences. 
@@ -618,7 +620,8 @@ enum radeon_dynpm_state { DYNPM_STATE_DISABLED, DYNPM_STATE_MINIMUM, DYNPM_STATE_PAUSED, - DYNPM_STATE_ACTIVE + DYNPM_STATE_ACTIVE, + DYNPM_STATE_SUSPENDED, }; enum radeon_dynpm_action { DYNPM_ACTION_NONE, @@ -647,15 +650,18 @@ enum radeon_pm_profile_type { PM_PROFILE_DEFAULT, PM_PROFILE_AUTO, PM_PROFILE_LOW, + PM_PROFILE_MID, PM_PROFILE_HIGH, }; #define PM_PROFILE_DEFAULT_IDX 0 #define PM_PROFILE_LOW_SH_IDX 1 -#define PM_PROFILE_HIGH_SH_IDX 2 -#define PM_PROFILE_LOW_MH_IDX 3 -#define PM_PROFILE_HIGH_MH_IDX 4 -#define PM_PROFILE_MAX 5 +#define PM_PROFILE_MID_SH_IDX 2 +#define PM_PROFILE_HIGH_SH_IDX 3 +#define PM_PROFILE_LOW_MH_IDX 4 +#define PM_PROFILE_MID_MH_IDX 5 +#define PM_PROFILE_HIGH_MH_IDX 6 +#define PM_PROFILE_MAX 7 struct radeon_pm_profile { int dpms_off_ps_idx; @@ -744,6 +750,7 @@ struct radeon_pm { int default_power_state_index; u32 current_sclk; u32 current_mclk; + u32 current_vddc; struct radeon_i2c_chan *i2c_bus; /* selected pm method */ enum radeon_pm_method pm_method; diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index e57df08d4aeb..646f96f97c77 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -724,8 +724,8 @@ static struct radeon_asic evergreen_asic = { .irq_set = &evergreen_irq_set, .irq_process = &evergreen_irq_process, .get_vblank_counter = &evergreen_get_vblank_counter, - .fence_ring_emit = NULL, - .cs_parse = NULL, + .fence_ring_emit = &r600_fence_ring_emit, + .cs_parse = &evergreen_cs_parse, .copy_blit = NULL, .copy_dma = NULL, .copy = NULL, @@ -780,6 +780,13 @@ int radeon_asic_init(struct radeon_device *rdev) case CHIP_R423: case CHIP_RV410: rdev->asic = &r420_asic; + /* handle macs */ + if (rdev->bios == NULL) { + rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock; + rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock; + rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock; + rdev->asic->set_memory_clock = NULL; + } break; case CHIP_RS400: case CHIP_RS480: diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 5c40a3dfaca2..c0bbaa64157a 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -314,6 +314,7 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev, u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc); int evergreen_irq_set(struct radeon_device *rdev); int evergreen_irq_process(struct radeon_device *rdev); +extern int evergreen_cs_parse(struct radeon_cs_parser *p); extern void evergreen_pm_misc(struct radeon_device *rdev); extern void evergreen_pm_prepare(struct radeon_device *rdev); extern void evergreen_pm_finish(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 24ea683f7cf5..99bd8a9c56b3 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1538,7 +1538,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) rdev->pm.power_state[state_index].pcie_lanes = power_info->info.asPowerPlayInfo[i].ucNumPciELanes; misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); - if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { + if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || + (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; 
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = @@ -1605,7 +1606,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); - if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { + if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || + (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = @@ -1679,7 +1681,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); - if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { + if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || + (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = @@ -1755,9 +1758,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) rdev->pm.power_state[state_index].misc2 = 0; } } else { + int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + uint8_t fw_frev, fw_crev; + uint16_t fw_data_offset, vddc = 0; + union firmware_info *firmware_info; + ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController; + + if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL, + &fw_frev, &fw_crev, &fw_data_offset)) { + firmware_info = + (union firmware_info *)(mode_info->atom_context->bios + + fw_data_offset); + vddc = firmware_info->info_14.usBootUpVDDCVoltage; + } + /* add the i2c bus for thermal/fan chip */ /* no support for internal controller yet */ - ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController; if (controller->ucType > 0) { if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) || @@ -1817,10 +1833,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) /* skip invalid modes */ if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) continue; - rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = - VOLTAGE_SW; - rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = - clock_info->usVDDC; + /* voltage works differently on IGPs */ mode_index++; } else if (ASIC_IS_DCE4(rdev)) { struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info = @@ -1904,6 +1917,16 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) rdev->pm.default_power_state_index = state_index; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; + /* patch the table values with the default sclk/mclk from firmware info */ + for (j = 0; j < mode_index; j++) { + rdev->pm.power_state[state_index].clock_info[j].mclk = + rdev->clock.default_mclk; + rdev->pm.power_state[state_index].clock_info[j].sclk = + rdev->clock.default_sclk; + if (vddc) + rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = + vddc; + } } state_index++; } @@ -1943,6 +1966,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) rdev->pm.current_power_state_index = 
rdev->pm.default_power_state_index; rdev->pm.current_clock_mode_index = 0; + rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; } void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) @@ -1998,6 +2022,42 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } +union set_voltage { + struct _SET_VOLTAGE_PS_ALLOCATION alloc; + struct _SET_VOLTAGE_PARAMETERS v1; + struct _SET_VOLTAGE_PARAMETERS_V2 v2; +}; + +void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level) +{ + union set_voltage args; + int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); + u8 frev, crev, volt_index = level; + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return; + + switch (crev) { + case 1: + args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; + args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; + args.v1.ucVoltageIndex = volt_index; + break; + case 2: + args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; + args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; + args.v2.usVoltageLevel = cpu_to_le16(level); + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + return; + } + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +} + + + void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 91f5b5a29a9f..654787ec43f4 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -48,6 +48,10 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) resource_size_t vram_base; resource_size_t size = 256 * 1024; /* ??? 
*/ + if (!(rdev->flags & RADEON_IS_IGP)) + if (!radeon_card_posted(rdev)) + return false; + rdev->bios = NULL; vram_base = pci_resource_start(rdev->pdev, 0); bios = ioremap(vram_base, size); diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 7b5e10d3e9c9..d1c1d8dd93ce 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -1411,6 +1411,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; } else #endif /* CONFIG_PPC_PMAC */ +#ifdef CONFIG_PPC64 + if (ASIC_IS_RN50(rdev)) + rdev->mode_info.connector_table = CT_RN50_POWER; + else +#endif rdev->mode_info.connector_table = CT_GENERIC; } @@ -1853,6 +1858,33 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) CONNECTOR_OBJECT_ID_SVIDEO, &hpd); break; + case CT_RN50_POWER: + DRM_INFO("Connector Table: %d (rn50-power)\n", + rdev->mode_info.connector_table); + /* VGA - primary dac */ + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); + hpd.hpd = RADEON_HPD_NONE; + radeon_add_legacy_encoder(dev, + radeon_get_encoder_id(dev, + ATOM_DEVICE_CRT1_SUPPORT, + 1), + ATOM_DEVICE_CRT1_SUPPORT); + radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, + DRM_MODE_CONNECTOR_VGA, &ddc_i2c, + CONNECTOR_OBJECT_ID_VGA, + &hpd); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); + hpd.hpd = RADEON_HPD_NONE; + radeon_add_legacy_encoder(dev, + radeon_get_encoder_id(dev, + ATOM_DEVICE_CRT2_SUPPORT, + 2), + ATOM_DEVICE_CRT2_SUPPORT); + radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, + DRM_MODE_CONNECTOR_VGA, &ddc_i2c, + CONNECTOR_OBJECT_ID_VGA, + &hpd); + break; default: DRM_INFO("Connector table: %d (invalid)\n", rdev->mode_info.connector_table); @@ -1906,15 +1938,6 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev, return false; } - /* Some RV100 cards with 2 VGA ports show up with DVI+VGA */ - if (dev->pdev->device == 0x5159 && - dev->pdev->subsystem_vendor == 0x1002 && - dev->pdev->subsystem_device == 0x013a) { - if (*legacy_connector == CONNECTOR_DVI_I_LEGACY) - *legacy_connector = CONNECTOR_CRT_LEGACY; - - } - /* X300 card with extra non-existent DVI port */ if (dev->pdev->device == 0x5B60 && dev->pdev->subsystem_vendor == 0x17af && @@ -2026,6 +2049,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); break; default: + ddc_i2c.valid = false; break; } @@ -2339,6 +2363,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) if (RBIOS8(tv_info + 6) == 'T') { if (radeon_apply_legacy_tv_quirks(dev)) { hpd.hpd = RADEON_HPD_NONE; + ddc_i2c.valid = false; radeon_add_legacy_encoder(dev, radeon_get_encoder_id (dev, @@ -2454,7 +2479,12 @@ default_mode: rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; - rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; + if ((state_index > 0) && + (rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO)) + rdev->pm.power_state[state_index].clock_info[0].voltage = + rdev->pm.power_state[0].clock_info[0].voltage; + else + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; rdev->pm.power_state[state_index].pcie_lanes = 
16; rdev->pm.power_state[state_index].flags = 0; rdev->pm.default_power_state_index = state_index; @@ -3012,6 +3042,14 @@ void radeon_combios_asic_init(struct drm_device *dev) combios_write_ram_size(dev); } + /* quirk for rs4xx HP nx6125 laptop to make it resume + * - it hangs on resume inside the dynclk 1 table. + */ + if (rdev->family == CHIP_RS480 && + rdev->pdev->subsystem_vendor == 0x103c && + rdev->pdev->subsystem_device == 0x308b) + return; + /* DYN CLK 1 */ table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); if (table) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 0c7ccc6961a3..f58f8bd8f77b 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -785,7 +785,9 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect if (connector == list_connector) continue; list_radeon_connector = to_radeon_connector(list_connector); - if (radeon_connector->devices == list_radeon_connector->devices) { + if (list_radeon_connector->shared_ddc && + (list_radeon_connector->ddc_bus->rec.i2c_id == + radeon_connector->ddc_bus->rec.i2c_id)) { if (drm_detect_hdmi_monitor(radeon_connector->edid)) { if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) { kfree(radeon_connector->edid); diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index b7023fff89eb..4eb67c0e0996 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -194,7 +194,7 @@ unpin: fail: drm_gem_object_unreference_unlocked(obj); - return 0; + return ret; } int radeon_crtc_cursor_move(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 2a897a7ca26f..37533bec1f25 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -546,8 +546,10 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero /* don't suspend or resume card normally */ rdev->powered_down = false; radeon_resume_kms(dev); + drm_kms_helper_poll_enable(dev); } else { printk(KERN_INFO "radeon: switched off\n"); + drm_kms_helper_poll_disable(dev); radeon_suspend_kms(dev, pmm); /* don't suspend or resume card normally */ rdev->powered_down = true; @@ -711,6 +713,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) { struct radeon_device *rdev; struct drm_crtc *crtc; + struct drm_connector *connector; int r; if (dev == NULL || dev->dev_private == NULL) { @@ -723,6 +726,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) if (rdev->powered_down) return 0; + + /* turn off display hw */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + } + /* unpin the front buffers */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); @@ -770,6 +779,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) int radeon_resume_kms(struct drm_device *dev) { + struct drm_connector *connector; struct radeon_device *rdev = dev->dev_private; if (rdev->powered_down) @@ -788,6 +798,12 @@ int radeon_resume_kms(struct drm_device *dev) radeon_resume(rdev); radeon_pm_resume(rdev); radeon_restore_bios_scratch_regs(rdev); + + /* turn on display hw */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + 
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + } + radeon_fbdev_set_suspend(rdev, 0); release_console_sem(); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 1006549d1570..8154cdf796e4 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -284,8 +284,7 @@ static const char *connector_names[15] = { "eDP", }; -static const char *hpd_names[7] = { - "NONE", +static const char *hpd_names[6] = { "HPD1", "HPD2", "HPD3", diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 683e281b4092..ed0ceb3fc40a 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -45,9 +45,10 @@ * - 2.2.0 - add r6xx/r7xx const buffer support * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs * - 2.4.0 - add crtc id query + * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 4 +#define KMS_DRIVER_MINOR 5 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); @@ -213,6 +214,7 @@ static struct drm_driver driver_old = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, + .read = drm_read, #ifdef CONFIG_COMPAT .compat_ioctl = radeon_compat_ioctl, #endif @@ -301,6 +303,7 @@ static struct drm_driver kms_driver = { .mmap = radeon_mmap, .poll = drm_poll, .fasync = drm_fasync, + .read = drm_read, #ifdef CONFIG_COMPAT .compat_ioctl = radeon_kms_compat_ioctl, #endif diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 1ebb100015b7..e0b30b264c28 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -1072,6 +1072,8 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) if (is_dig) { switch (mode) { case DRM_MODE_DPMS_ON: + if (!ASIC_IS_DCE4(rdev)) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); @@ -1079,8 +1081,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); } - if (!ASIC_IS_DCE4(rdev)) - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index e192acfbf0cd..dc1634bb0c11 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -363,6 +363,7 @@ int radeon_fbdev_init(struct radeon_device *rdev) { struct radeon_fbdev *rfbdev; int bpp_sel = 32; + int ret; /* select 8 bpp console on RN50 or 16MB cards */ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) @@ -376,9 +377,14 @@ int radeon_fbdev_init(struct radeon_device *rdev) rdev->mode_info.rfbdev = rfbdev; rfbdev->helper.funcs = &radeon_fb_helper_funcs; - drm_fb_helper_init(rdev->ddev, &rfbdev->helper, - rdev->num_crtc, - RADEONFB_CONN_LIMIT); + ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper, + rdev->num_crtc, + RADEONFB_CONN_LIMIT); + if (ret) { + kfree(rfbdev); + return ret; + } + drm_fb_helper_single_add_all_connectors(&rfbdev->helper); drm_fb_helper_initial_config(&rfbdev->helper, 
bpp_sel); return 0; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 04068352ccd2..6a70c0dc7f92 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -118,7 +118,11 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) value = rdev->num_z_pipes; break; case RADEON_INFO_ACCEL_WORKING: - value = rdev->accel_working; + /* xf86-video-ati 6.13.0 relies on this being false for evergreen */ + if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) + value = false; + else + value = rdev->accel_working; break; case RADEON_INFO_CRTC_FROM_ID: for (i = 0, found = 0; i < rdev->num_crtc; i++) { @@ -134,6 +138,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return -EINVAL; } break; + case RADEON_INFO_ACCEL_WORKING2: + value = rdev->accel_working; + break; default: DRM_DEBUG("Invalid request %d\n", info->request); return -EINVAL; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 5a13b3eeef19..bad77f40a9da 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -928,16 +928,14 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, if (ASIC_IS_R300(rdev)) { gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1; disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); - } - - if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) - disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); - else + } else if (rdev->family != CHIP_R200) disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); - - if (rdev->family == CHIP_R200) + else if (rdev->family == CHIP_R200) fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); + if (rdev->family >= CHIP_R200) + disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); + if (is_tv) { uint32_t dac_cntl; @@ -1002,15 +1000,13 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, if (ASIC_IS_R300(rdev)) { WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); - } + } else if (rdev->family != CHIP_R200) + WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); + else if (rdev->family == CHIP_R200) + WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); if (rdev->family >= CHIP_R200) WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl); - else - WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); - - if (rdev->family == CHIP_R200) - WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); if (is_tv) radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode); @@ -1168,6 +1164,17 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; bool color = true; + struct drm_crtc *crtc; + + /* find out if crtc2 is in use or if this encoder is using it */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + if ((radeon_crtc->crtc_id == 1) && crtc->enabled) { + if (encoder->crtc != crtc) { + return connector_status_disconnected; + } + } + } if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO || connector->connector_type == DRM_MODE_CONNECTOR_Composite || diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 67358baf28b2..95696aa57ac8 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -206,6 +206,7 @@ enum 
radeon_connector_table { CT_MINI_INTERNAL, CT_IMAC_G5_ISIGHT, CT_EMAC, + CT_RN50_POWER, }; enum radeon_dvo_chip { diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index a8d162c6f829..115d26b762cc 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -33,6 +33,14 @@ #define RADEON_WAIT_VBLANK_TIMEOUT 200 #define RADEON_WAIT_IDLE_TIMEOUT 200 +static const char *radeon_pm_state_type_name[5] = { + "Default", + "Powersave", + "Battery", + "Balanced", + "Performance", +}; + static void radeon_dynpm_idle_work_handler(struct work_struct *work); static int radeon_debugfs_pm_init(struct radeon_device *rdev); static bool radeon_pm_in_vbl(struct radeon_device *rdev); @@ -84,9 +92,9 @@ static void radeon_pm_update_profile(struct radeon_device *rdev) rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; } else { if (rdev->pm.active_crtc_count > 1) - rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX; + rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; else - rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; + rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; } break; case PM_PROFILE_LOW: @@ -95,6 +103,12 @@ static void radeon_pm_update_profile(struct radeon_device *rdev) else rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; break; + case PM_PROFILE_MID: + if (rdev->pm.active_crtc_count > 1) + rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; + else + rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; + break; case PM_PROFILE_HIGH: if (rdev->pm.active_crtc_count > 1) rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; @@ -127,15 +141,6 @@ static void radeon_unmap_vram_bos(struct radeon_device *rdev) if (bo->tbo.mem.mem_type == TTM_PL_VRAM) ttm_bo_unmap_virtual(&bo->tbo); } - - if (rdev->gart.table.vram.robj) - ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo); - - if (rdev->stollen_vga_memory) - ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo); - - if (rdev->r600_blit.shader_obj) - ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo); } static void radeon_sync_with_vblank(struct radeon_device *rdev) @@ -151,6 +156,7 @@ static void radeon_sync_with_vblank(struct radeon_device *rdev) static void radeon_set_power_state(struct radeon_device *rdev) { u32 sclk, mclk; + bool misc_after = false; if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) @@ -167,55 +173,47 @@ static void radeon_set_power_state(struct radeon_device *rdev) if (mclk > rdev->clock.default_mclk) mclk = rdev->clock.default_mclk; - /* voltage, pcie lanes, etc.*/ - radeon_pm_misc(rdev); + /* upvolt before raising clocks, downvolt after lowering clocks */ + if (sclk < rdev->pm.current_sclk) + misc_after = true; - if (rdev->pm.pm_method == PM_METHOD_DYNPM) { - radeon_sync_with_vblank(rdev); + radeon_sync_with_vblank(rdev); + if (rdev->pm.pm_method == PM_METHOD_DYNPM) { if (!radeon_pm_in_vbl(rdev)) return; + } - radeon_pm_prepare(rdev); - /* set engine clock */ - if (sclk != rdev->pm.current_sclk) { - radeon_pm_debug_check_in_vbl(rdev, false); - radeon_set_engine_clock(rdev, sclk); - radeon_pm_debug_check_in_vbl(rdev, true); - rdev->pm.current_sclk = sclk; - DRM_DEBUG("Setting: e: %d\n", sclk); - } + radeon_pm_prepare(rdev); - /* set memory clock */ - if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { - radeon_pm_debug_check_in_vbl(rdev, false); - radeon_set_memory_clock(rdev, mclk); - radeon_pm_debug_check_in_vbl(rdev, true); - rdev->pm.current_mclk = mclk; - 
DRM_DEBUG("Setting: m: %d\n", mclk); - } - radeon_pm_finish(rdev); - } else { - /* set engine clock */ - if (sclk != rdev->pm.current_sclk) { - radeon_sync_with_vblank(rdev); - radeon_pm_prepare(rdev); - radeon_set_engine_clock(rdev, sclk); - radeon_pm_finish(rdev); - rdev->pm.current_sclk = sclk; - DRM_DEBUG("Setting: e: %d\n", sclk); - } - /* set memory clock */ - if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { - radeon_sync_with_vblank(rdev); - radeon_pm_prepare(rdev); - radeon_set_memory_clock(rdev, mclk); - radeon_pm_finish(rdev); - rdev->pm.current_mclk = mclk; - DRM_DEBUG("Setting: m: %d\n", mclk); - } + if (!misc_after) + /* voltage, pcie lanes, etc.*/ + radeon_pm_misc(rdev); + + /* set engine clock */ + if (sclk != rdev->pm.current_sclk) { + radeon_pm_debug_check_in_vbl(rdev, false); + radeon_set_engine_clock(rdev, sclk); + radeon_pm_debug_check_in_vbl(rdev, true); + rdev->pm.current_sclk = sclk; + DRM_DEBUG("Setting: e: %d\n", sclk); } + /* set memory clock */ + if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { + radeon_pm_debug_check_in_vbl(rdev, false); + radeon_set_memory_clock(rdev, mclk); + radeon_pm_debug_check_in_vbl(rdev, true); + rdev->pm.current_mclk = mclk; + DRM_DEBUG("Setting: m: %d\n", mclk); + } + + if (misc_after) + /* voltage, pcie lanes, etc.*/ + radeon_pm_misc(rdev); + + radeon_pm_finish(rdev); + rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; } else @@ -288,6 +286,42 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) mutex_unlock(&rdev->ddev->struct_mutex); } +static void radeon_pm_print_states(struct radeon_device *rdev) +{ + int i, j; + struct radeon_power_state *power_state; + struct radeon_pm_clock_info *clock_info; + + DRM_DEBUG("%d Power State(s)\n", rdev->pm.num_power_states); + for (i = 0; i < rdev->pm.num_power_states; i++) { + power_state = &rdev->pm.power_state[i]; + DRM_DEBUG("State %d: %s\n", i, + radeon_pm_state_type_name[power_state->type]); + if (i == rdev->pm.default_power_state_index) + DRM_DEBUG("\tDefault"); + if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) + DRM_DEBUG("\t%d PCIE Lanes\n", power_state->pcie_lanes); + if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) + DRM_DEBUG("\tSingle display only\n"); + DRM_DEBUG("\t%d Clock Mode(s)\n", power_state->num_clock_modes); + for (j = 0; j < power_state->num_clock_modes; j++) { + clock_info = &(power_state->clock_info[j]); + if (rdev->flags & RADEON_IS_IGP) + DRM_DEBUG("\t\t%d e: %d%s\n", + j, + clock_info->sclk * 10, + clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : ""); + else + DRM_DEBUG("\t\t%d e: %d\tm: %d\tv: %d%s\n", + j, + clock_info->sclk * 10, + clock_info->mclk * 10, + clock_info->voltage.voltage, + clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? 
"\tNo display only" : ""); + } + } +} + static ssize_t radeon_get_pm_profile(struct device *dev, struct device_attribute *attr, char *buf) @@ -318,6 +352,8 @@ static ssize_t radeon_set_pm_profile(struct device *dev, rdev->pm.profile = PM_PROFILE_AUTO; else if (strncmp("low", buf, strlen("low")) == 0) rdev->pm.profile = PM_PROFILE_LOW; + else if (strncmp("mid", buf, strlen("mid")) == 0) + rdev->pm.profile = PM_PROFILE_MID; else if (strncmp("high", buf, strlen("high")) == 0) rdev->pm.profile = PM_PROFILE_HIGH; else { @@ -361,13 +397,20 @@ static ssize_t radeon_set_pm_method(struct device *dev, rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; mutex_unlock(&rdev->pm.mutex); } else if (strncmp("profile", buf, strlen("profile")) == 0) { + bool flush_wq = false; + mutex_lock(&rdev->pm.mutex); - rdev->pm.pm_method = PM_METHOD_PROFILE; + if (rdev->pm.pm_method == PM_METHOD_DYNPM) { + cancel_delayed_work(&rdev->pm.dynpm_idle_work); + flush_wq = true; + } /* disable dynpm */ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; - cancel_delayed_work(&rdev->pm.dynpm_idle_work); + rdev->pm.pm_method = PM_METHOD_PROFILE; mutex_unlock(&rdev->pm.mutex); + if (flush_wq) + flush_workqueue(rdev->wq); } else { DRM_ERROR("invalid power method!\n"); goto fail; @@ -382,17 +425,36 @@ static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon void radeon_pm_suspend(struct radeon_device *rdev) { + bool flush_wq = false; + mutex_lock(&rdev->pm.mutex); - cancel_delayed_work(&rdev->pm.dynpm_idle_work); - rdev->pm.current_power_state_index = -1; - rdev->pm.current_clock_mode_index = -1; - rdev->pm.current_sclk = 0; - rdev->pm.current_mclk = 0; + if (rdev->pm.pm_method == PM_METHOD_DYNPM) { + cancel_delayed_work(&rdev->pm.dynpm_idle_work); + if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) + rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; + flush_wq = true; + } mutex_unlock(&rdev->pm.mutex); + if (flush_wq) + flush_workqueue(rdev->wq); } void radeon_pm_resume(struct radeon_device *rdev) { + /* asic init will reset the default power state */ + mutex_lock(&rdev->pm.mutex); + rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; + rdev->pm.current_clock_mode_index = 0; + rdev->pm.current_sclk = rdev->clock.default_sclk; + rdev->pm.current_mclk = rdev->clock.default_mclk; + rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; + if (rdev->pm.pm_method == PM_METHOD_DYNPM + && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { + rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; + queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, + msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); + } + mutex_unlock(&rdev->pm.mutex); radeon_pm_compute_clocks(rdev); } @@ -401,32 +463,24 @@ int radeon_pm_init(struct radeon_device *rdev) int ret; /* default to profile method */ rdev->pm.pm_method = PM_METHOD_PROFILE; + rdev->pm.profile = PM_PROFILE_DEFAULT; rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; rdev->pm.dynpm_can_upclock = true; rdev->pm.dynpm_can_downclock = true; - rdev->pm.current_sclk = 0; - rdev->pm.current_mclk = 0; + rdev->pm.current_sclk = rdev->clock.default_sclk; + rdev->pm.current_mclk = rdev->clock.default_mclk; if (rdev->bios) { if (rdev->is_atom_bios) radeon_atombios_get_power_modes(rdev); else radeon_combios_get_power_modes(rdev); + radeon_pm_print_states(rdev); radeon_pm_init_profile(rdev); - rdev->pm.current_power_state_index = -1; - 
rdev->pm.current_clock_mode_index = -1; } if (rdev->pm.num_power_states > 1) { - if (rdev->pm.pm_method == PM_METHOD_PROFILE) { - mutex_lock(&rdev->pm.mutex); - rdev->pm.profile = PM_PROFILE_DEFAULT; - radeon_pm_update_profile(rdev); - radeon_pm_set_clocks(rdev); - mutex_unlock(&rdev->pm.mutex); - } - /* where's the best place to put these? */ ret = device_create_file(rdev->dev, &dev_attr_power_profile); if (ret) @@ -454,6 +508,8 @@ int radeon_pm_init(struct radeon_device *rdev) void radeon_pm_fini(struct radeon_device *rdev) { if (rdev->pm.num_power_states > 1) { + bool flush_wq = false; + mutex_lock(&rdev->pm.mutex); if (rdev->pm.pm_method == PM_METHOD_PROFILE) { rdev->pm.profile = PM_PROFILE_DEFAULT; @@ -461,13 +517,16 @@ void radeon_pm_fini(struct radeon_device *rdev) radeon_pm_set_clocks(rdev); } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { /* cancel work */ - cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); + cancel_delayed_work(&rdev->pm.dynpm_idle_work); + flush_wq = true; /* reset default clocks */ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; radeon_pm_set_clocks(rdev); } mutex_unlock(&rdev->pm.mutex); + if (flush_wq) + flush_workqueue(rdev->wq); device_remove_file(rdev->dev, &dev_attr_power_profile); device_remove_file(rdev->dev, &dev_attr_power_method); @@ -688,12 +747,12 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work) radeon_pm_get_dynpm_state(rdev); radeon_pm_set_clocks(rdev); } + + queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, + msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); } mutex_unlock(&rdev->pm.mutex); ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); - - queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, - msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); } /* @@ -712,6 +771,8 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); if (rdev->asic->get_memory_clock) seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); + if (rdev->pm.current_vddc) + seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); if (rdev->asic->get_pcie_lanes) seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 11ce94c668e7..b3ba44c0a818 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -424,7 +424,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3); - offset = *cmd << 10; + offset = *cmd3 << 10; if (radeon_check_and_fixup_offset (dev_priv, file_priv, &offset)) { DRM_ERROR("Invalid second packet offset\n"); @@ -2896,9 +2896,12 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, return rv; rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer, cmdbuf->bufsz); - if (rv) + if (rv) { + drm_buffer_free(cmdbuf->buffer); return rv; - } + } + } else + goto done; orig_nbox = cmdbuf->nbox; @@ -2906,8 +2909,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, int temp; temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); - if (cmdbuf->bufsz != 0) - drm_buffer_free(cmdbuf->buffer); + drm_buffer_free(cmdbuf->buffer); return temp; } @@ -3013,16 +3015,15 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, } } - if 
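In radeon_dynpm_idle_work_handler() above, the re-queue of the delayed work moves from after the mutex is released (where it ran unconditionally) to inside the dynpm-active branch, while the mutex is still held. The effect is that a disabled handler simply stops rearming itself, so the cancel-then-flush sequence shown earlier terminates it cleanly. A small sketch of that self-rearming shape follows; the names and the boolean arguments are illustrative only.

#include <stdbool.h>
#include <stdio.h>

static void queue_next_run(void) { printf("re-queue idle work\n"); }

static void idle_work_handler(bool dynpm_active, bool gpu_idle)
{
	/* the real handler runs this with the PM mutex held */
	if (dynpm_active) {
		if (gpu_idle)
			printf("request lower clocks\n");
		/* rearm only while still active, before dropping the lock */
		queue_next_run();
	}
	/* once dynpm is disabled, no further runs are queued */
}

int main(void)
{
	idle_work_handler(true, true);
	idle_work_handler(false, true);	/* disabled: no re-queue */
	return 0;
}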
(cmdbuf->bufsz != 0) - drm_buffer_free(cmdbuf->buffer); + drm_buffer_free(cmdbuf->buffer); + done: DRM_DEBUG("DONE\n"); COMMIT_RING(); return 0; err: - if (cmdbuf->bufsz != 0) - drm_buffer_free(cmdbuf->buffer); + drm_buffer_free(cmdbuf->buffer); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen new file mode 100644 index 000000000000..f78fd592544d --- /dev/null +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen @@ -0,0 +1,611 @@ +evergreen 0x9400 +0x00008040 WAIT_UNTIL +0x00008044 WAIT_UNTIL_POLL_CNTL +0x00008048 WAIT_UNTIL_POLL_MASK +0x0000804c WAIT_UNTIL_POLL_REFDATA +0x000088B0 VGT_VTX_VECT_EJECT_REG +0x000088C4 VGT_CACHE_INVALIDATION +0x000088D4 VGT_GS_VERTEX_REUSE +0x00008958 VGT_PRIMITIVE_TYPE +0x0000895C VGT_INDEX_TYPE +0x00008970 VGT_NUM_INDICES +0x00008974 VGT_NUM_INSTANCES +0x00008990 VGT_COMPUTE_DIM_X +0x00008994 VGT_COMPUTE_DIM_Y +0x00008998 VGT_COMPUTE_DIM_Z +0x0000899C VGT_COMPUTE_START_X +0x000089A0 VGT_COMPUTE_START_Y +0x000089A4 VGT_COMPUTE_START_Z +0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE +0x00008A14 PA_CL_ENHANCE +0x00008A60 PA_SC_LINE_STIPPLE_VALUE +0x00008B10 PA_SC_LINE_STIPPLE_STATE +0x00008BF0 PA_SC_ENHANCE +0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ +0x00008C00 SQ_CONFIG +0x00008C04 SQ_GPR_RESOURCE_MGMT_1 +0x00008C08 SQ_GPR_RESOURCE_MGMT_2 +0x00008C0C SQ_GPR_RESOURCE_MGMT_3 +0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1 +0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2 +0x00008C18 SQ_THREAD_RESOURCE_MGMT +0x00008C1C SQ_THREAD_RESOURCE_MGMT_2 +0x00008C20 SQ_STACK_RESOURCE_MGMT_1 +0x00008C24 SQ_STACK_RESOURCE_MGMT_2 +0x00008C28 SQ_STACK_RESOURCE_MGMT_3 +0x00008DF8 SQ_CONST_MEM_BASE +0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS +0x00009100 SPI_CONFIG_CNTL +0x0000913C SPI_CONFIG_CNTL_1 +0x00009700 VC_CNTL +0x00009714 VC_ENHANCE +0x00009830 DB_DEBUG +0x00009834 DB_DEBUG2 +0x00009838 DB_DEBUG3 +0x0000983C DB_DEBUG4 +0x00009854 DB_WATERMARKS +0x0000A400 TD_PS_BORDER_COLOR_INDEX +0x0000A404 TD_PS_BORDER_COLOR_RED +0x0000A408 TD_PS_BORDER_COLOR_GREEN +0x0000A40C TD_PS_BORDER_COLOR_BLUE +0x0000A410 TD_PS_BORDER_COLOR_ALPHA +0x0000A414 TD_VS_BORDER_COLOR_INDEX +0x0000A418 TD_VS_BORDER_COLOR_RED +0x0000A41C TD_VS_BORDER_COLOR_GREEN +0x0000A420 TD_VS_BORDER_COLOR_BLUE +0x0000A424 TD_VS_BORDER_COLOR_ALPHA +0x0000A428 TD_GS_BORDER_COLOR_INDEX +0x0000A42C TD_GS_BORDER_COLOR_RED +0x0000A430 TD_GS_BORDER_COLOR_GREEN +0x0000A434 TD_GS_BORDER_COLOR_BLUE +0x0000A438 TD_GS_BORDER_COLOR_ALPHA +0x0000A43C TD_HS_BORDER_COLOR_INDEX +0x0000A440 TD_HS_BORDER_COLOR_RED +0x0000A444 TD_HS_BORDER_COLOR_GREEN +0x0000A448 TD_HS_BORDER_COLOR_BLUE +0x0000A44C TD_HS_BORDER_COLOR_ALPHA +0x0000A450 TD_LS_BORDER_COLOR_INDEX +0x0000A454 TD_LS_BORDER_COLOR_RED +0x0000A458 TD_LS_BORDER_COLOR_GREEN +0x0000A45C TD_LS_BORDER_COLOR_BLUE +0x0000A460 TD_LS_BORDER_COLOR_ALPHA +0x0000A464 TD_CS_BORDER_COLOR_INDEX +0x0000A468 TD_CS_BORDER_COLOR_RED +0x0000A46C TD_CS_BORDER_COLOR_GREEN +0x0000A470 TD_CS_BORDER_COLOR_BLUE +0x0000A474 TD_CS_BORDER_COLOR_ALPHA +0x00028000 DB_RENDER_CONTROL +0x00028004 DB_COUNT_CONTROL +0x0002800C DB_RENDER_OVERRIDE +0x00028010 DB_RENDER_OVERRIDE2 +0x00028028 DB_STENCIL_CLEAR +0x0002802C DB_DEPTH_CLEAR +0x00028030 PA_SC_SCREEN_SCISSOR_TL +0x00028034 PA_SC_SCREEN_SCISSOR_BR +0x0002805C DB_DEPTH_SLICE +0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 +0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 +0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2 +0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3 +0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4 +0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5 
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6 +0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7 +0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8 +0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9 +0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10 +0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11 +0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12 +0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13 +0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14 +0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15 +0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0 +0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1 +0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2 +0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3 +0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4 +0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5 +0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6 +0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7 +0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8 +0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9 +0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10 +0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11 +0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12 +0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13 +0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14 +0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15 +0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0 +0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1 +0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2 +0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3 +0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4 +0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5 +0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6 +0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7 +0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8 +0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9 +0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10 +0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11 +0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12 +0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13 +0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14 +0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15 +0x00028200 PA_SC_WINDOW_OFFSET +0x00028204 PA_SC_WINDOW_SCISSOR_TL +0x00028208 PA_SC_WINDOW_SCISSOR_BR +0x0002820C PA_SC_CLIPRECT_RULE +0x00028210 PA_SC_CLIPRECT_0_TL +0x00028214 PA_SC_CLIPRECT_0_BR +0x00028218 PA_SC_CLIPRECT_1_TL +0x0002821C PA_SC_CLIPRECT_1_BR +0x00028220 PA_SC_CLIPRECT_2_TL +0x00028224 PA_SC_CLIPRECT_2_BR +0x00028228 PA_SC_CLIPRECT_3_TL +0x0002822C PA_SC_CLIPRECT_3_BR +0x00028230 PA_SC_EDGERULE +0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET +0x00028240 PA_SC_GENERIC_SCISSOR_TL +0x00028244 PA_SC_GENERIC_SCISSOR_BR +0x00028250 PA_SC_VPORT_SCISSOR_0_TL +0x00028254 PA_SC_VPORT_SCISSOR_0_BR +0x00028258 PA_SC_VPORT_SCISSOR_1_TL +0x0002825C PA_SC_VPORT_SCISSOR_1_BR +0x00028260 PA_SC_VPORT_SCISSOR_2_TL +0x00028264 PA_SC_VPORT_SCISSOR_2_BR +0x00028268 PA_SC_VPORT_SCISSOR_3_TL +0x0002826C PA_SC_VPORT_SCISSOR_3_BR +0x00028270 PA_SC_VPORT_SCISSOR_4_TL +0x00028274 PA_SC_VPORT_SCISSOR_4_BR +0x00028278 PA_SC_VPORT_SCISSOR_5_TL +0x0002827C PA_SC_VPORT_SCISSOR_5_BR +0x00028280 PA_SC_VPORT_SCISSOR_6_TL +0x00028284 PA_SC_VPORT_SCISSOR_6_BR +0x00028288 PA_SC_VPORT_SCISSOR_7_TL +0x0002828C PA_SC_VPORT_SCISSOR_7_BR +0x00028290 PA_SC_VPORT_SCISSOR_8_TL +0x00028294 PA_SC_VPORT_SCISSOR_8_BR +0x00028298 PA_SC_VPORT_SCISSOR_9_TL +0x0002829C PA_SC_VPORT_SCISSOR_9_BR +0x000282A0 PA_SC_VPORT_SCISSOR_10_TL +0x000282A4 PA_SC_VPORT_SCISSOR_10_BR +0x000282A8 PA_SC_VPORT_SCISSOR_11_TL +0x000282AC PA_SC_VPORT_SCISSOR_11_BR +0x000282B0 PA_SC_VPORT_SCISSOR_12_TL +0x000282B4 PA_SC_VPORT_SCISSOR_12_BR +0x000282B8 PA_SC_VPORT_SCISSOR_13_TL +0x000282BC PA_SC_VPORT_SCISSOR_13_BR +0x000282C0 PA_SC_VPORT_SCISSOR_14_TL +0x000282C4 PA_SC_VPORT_SCISSOR_14_BR +0x000282C8 PA_SC_VPORT_SCISSOR_15_TL +0x000282CC PA_SC_VPORT_SCISSOR_15_BR +0x000282D0 PA_SC_VPORT_ZMIN_0 +0x000282D4 
PA_SC_VPORT_ZMAX_0 +0x000282D8 PA_SC_VPORT_ZMIN_1 +0x000282DC PA_SC_VPORT_ZMAX_1 +0x000282E0 PA_SC_VPORT_ZMIN_2 +0x000282E4 PA_SC_VPORT_ZMAX_2 +0x000282E8 PA_SC_VPORT_ZMIN_3 +0x000282EC PA_SC_VPORT_ZMAX_3 +0x000282F0 PA_SC_VPORT_ZMIN_4 +0x000282F4 PA_SC_VPORT_ZMAX_4 +0x000282F8 PA_SC_VPORT_ZMIN_5 +0x000282FC PA_SC_VPORT_ZMAX_5 +0x00028300 PA_SC_VPORT_ZMIN_6 +0x00028304 PA_SC_VPORT_ZMAX_6 +0x00028308 PA_SC_VPORT_ZMIN_7 +0x0002830C PA_SC_VPORT_ZMAX_7 +0x00028310 PA_SC_VPORT_ZMIN_8 +0x00028314 PA_SC_VPORT_ZMAX_8 +0x00028318 PA_SC_VPORT_ZMIN_9 +0x0002831C PA_SC_VPORT_ZMAX_9 +0x00028320 PA_SC_VPORT_ZMIN_10 +0x00028324 PA_SC_VPORT_ZMAX_10 +0x00028328 PA_SC_VPORT_ZMIN_11 +0x0002832C PA_SC_VPORT_ZMAX_11 +0x00028330 PA_SC_VPORT_ZMIN_12 +0x00028334 PA_SC_VPORT_ZMAX_12 +0x00028338 PA_SC_VPORT_ZMIN_13 +0x0002833C PA_SC_VPORT_ZMAX_13 +0x00028340 PA_SC_VPORT_ZMIN_14 +0x00028344 PA_SC_VPORT_ZMAX_14 +0x00028348 PA_SC_VPORT_ZMIN_15 +0x0002834C PA_SC_VPORT_ZMAX_15 +0x00028350 SX_MISC +0x00028380 SQ_VTX_SEMANTIC_0 +0x00028384 SQ_VTX_SEMANTIC_1 +0x00028388 SQ_VTX_SEMANTIC_2 +0x0002838C SQ_VTX_SEMANTIC_3 +0x00028390 SQ_VTX_SEMANTIC_4 +0x00028394 SQ_VTX_SEMANTIC_5 +0x00028398 SQ_VTX_SEMANTIC_6 +0x0002839C SQ_VTX_SEMANTIC_7 +0x000283A0 SQ_VTX_SEMANTIC_8 +0x000283A4 SQ_VTX_SEMANTIC_9 +0x000283A8 SQ_VTX_SEMANTIC_10 +0x000283AC SQ_VTX_SEMANTIC_11 +0x000283B0 SQ_VTX_SEMANTIC_12 +0x000283B4 SQ_VTX_SEMANTIC_13 +0x000283B8 SQ_VTX_SEMANTIC_14 +0x000283BC SQ_VTX_SEMANTIC_15 +0x000283C0 SQ_VTX_SEMANTIC_16 +0x000283C4 SQ_VTX_SEMANTIC_17 +0x000283C8 SQ_VTX_SEMANTIC_18 +0x000283CC SQ_VTX_SEMANTIC_19 +0x000283D0 SQ_VTX_SEMANTIC_20 +0x000283D4 SQ_VTX_SEMANTIC_21 +0x000283D8 SQ_VTX_SEMANTIC_22 +0x000283DC SQ_VTX_SEMANTIC_23 +0x000283E0 SQ_VTX_SEMANTIC_24 +0x000283E4 SQ_VTX_SEMANTIC_25 +0x000283E8 SQ_VTX_SEMANTIC_26 +0x000283EC SQ_VTX_SEMANTIC_27 +0x000283F0 SQ_VTX_SEMANTIC_28 +0x000283F4 SQ_VTX_SEMANTIC_29 +0x000283F8 SQ_VTX_SEMANTIC_30 +0x000283FC SQ_VTX_SEMANTIC_31 +0x00028400 VGT_MAX_VTX_INDX +0x00028404 VGT_MIN_VTX_INDX +0x00028408 VGT_INDX_OFFSET +0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX +0x00028410 SX_ALPHA_TEST_CONTROL +0x00028414 CB_BLEND_RED +0x00028418 CB_BLEND_GREEN +0x0002841C CB_BLEND_BLUE +0x00028420 CB_BLEND_ALPHA +0x00028430 DB_STENCILREFMASK +0x00028434 DB_STENCILREFMASK_BF +0x00028438 SX_ALPHA_REF +0x0002843C PA_CL_VPORT_XSCALE_0 +0x00028440 PA_CL_VPORT_XOFFSET_0 +0x00028444 PA_CL_VPORT_YSCALE_0 +0x00028448 PA_CL_VPORT_YOFFSET_0 +0x0002844C PA_CL_VPORT_ZSCALE_0 +0x00028450 PA_CL_VPORT_ZOFFSET_0 +0x00028454 PA_CL_VPORT_XSCALE_1 +0x00028458 PA_CL_VPORT_XOFFSET_1 +0x0002845C PA_CL_VPORT_YSCALE_1 +0x00028460 PA_CL_VPORT_YOFFSET_1 +0x00028464 PA_CL_VPORT_ZSCALE_1 +0x00028468 PA_CL_VPORT_ZOFFSET_1 +0x0002846C PA_CL_VPORT_XSCALE_2 +0x00028470 PA_CL_VPORT_XOFFSET_2 +0x00028474 PA_CL_VPORT_YSCALE_2 +0x00028478 PA_CL_VPORT_YOFFSET_2 +0x0002847C PA_CL_VPORT_ZSCALE_2 +0x00028480 PA_CL_VPORT_ZOFFSET_2 +0x00028484 PA_CL_VPORT_XSCALE_3 +0x00028488 PA_CL_VPORT_XOFFSET_3 +0x0002848C PA_CL_VPORT_YSCALE_3 +0x00028490 PA_CL_VPORT_YOFFSET_3 +0x00028494 PA_CL_VPORT_ZSCALE_3 +0x00028498 PA_CL_VPORT_ZOFFSET_3 +0x0002849C PA_CL_VPORT_XSCALE_4 +0x000284A0 PA_CL_VPORT_XOFFSET_4 +0x000284A4 PA_CL_VPORT_YSCALE_4 +0x000284A8 PA_CL_VPORT_YOFFSET_4 +0x000284AC PA_CL_VPORT_ZSCALE_4 +0x000284B0 PA_CL_VPORT_ZOFFSET_4 +0x000284B4 PA_CL_VPORT_XSCALE_5 +0x000284B8 PA_CL_VPORT_XOFFSET_5 +0x000284BC PA_CL_VPORT_YSCALE_5 +0x000284C0 PA_CL_VPORT_YOFFSET_5 +0x000284C4 PA_CL_VPORT_ZSCALE_5 +0x000284C8 PA_CL_VPORT_ZOFFSET_5 +0x000284CC 
PA_CL_VPORT_XSCALE_6 +0x000284D0 PA_CL_VPORT_XOFFSET_6 +0x000284D4 PA_CL_VPORT_YSCALE_6 +0x000284D8 PA_CL_VPORT_YOFFSET_6 +0x000284DC PA_CL_VPORT_ZSCALE_6 +0x000284E0 PA_CL_VPORT_ZOFFSET_6 +0x000284E4 PA_CL_VPORT_XSCALE_7 +0x000284E8 PA_CL_VPORT_XOFFSET_7 +0x000284EC PA_CL_VPORT_YSCALE_7 +0x000284F0 PA_CL_VPORT_YOFFSET_7 +0x000284F4 PA_CL_VPORT_ZSCALE_7 +0x000284F8 PA_CL_VPORT_ZOFFSET_7 +0x000284FC PA_CL_VPORT_XSCALE_8 +0x00028500 PA_CL_VPORT_XOFFSET_8 +0x00028504 PA_CL_VPORT_YSCALE_8 +0x00028508 PA_CL_VPORT_YOFFSET_8 +0x0002850C PA_CL_VPORT_ZSCALE_8 +0x00028510 PA_CL_VPORT_ZOFFSET_8 +0x00028514 PA_CL_VPORT_XSCALE_9 +0x00028518 PA_CL_VPORT_XOFFSET_9 +0x0002851C PA_CL_VPORT_YSCALE_9 +0x00028520 PA_CL_VPORT_YOFFSET_9 +0x00028524 PA_CL_VPORT_ZSCALE_9 +0x00028528 PA_CL_VPORT_ZOFFSET_9 +0x0002852C PA_CL_VPORT_XSCALE_10 +0x00028530 PA_CL_VPORT_XOFFSET_10 +0x00028534 PA_CL_VPORT_YSCALE_10 +0x00028538 PA_CL_VPORT_YOFFSET_10 +0x0002853C PA_CL_VPORT_ZSCALE_10 +0x00028540 PA_CL_VPORT_ZOFFSET_10 +0x00028544 PA_CL_VPORT_XSCALE_11 +0x00028548 PA_CL_VPORT_XOFFSET_11 +0x0002854C PA_CL_VPORT_YSCALE_11 +0x00028550 PA_CL_VPORT_YOFFSET_11 +0x00028554 PA_CL_VPORT_ZSCALE_11 +0x00028558 PA_CL_VPORT_ZOFFSET_11 +0x0002855C PA_CL_VPORT_XSCALE_12 +0x00028560 PA_CL_VPORT_XOFFSET_12 +0x00028564 PA_CL_VPORT_YSCALE_12 +0x00028568 PA_CL_VPORT_YOFFSET_12 +0x0002856C PA_CL_VPORT_ZSCALE_12 +0x00028570 PA_CL_VPORT_ZOFFSET_12 +0x00028574 PA_CL_VPORT_XSCALE_13 +0x00028578 PA_CL_VPORT_XOFFSET_13 +0x0002857C PA_CL_VPORT_YSCALE_13 +0x00028580 PA_CL_VPORT_YOFFSET_13 +0x00028584 PA_CL_VPORT_ZSCALE_13 +0x00028588 PA_CL_VPORT_ZOFFSET_13 +0x0002858C PA_CL_VPORT_XSCALE_14 +0x00028590 PA_CL_VPORT_XOFFSET_14 +0x00028594 PA_CL_VPORT_YSCALE_14 +0x00028598 PA_CL_VPORT_YOFFSET_14 +0x0002859C PA_CL_VPORT_ZSCALE_14 +0x000285A0 PA_CL_VPORT_ZOFFSET_14 +0x000285A4 PA_CL_VPORT_XSCALE_15 +0x000285A8 PA_CL_VPORT_XOFFSET_15 +0x000285AC PA_CL_VPORT_YSCALE_15 +0x000285B0 PA_CL_VPORT_YOFFSET_15 +0x000285B4 PA_CL_VPORT_ZSCALE_15 +0x000285B8 PA_CL_VPORT_ZOFFSET_15 +0x000285BC PA_CL_UCP_0_X +0x000285C0 PA_CL_UCP_0_Y +0x000285C4 PA_CL_UCP_0_Z +0x000285C8 PA_CL_UCP_0_W +0x000285CC PA_CL_UCP_1_X +0x000285D0 PA_CL_UCP_1_Y +0x000285D4 PA_CL_UCP_1_Z +0x000285D8 PA_CL_UCP_1_W +0x000285DC PA_CL_UCP_2_X +0x000285E0 PA_CL_UCP_2_Y +0x000285E4 PA_CL_UCP_2_Z +0x000285E8 PA_CL_UCP_2_W +0x000285EC PA_CL_UCP_3_X +0x000285F0 PA_CL_UCP_3_Y +0x000285F4 PA_CL_UCP_3_Z +0x000285F8 PA_CL_UCP_3_W +0x000285FC PA_CL_UCP_4_X +0x00028600 PA_CL_UCP_4_Y +0x00028604 PA_CL_UCP_4_Z +0x00028608 PA_CL_UCP_4_W +0x0002860C PA_CL_UCP_5_X +0x00028610 PA_CL_UCP_5_Y +0x00028614 PA_CL_UCP_5_Z +0x00028618 PA_CL_UCP_5_W +0x0002861C SPI_VS_OUT_ID_0 +0x00028620 SPI_VS_OUT_ID_1 +0x00028624 SPI_VS_OUT_ID_2 +0x00028628 SPI_VS_OUT_ID_3 +0x0002862C SPI_VS_OUT_ID_4 +0x00028630 SPI_VS_OUT_ID_5 +0x00028634 SPI_VS_OUT_ID_6 +0x00028638 SPI_VS_OUT_ID_7 +0x0002863C SPI_VS_OUT_ID_8 +0x00028640 SPI_VS_OUT_ID_9 +0x00028644 SPI_PS_INPUT_CNTL_0 +0x00028648 SPI_PS_INPUT_CNTL_1 +0x0002864C SPI_PS_INPUT_CNTL_2 +0x00028650 SPI_PS_INPUT_CNTL_3 +0x00028654 SPI_PS_INPUT_CNTL_4 +0x00028658 SPI_PS_INPUT_CNTL_5 +0x0002865C SPI_PS_INPUT_CNTL_6 +0x00028660 SPI_PS_INPUT_CNTL_7 +0x00028664 SPI_PS_INPUT_CNTL_8 +0x00028668 SPI_PS_INPUT_CNTL_9 +0x0002866C SPI_PS_INPUT_CNTL_10 +0x00028670 SPI_PS_INPUT_CNTL_11 +0x00028674 SPI_PS_INPUT_CNTL_12 +0x00028678 SPI_PS_INPUT_CNTL_13 +0x0002867C SPI_PS_INPUT_CNTL_14 +0x00028680 SPI_PS_INPUT_CNTL_15 +0x00028684 SPI_PS_INPUT_CNTL_16 +0x00028688 SPI_PS_INPUT_CNTL_17 +0x0002868C SPI_PS_INPUT_CNTL_18 
+0x00028690 SPI_PS_INPUT_CNTL_19 +0x00028694 SPI_PS_INPUT_CNTL_20 +0x00028698 SPI_PS_INPUT_CNTL_21 +0x0002869C SPI_PS_INPUT_CNTL_22 +0x000286A0 SPI_PS_INPUT_CNTL_23 +0x000286A4 SPI_PS_INPUT_CNTL_24 +0x000286A8 SPI_PS_INPUT_CNTL_25 +0x000286AC SPI_PS_INPUT_CNTL_26 +0x000286B0 SPI_PS_INPUT_CNTL_27 +0x000286B4 SPI_PS_INPUT_CNTL_28 +0x000286B8 SPI_PS_INPUT_CNTL_29 +0x000286BC SPI_PS_INPUT_CNTL_30 +0x000286C0 SPI_PS_INPUT_CNTL_31 +0x000286C4 SPI_VS_OUT_CONFIG +0x000286C8 SPI_THREAD_GROUPING +0x000286CC SPI_PS_IN_CONTROL_0 +0x000286D0 SPI_PS_IN_CONTROL_1 +0x000286D4 SPI_INTERP_CONTROL_0 +0x000286D8 SPI_INPUT_Z +0x000286DC SPI_FOG_CNTL +0x000286E0 SPI_BARYC_CNTL +0x000286E4 SPI_PS_IN_CONTROL_2 +0x000286E8 SPI_COMPUTE_INPUT_CNTL +0x000286EC SPI_COMPUTE_NUM_THREAD_X +0x000286F0 SPI_COMPUTE_NUM_THREAD_Y +0x000286F4 SPI_COMPUTE_NUM_THREAD_Z +0x000286F8 GDS_ADDR_SIZE +0x00028780 CB_BLEND0_CONTROL +0x00028784 CB_BLEND1_CONTROL +0x00028788 CB_BLEND2_CONTROL +0x0002878C CB_BLEND3_CONTROL +0x00028790 CB_BLEND4_CONTROL +0x00028794 CB_BLEND5_CONTROL +0x00028798 CB_BLEND6_CONTROL +0x0002879C CB_BLEND7_CONTROL +0x000287CC CS_COPY_STATE +0x000287D0 GFX_COPY_STATE +0x000287D4 PA_CL_POINT_X_RAD +0x000287D8 PA_CL_POINT_Y_RAD +0x000287DC PA_CL_POINT_SIZE +0x000287E0 PA_CL_POINT_CULL_RAD +0x00028808 CB_COLOR_CONTROL +0x0002880C DB_SHADER_CONTROL +0x00028810 PA_CL_CLIP_CNTL +0x00028814 PA_SU_SC_MODE_CNTL +0x00028818 PA_CL_VTE_CNTL +0x0002881C PA_CL_VS_OUT_CNTL +0x00028820 PA_CL_NANINF_CNTL +0x00028824 PA_SU_LINE_STIPPLE_CNTL +0x00028828 PA_SU_LINE_STIPPLE_SCALE +0x0002882C PA_SU_PRIM_FILTER_CNTL +0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1 +0x00028844 SQ_PGM_RESOURCES_PS +0x00028848 SQ_PGM_RESOURCES_2_PS +0x0002884C SQ_PGM_EXPORTS_PS +0x00028860 SQ_PGM_RESOURCES_VS +0x00028864 SQ_PGM_RESOURCES_2_VS +0x00028878 SQ_PGM_RESOURCES_GS +0x0002887C SQ_PGM_RESOURCES_2_GS +0x00028890 SQ_PGM_RESOURCES_ES +0x00028894 SQ_PGM_RESOURCES_2_ES +0x000288A8 SQ_PGM_RESOURCES_FS +0x000288BC SQ_PGM_RESOURCES_HS +0x000288C0 SQ_PGM_RESOURCES_2_HS +0x000288D4 SQ_PGM_RESOURCES_LS +0x000288D8 SQ_PGM_RESOURCES_2_LS +0x000288E8 SQ_LDS_ALLOC +0x000288EC SQ_LDS_ALLOC_PS +0x000288F0 SQ_VTX_SEMANTIC_CLEAR +0x00028A00 PA_SU_POINT_SIZE +0x00028A04 PA_SU_POINT_MINMAX +0x00028A08 PA_SU_LINE_CNTL +0x00028A0C PA_SC_LINE_STIPPLE +0x00028A10 VGT_OUTPUT_PATH_CNTL +0x00028A14 VGT_HOS_CNTL +0x00028A18 VGT_HOS_MAX_TESS_LEVEL +0x00028A1C VGT_HOS_MIN_TESS_LEVEL +0x00028A20 VGT_HOS_REUSE_DEPTH +0x00028A24 VGT_GROUP_PRIM_TYPE +0x00028A28 VGT_GROUP_FIRST_DECR +0x00028A2C VGT_GROUP_DECR +0x00028A30 VGT_GROUP_VECT_0_CNTL +0x00028A34 VGT_GROUP_VECT_1_CNTL +0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL +0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL +0x00028A40 VGT_GS_MODE +0x00028A48 PA_SC_MODE_CNTL_0 +0x00028A4C PA_SC_MODE_CNTL_1 +0x00028A50 VGT_ENHANCE +0x00028A54 VGT_GS_PER_ES +0x00028A58 VGT_ES_PER_GS +0x00028A5C VGT_GS_PER_VS +0x00028A6C VGT_GS_OUT_PRIM_TYPE +0x00028A84 VGT_PRIMITIVEID_EN +0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN +0x00028AA0 VGT_INSTANCE_STEP_RATE_0 +0x00028AA4 VGT_INSTANCE_STEP_RATE_1 +0x00028AB4 VGT_REUSE_OFF +0x00028AB8 VGT_VTX_CNT_EN +0x00028ABC DB_HTILE_SURFACE +0x00028AC0 DB_SRESULTS_COMPARE_STATE0 +0x00028AC4 DB_SRESULTS_COMPARE_STATE1 +0x00028AC8 DB_PRELOAD_CONTROL +0x00028B38 VGT_GS_MAX_VERT_OUT +0x00028B54 VGT_SHADER_STAGES_EN +0x00028B58 VGT_LS_HS_CONFIG +0x00028B5C VGT_LS_SIZE +0x00028B60 VGT_HS_SIZE +0x00028B64 VGT_LS_HS_ALLOC +0x00028B68 VGT_HS_PATCH_CONST +0x00028B6C VGT_TF_PARAM +0x00028B70 DB_ALPHA_TO_MASK +0x00028B74 VGT_DISPATCH_INITIATOR +0x00028B78 
PA_SU_POLY_OFFSET_DB_FMT_CNTL +0x00028B7C PA_SU_POLY_OFFSET_CLAMP +0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE +0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET +0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE +0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET +0x00028B74 VGT_GS_INSTANCE_CNT +0x00028C00 PA_SC_LINE_CNTL +0x00028C08 PA_SU_VTX_CNTL +0x00028C0C PA_CL_GB_VERT_CLIP_ADJ +0x00028C10 PA_CL_GB_VERT_DISC_ADJ +0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ +0x00028C18 PA_CL_GB_HORZ_DISC_ADJ +0x00028C1C PA_SC_AA_SAMPLE_LOCS_0 +0x00028C20 PA_SC_AA_SAMPLE_LOCS_1 +0x00028C24 PA_SC_AA_SAMPLE_LOCS_2 +0x00028C28 PA_SC_AA_SAMPLE_LOCS_3 +0x00028C2C PA_SC_AA_SAMPLE_LOCS_4 +0x00028C30 PA_SC_AA_SAMPLE_LOCS_5 +0x00028C34 PA_SC_AA_SAMPLE_LOCS_6 +0x00028C38 PA_SC_AA_SAMPLE_LOCS_7 +0x00028C3C PA_SC_AA_MASK +0x00028C8C CB_COLOR0_CLEAR_WORD0 +0x00028C90 CB_COLOR0_CLEAR_WORD1 +0x00028C94 CB_COLOR0_CLEAR_WORD2 +0x00028C98 CB_COLOR0_CLEAR_WORD3 +0x00028CC8 CB_COLOR1_CLEAR_WORD0 +0x00028CCC CB_COLOR1_CLEAR_WORD1 +0x00028CD0 CB_COLOR1_CLEAR_WORD2 +0x00028CD4 CB_COLOR1_CLEAR_WORD3 +0x00028D04 CB_COLOR2_CLEAR_WORD0 +0x00028D08 CB_COLOR2_CLEAR_WORD1 +0x00028D0C CB_COLOR2_CLEAR_WORD2 +0x00028D10 CB_COLOR2_CLEAR_WORD3 +0x00028D40 CB_COLOR3_CLEAR_WORD0 +0x00028D44 CB_COLOR3_CLEAR_WORD1 +0x00028D48 CB_COLOR3_CLEAR_WORD2 +0x00028D4C CB_COLOR3_CLEAR_WORD3 +0x00028D7C CB_COLOR4_CLEAR_WORD0 +0x00028D80 CB_COLOR4_CLEAR_WORD1 +0x00028D84 CB_COLOR4_CLEAR_WORD2 +0x00028D88 CB_COLOR4_CLEAR_WORD3 +0x00028DB8 CB_COLOR5_CLEAR_WORD0 +0x00028DBC CB_COLOR5_CLEAR_WORD1 +0x00028DC0 CB_COLOR5_CLEAR_WORD2 +0x00028DC4 CB_COLOR5_CLEAR_WORD3 +0x00028DF4 CB_COLOR6_CLEAR_WORD0 +0x00028DF8 CB_COLOR6_CLEAR_WORD1 +0x00028DFC CB_COLOR6_CLEAR_WORD2 +0x00028E00 CB_COLOR6_CLEAR_WORD3 +0x00028E30 CB_COLOR7_CLEAR_WORD0 +0x00028E34 CB_COLOR7_CLEAR_WORD1 +0x00028E38 CB_COLOR7_CLEAR_WORD2 +0x00028E3C CB_COLOR7_CLEAR_WORD3 +0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0 +0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1 +0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2 +0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3 +0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4 +0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5 +0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6 +0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7 +0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8 +0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9 +0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10 +0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11 +0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12 +0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13 +0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14 +0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15 +0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0 +0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1 +0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2 +0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3 +0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4 +0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5 +0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6 +0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7 +0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8 +0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9 +0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10 +0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11 +0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12 +0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13 +0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14 +0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15 +0x0003CFF0 SQ_VTX_BASE_VTX_LOC +0x0003CFF4 SQ_VTX_START_INST_LOC +0x0003FF00 SQ_TEX_SAMPLER_CLEAR +0x0003FF04 SQ_TEX_RESOURCE_CLEAR +0x0003FF08 SQ_LOOP_BOOL_CLEAR diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 340c7611f2ac..5ce3ccc7a423 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ 
b/drivers/gpu/drm/radeon/rs600.c @@ -74,7 +74,8 @@ void rs600_pm_misc(struct radeon_device *rdev) if (voltage->delay) udelay(voltage->delay); } - } + } else if (voltage->type == VOLTAGE_VDDC) + radeon_atom_set_voltage(rdev, voltage->vddc_id); dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index a18ba98885f3..5fea094ed8cb 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -79,7 +79,13 @@ void rs690_pm_info(struct radeon_device *rdev) tmp.full = dfixed_const(100); rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); - rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); + if (info->info.usK8MemoryClock) + rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); + else if (rdev->clock.default_mclk) { + rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); + rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); + } else + rdev->pm.igp_system_mclk.full = dfixed_const(400); rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); break; @@ -87,34 +93,31 @@ void rs690_pm_info(struct radeon_device *rdev) tmp.full = dfixed_const(100); rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); - rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); + if (info->info_v2.ulBootUpUMAClock) + rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); + else if (rdev->clock.default_mclk) + rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); + else + rdev->pm.igp_system_mclk.full = dfixed_const(66700); rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); break; default: - tmp.full = dfixed_const(100); /* We assume the slower possible clock ie worst case */ - /* DDR 333Mhz */ - rdev->pm.igp_sideport_mclk.full = dfixed_const(333); - /* FIXME: system clock ? */ - rdev->pm.igp_system_mclk.full = dfixed_const(100); - rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); - rdev->pm.igp_ht_link_clk.full = dfixed_const(200); + rdev->pm.igp_sideport_mclk.full = dfixed_const(200); + rdev->pm.igp_system_mclk.full = dfixed_const(200); + rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); rdev->pm.igp_ht_link_width.full = dfixed_const(8); DRM_ERROR("No integrated system info for your GPU, using safe default\n"); break; } } else { - tmp.full = dfixed_const(100); /* We assume the slower possible clock ie worst case */ - /* DDR 333Mhz */ - rdev->pm.igp_sideport_mclk.full = dfixed_const(333); - /* FIXME: system clock ? 
*/ - rdev->pm.igp_system_mclk.full = dfixed_const(100); - rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); - rdev->pm.igp_ht_link_clk.full = dfixed_const(200); + rdev->pm.igp_sideport_mclk.full = dfixed_const(200); + rdev->pm.igp_system_mclk.full = dfixed_const(200); + rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); rdev->pm.igp_ht_link_width.full = dfixed_const(8); DRM_ERROR("No integrated system info for your GPU, using safe default\n"); } @@ -228,10 +231,6 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, fixed20_12 a, b, c; fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; - /* FIXME: detect IGP with sideport memory, i don't think there is any - * such product available - */ - bool sideport = false; if (!crtc->base.enabled) { /* FIXME: wouldn't it better to set priority mark to maximum */ @@ -300,7 +299,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, /* Maximun bandwidth is the minimun bandwidth of all component */ rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; - if (sideport) { + if (rdev->mc.igp_sideport_enabled) { if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && rdev->pm.sideport_bandwidth.full) rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 5c7f0b97c6aa..6a7bf1091971 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -44,7 +44,18 @@ void rv770_fini(struct radeon_device *rdev); void rv770_pm_misc(struct radeon_device *rdev) { - + int req_ps_idx = rdev->pm.requested_power_state_index; + int req_cm_idx = rdev->pm.requested_clock_mode_index; + struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; + struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; + + if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { + if (voltage->voltage != rdev->pm.current_vddc) { + radeon_atom_set_voltage(rdev, voltage->voltage); + rdev->pm.current_vddc = voltage->voltage; + DRM_DEBUG("Setting: v: %d\n", voltage->voltage); + } + } } /* @@ -213,7 +224,7 @@ static void rv770_mc_program(struct radeon_device *rdev) WREG32(MC_VM_FB_LOCATION, tmp); WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); WREG32(HDP_NONSURFACE_INFO, (2 << 7)); - WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); + WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); if (rdev->flags & RADEON_IS_AGP) { WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); |
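The rv770_pm_misc() hook added above (and the VOLTAGE_VDDC branch in rs600_pm_misc()) programs the core voltage only when the requested state carries a non-zero software-controlled value that differs from the cached current_vddc, then updates the cache, so repeated reclocks do not issue redundant AtomBIOS voltage calls. Below is a standalone sketch of that write-on-change pattern; set_vddc() and struct pm_state are stand-ins, not driver definitions.

#include <stdio.h>

static void set_vddc(int mv) { printf("program VDDC = %d mV\n", mv); }

struct pm_state {
	int requested_vddc;	/* 0 means not software controlled */
	int current_vddc;	/* last value actually programmed */
};

static void apply_voltage(struct pm_state *pm)
{
	/* write the regulator only on a real change */
	if (pm->requested_vddc && pm->requested_vddc != pm->current_vddc) {
		set_vddc(pm->requested_vddc);
		pm->current_vddc = pm->requested_vddc;
	}
}

int main(void)
{
	struct pm_state pm = { .requested_vddc = 1100, .current_vddc = 1000 };

	apply_voltage(&pm);	/* programs 1100 mV */
	apply_voltage(&pm);	/* no-op: already at 1100 mV */
	return 0;
}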