author | Dave Airlie <airlied@redhat.com> | 2010-04-20 13:16:50 +1000
---|---|---
committer | Dave Airlie <airlied@redhat.com> | 2010-04-20 13:16:50 +1000
commit | 153549b8b63d71a9c5d8cbde887097b995c32bd6 | (patch)
tree | 306ece7479bb98172272013dd02b243e6677a3f4 | /drivers
parent | 7fff400be6fbf64f10abca9939718aaf1d61c255 | (diff)
parent | a7433742d62c6e0e1173bd144a4aef7724b48d60 | (diff)
Merge branch 'drm-radeon-evergreen-accel' into drm-core-next
* drm-radeon-evergreen-accel:
drm/radeon: fix cypress firmware typo.
drm/radeon/kms/evergreen: add hpd support
drm/radeon/kms/evergreen: implement irq support
drm/radeon/kms/evergreen: setup and enable the CP
drm/radeon/kms/evergreen: implement gfx init
drm/radeon/kms/evergreen: add soft reset function
drm/radeon/kms/evergreen: add gart support
drm/radeon/kms: add support for evergreen power tables
drm/radeon/kms: update atombios.h power tables for evergreen
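
The hpd and irq patches in this series share one pattern worth noting before the diff: each of Evergreen's six hot-plug-detect (HPD) pins has its own DC_HPDx_INT_STATUS/DC_HPDx_INT_CONTROL register pair, and the driver programs the interrupt polarity opposite to the currently sensed line state so the next connect or disconnect always raises an interrupt. A minimal sketch of that pattern, distilled from the evergreen.c hunks below; it assumes the radeon driver's RREG32()/WREG32() accessors and the DC_HPDx_* bit definitions this merge adds, and the parameterized helpers are illustrative, not functions from the patch:

    /* Illustrative only: the merge open-codes this per pin in
     * evergreen_hpd_sense() and evergreen_hpd_set_polarity(). */
    static bool hpd_line_connected(struct radeon_device *rdev, u32 status_reg)
    {
        return RREG32(status_reg) & DC_HPDx_SENSE;
    }

    static void hpd_arm_for_next_transition(struct radeon_device *rdev,
                                            u32 status_reg, u32 control_reg)
    {
        u32 tmp = RREG32(control_reg);

        /* Fire on the opposite of the current state: a connected pin
         * interrupts on unplug, a disconnected pin on plug. */
        if (RREG32(status_reg) & DC_HPDx_SENSE)
            tmp &= ~DC_HPDx_INT_POLARITY;
        else
            tmp |= DC_HPDx_INT_POLARITY;
        WREG32(control_reg, tmp);
    }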
Diffstat (limited to 'drivers')

drivers/gpu/drm/radeon/atombios.h | 76
drivers/gpu/drm/radeon/atombios_crtc.c | 8
drivers/gpu/drm/radeon/evergreen.c | 1483
drivers/gpu/drm/radeon/evergreend.h | 556
drivers/gpu/drm/radeon/r600.c | 71
drivers/gpu/drm/radeon/radeon.h | 35
drivers/gpu/drm/radeon/radeon_asic.c | 14
drivers/gpu/drm/radeon/radeon_asic.h | 5
drivers/gpu/drm/radeon/radeon_atombios.c | 44
drivers/gpu/drm/radeon/rv770.c | 10

10 files changed, 2218 insertions, 84 deletions
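
The atombios.h hunk that opens the diff adds the PowerPlay table layouts the Evergreen power-table parsing needs, including ATOM_PPLIB_FANTABLE, which encodes a three-point fan curve: temperatures in 0.01 centigrades (usTMin/usTMed/usTHigh, with usTHyst as hysteresis) and PWM duty cycles in 0.01 % increments (usPWMMin/usPWMMed/usPWMHigh). The merge only defines the layout; one plausible way a controller could evaluate the curve is linear interpolation between the three points. A sketch under that assumption — the helper name, the interpolation scheme, a non-decreasing curve, and treating usTHigh as an absolute temperature rather than an offset above usTMed are all mine:

    /* Hypothetical fan-curve evaluation for the ATOM_PPLIB_FANTABLE
     * added below; temp is in 0.01 degrees C, the result in 0.01 %.
     * Assumes usTMin < usTMed < usTHigh and a non-decreasing PWM
     * ladder; the patch itself only defines the structure. */
    static u16 fan_pwm_for_temp(const ATOM_PPLIB_FANTABLE *t, u16 temp)
    {
        if (temp <= t->usTMin)
            return t->usPWMMin;
        if (temp <= t->usTMed)
            return t->usPWMMin + (u32)(temp - t->usTMin) *
                   (t->usPWMMed - t->usPWMMin) / (t->usTMed - t->usTMin);
        if (temp <= t->usTHigh)
            return t->usPWMMed + (u32)(temp - t->usTMed) *
                   (t->usPWMHigh - t->usPWMMed) / (t->usTHigh - t->usTMed);
        return t->usPWMHigh;
    }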
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 6732b5dd8ff4..26986c8e1f45 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h @@ -5742,6 +5742,9 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER #define ATOM_PP_THERMALCONTROLLER_RV6xx 7 #define ATOM_PP_THERMALCONTROLLER_RV770 8 #define ATOM_PP_THERMALCONTROLLER_ADT7473 9 +#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11 +#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12 +#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller typedef struct _ATOM_PPLIB_STATE { @@ -5749,6 +5752,26 @@ typedef struct _ATOM_PPLIB_STATE UCHAR ucClockStateIndices[1]; // variable-sized } ATOM_PPLIB_STATE; +typedef struct _ATOM_PPLIB_FANTABLE +{ + UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same. + UCHAR ucTHyst; // Temperature hysteresis. Integer. + USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM. + USHORT usTMed; // The middle temperature where we change slopes. + USHORT usTHigh; // The high point above TMed for adjusting the second slope. + USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments). + USHORT usPWMMed; // The PWM value (in percent) at TMed. + USHORT usPWMHigh; // The PWM value at THigh. +} ATOM_PPLIB_FANTABLE; + +typedef struct _ATOM_PPLIB_EXTENDEDHEADER +{ + USHORT usSize; + ULONG ulMaxEngineClock; // For Overdrive. + ULONG ulMaxMemoryClock; // For Overdrive. + // Add extra system parameters here, always adjust size to include all fields. +} ATOM_PPLIB_EXTENDEDHEADER; + //// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps #define ATOM_PP_PLATFORM_CAP_BACKBIAS 1 #define ATOM_PP_PLATFORM_CAP_POWERPLAY 2 @@ -5762,6 +5785,12 @@ typedef struct _ATOM_PPLIB_STATE #define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512 #define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024 #define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048 +#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096 +#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition. +#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition). +#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC. +#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature. +#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state. typedef struct _ATOM_PPLIB_POWERPLAYTABLE { @@ -5797,6 +5826,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE } ATOM_PPLIB_POWERPLAYTABLE; +typedef struct _ATOM_PPLIB_POWERPLAYTABLE2 +{ + ATOM_PPLIB_POWERPLAYTABLE basicTable; + UCHAR ucNumCustomThermalPolicy; + USHORT usCustomThermalPolicyArrayOffset; +}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2; + +typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 +{ + ATOM_PPLIB_POWERPLAYTABLE2 basicTable2; + USHORT usFormatID; // To be used ONLY by PPGen. 
+ USHORT usFanTableOffset; + USHORT usExtendendedHeaderOffset; +} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3; + //// ATOM_PPLIB_NONCLOCK_INFO::usClassification #define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 @@ -5816,7 +5860,9 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE #define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400 #define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800 #define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000 -// remaining 3 bits are reserved +#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000 +#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000 +#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000 //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 @@ -5840,9 +5886,15 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE #define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000 #define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000 +#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000 #define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000 -#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000 +//memory related flags +#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000 + +//M3 Arb //2bits, current 3 sets of parameters in total +#define ATOM_PPLIB_M3ARB_MASK 0x00060000 +#define ATOM_PPLIB_M3ARB_SHIFT 17 // Contained in an array starting at the offset // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. @@ -5860,6 +5912,9 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO // Contained in an array starting at the offset // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. // referenced from ATOM_PPLIB_STATE::ucClockStateIndices +#define ATOM_PPLIB_NONCLOCKINFO_VER1 12 +#define ATOM_PPLIB_NONCLOCKINFO_VER2 24 + typedef struct _ATOM_PPLIB_R600_CLOCK_INFO { USHORT usEngineClockLow; @@ -5882,6 +5937,23 @@ typedef struct _ATOM_PPLIB_R600_CLOCK_INFO #define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4 #define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8 #define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16 +#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0). 
+ +typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO +{ + USHORT usEngineClockLow; + UCHAR ucEngineClockHigh; + + USHORT usMemoryClockLow; + UCHAR ucMemoryClockHigh; + + USHORT usVDDC; + USHORT usVDDCI; + USHORT usUnused; + + ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_* + +} ATOM_PPLIB_EVERGREEN_CLOCK_INFO; typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a87990b3ae84..3feca6aec4c4 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -249,17 +249,13 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) if (ASIC_IS_DCE3(rdev)) atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); atombios_blank_crtc(crtc, ATOM_DISABLE); - /* XXX re-enable when interrupt support is added */ - if (!ASIC_IS_DCE4(rdev)) - drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); + drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); radeon_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - /* XXX re-enable when interrupt support is added */ - if (!ASIC_IS_DCE4(rdev)) - drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); + drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); atombios_blank_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev)) atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index bd1bb9fb2b2a..3295154e5934 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -28,39 +28,194 @@ #include "radeon.h" #include "radeon_asic.h" #include "radeon_drm.h" -#include "rv770d.h" +#include "evergreend.h" #include "atom.h" #include "avivod.h" #include "evergreen_reg.h" +#define EVERGREEN_PFP_UCODE_SIZE 1120 +#define EVERGREEN_PM4_UCODE_SIZE 1376 + static void evergreen_gpu_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) { bool connected = false; - /* XXX */ + + switch (hpd) { + case RADEON_HPD_1: + if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case RADEON_HPD_2: + if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case RADEON_HPD_3: + if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case RADEON_HPD_4: + if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case RADEON_HPD_5: + if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case RADEON_HPD_6: + if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + default: + break; + } + return connected; } void evergreen_hpd_set_polarity(struct radeon_device *rdev, enum radeon_hpd_id hpd) { - /* XXX */ + u32 tmp; + bool connected = evergreen_hpd_sense(rdev, hpd); + + switch (hpd) { + case RADEON_HPD_1: + tmp = RREG32(DC_HPD1_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD1_INT_CONTROL, tmp); + break; + case RADEON_HPD_2: + tmp = RREG32(DC_HPD2_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD2_INT_CONTROL, tmp); + break; + case RADEON_HPD_3: + tmp = RREG32(DC_HPD3_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD3_INT_CONTROL, tmp); + break; + case RADEON_HPD_4: + tmp = RREG32(DC_HPD4_INT_CONTROL); + 
if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD4_INT_CONTROL, tmp); + break; + case RADEON_HPD_5: + tmp = RREG32(DC_HPD5_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD5_INT_CONTROL, tmp); + break; + case RADEON_HPD_6: + tmp = RREG32(DC_HPD6_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD6_INT_CONTROL, tmp); + break; + default: + break; + } } void evergreen_hpd_init(struct radeon_device *rdev) { - /* XXX */ + struct drm_device *dev = rdev->ddev; + struct drm_connector *connector; + u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | + DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + switch (radeon_connector->hpd.hpd) { + case RADEON_HPD_1: + WREG32(DC_HPD1_CONTROL, tmp); + rdev->irq.hpd[0] = true; + break; + case RADEON_HPD_2: + WREG32(DC_HPD2_CONTROL, tmp); + rdev->irq.hpd[1] = true; + break; + case RADEON_HPD_3: + WREG32(DC_HPD3_CONTROL, tmp); + rdev->irq.hpd[2] = true; + break; + case RADEON_HPD_4: + WREG32(DC_HPD4_CONTROL, tmp); + rdev->irq.hpd[3] = true; + break; + case RADEON_HPD_5: + WREG32(DC_HPD5_CONTROL, tmp); + rdev->irq.hpd[4] = true; + break; + case RADEON_HPD_6: + WREG32(DC_HPD6_CONTROL, tmp); + rdev->irq.hpd[5] = true; + break; + default: + break; + } + } + if (rdev->irq.installed) + evergreen_irq_set(rdev); } - -void evergreen_bandwidth_update(struct radeon_device *rdev) +void evergreen_hpd_fini(struct radeon_device *rdev) { - /* XXX */ + struct drm_device *dev = rdev->ddev; + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + switch (radeon_connector->hpd.hpd) { + case RADEON_HPD_1: + WREG32(DC_HPD1_CONTROL, 0); + rdev->irq.hpd[0] = false; + break; + case RADEON_HPD_2: + WREG32(DC_HPD2_CONTROL, 0); + rdev->irq.hpd[1] = false; + break; + case RADEON_HPD_3: + WREG32(DC_HPD3_CONTROL, 0); + rdev->irq.hpd[2] = false; + break; + case RADEON_HPD_4: + WREG32(DC_HPD4_CONTROL, 0); + rdev->irq.hpd[3] = false; + break; + case RADEON_HPD_5: + WREG32(DC_HPD5_CONTROL, 0); + rdev->irq.hpd[4] = false; + break; + case RADEON_HPD_6: + WREG32(DC_HPD6_CONTROL, 0); + rdev->irq.hpd[5] = false; + break; + default: + break; + } + } } -void evergreen_hpd_fini(struct radeon_device *rdev) +void evergreen_bandwidth_update(struct radeon_device *rdev) { /* XXX */ } @@ -83,10 +238,31 @@ static int evergreen_mc_wait_for_idle(struct radeon_device *rdev) /* * GART */ +void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev) +{ + unsigned i; + u32 tmp; + + WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); + for (i = 0; i < rdev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE); + tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT; + if (tmp == 2) { + printk(KERN_WARNING "[drm] r600 flush TLB failed\n"); + return; + } + if (tmp) { + return; + } + udelay(1); + } +} + int evergreen_pcie_gart_enable(struct radeon_device *rdev) { u32 tmp; - int r, i; + int r; if (rdev->gart.table.vram.robj == NULL) { dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); @@ -121,10 +297,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 
WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(rdev->dummy_page.addr >> 12)); - for (i = 1; i < 7; i++) - WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); + WREG32(VM_CONTEXT1_CNTL, 0); - r600_pcie_gart_tlb_flush(rdev); + evergreen_pcie_gart_tlb_flush(rdev); rdev->gart.ready = true; return 0; } @@ -132,11 +307,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) void evergreen_pcie_gart_disable(struct radeon_device *rdev) { u32 tmp; - int i, r; + int r; /* Disable all tables */ - for (i = 0; i < 7; i++) - WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); + WREG32(VM_CONTEXT0_CNTL, 0); + WREG32(VM_CONTEXT1_CNTL, 0); /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING | @@ -173,7 +348,6 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev) void evergreen_agp_enable(struct radeon_device *rdev) { u32 tmp; - int i; /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | @@ -193,8 +367,8 @@ void evergreen_agp_enable(struct radeon_device *rdev) WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); - for (i = 0; i < 7; i++) - WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); + WREG32(VM_CONTEXT0_CNTL, 0); + WREG32(VM_CONTEXT1_CNTL, 0); } static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) @@ -400,40 +574,656 @@ static void evergreen_mc_program(struct radeon_device *rdev) rv515_vga_render_disable(rdev); } -#if 0 /* * CP. */ -static void evergreen_cp_stop(struct radeon_device *rdev) -{ - /* XXX */ -} - static int evergreen_cp_load_microcode(struct radeon_device *rdev) { - /* XXX */ + const __be32 *fw_data; + int i; + + if (!rdev->me_fw || !rdev->pfp_fw) + return -EINVAL; + r700_cp_stop(rdev); + WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); + + fw_data = (const __be32 *)rdev->pfp_fw->data; + WREG32(CP_PFP_UCODE_ADDR, 0); + for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++) + WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); + WREG32(CP_PFP_UCODE_ADDR, 0); + + fw_data = (const __be32 *)rdev->me_fw->data; + WREG32(CP_ME_RAM_WADDR, 0); + for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++) + WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); + + WREG32(CP_PFP_UCODE_ADDR, 0); + WREG32(CP_ME_RAM_WADDR, 0); + WREG32(CP_ME_RAM_RADDR, 0); return 0; } +int evergreen_cp_resume(struct radeon_device *rdev) +{ + u32 tmp; + u32 rb_bufsz; + int r; + + /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */ + WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP | + SOFT_RESET_PA | + SOFT_RESET_SH | + SOFT_RESET_VGT | + SOFT_RESET_SX)); + RREG32(GRBM_SOFT_RESET); + mdelay(15); + WREG32(GRBM_SOFT_RESET, 0); + RREG32(GRBM_SOFT_RESET); + + /* Set ring buffer size */ + rb_bufsz = drm_order(rdev->cp.ring_size / 8); + tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; +#ifdef __BIG_ENDIAN + tmp |= BUF_SWAP_32BIT; +#endif + WREG32(CP_RB_CNTL, tmp); + WREG32(CP_SEM_WAIT_TIMER, 0x4); + + /* Set the write pointer delay */ + WREG32(CP_RB_WPTR_DELAY, 0); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); + WREG32(CP_RB_RPTR_WR, 0); + WREG32(CP_RB_WPTR, 0); + WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF); + WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); + mdelay(1); + WREG32(CP_RB_CNTL, tmp); + + WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8); + WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); + + rdev->cp.rptr = RREG32(CP_RB_RPTR); + rdev->cp.wptr = RREG32(CP_RB_WPTR); + + r600_cp_start(rdev); + 
rdev->cp.ready = true; + r = radeon_ring_test(rdev); + if (r) { + rdev->cp.ready = false; + return r; + } + return 0; +} /* * Core functions */ -static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes, +static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, + u32 num_tile_pipes, u32 num_backends, u32 backend_disable_mask) { u32 backend_map = 0; + u32 enabled_backends_mask = 0; + u32 enabled_backends_count = 0; + u32 cur_pipe; + u32 swizzle_pipe[EVERGREEN_MAX_PIPES]; + u32 cur_backend = 0; + u32 i; + bool force_no_swizzle; + + if (num_tile_pipes > EVERGREEN_MAX_PIPES) + num_tile_pipes = EVERGREEN_MAX_PIPES; + if (num_tile_pipes < 1) + num_tile_pipes = 1; + if (num_backends > EVERGREEN_MAX_BACKENDS) + num_backends = EVERGREEN_MAX_BACKENDS; + if (num_backends < 1) + num_backends = 1; + + for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) { + if (((backend_disable_mask >> i) & 1) == 0) { + enabled_backends_mask |= (1 << i); + ++enabled_backends_count; + } + if (enabled_backends_count == num_backends) + break; + } + + if (enabled_backends_count == 0) { + enabled_backends_mask = 1; + enabled_backends_count = 1; + } + + if (enabled_backends_count != num_backends) + num_backends = enabled_backends_count; + + memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES); + switch (rdev->family) { + case CHIP_CEDAR: + case CHIP_REDWOOD: + force_no_swizzle = false; + break; + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + case CHIP_JUNIPER: + default: + force_no_swizzle = true; + break; + } + if (force_no_swizzle) { + bool last_backend_enabled = false; + + force_no_swizzle = false; + for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) { + if (((enabled_backends_mask >> i) & 1) == 1) { + if (last_backend_enabled) + force_no_swizzle = true; + last_backend_enabled = true; + } else + last_backend_enabled = false; + } + } + + switch (num_tile_pipes) { + case 1: + case 3: + case 5: + case 7: + DRM_ERROR("odd number of pipes!\n"); + break; + case 2: + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + break; + case 4: + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 1; + swizzle_pipe[3] = 3; + } + break; + case 6: + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + swizzle_pipe[4] = 4; + swizzle_pipe[5] = 5; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 4; + swizzle_pipe[3] = 1; + swizzle_pipe[4] = 3; + swizzle_pipe[5] = 5; + } + break; + case 8: + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + swizzle_pipe[4] = 4; + swizzle_pipe[5] = 5; + swizzle_pipe[6] = 6; + swizzle_pipe[7] = 7; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 4; + swizzle_pipe[3] = 6; + swizzle_pipe[4] = 1; + swizzle_pipe[5] = 3; + swizzle_pipe[6] = 5; + swizzle_pipe[7] = 7; + } + break; + } + + for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { + while (((1 << cur_backend) & enabled_backends_mask) == 0) + cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS; + + backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4))); + + cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS; + } return backend_map; } -#endif static void evergreen_gpu_init(struct radeon_device *rdev) { - /* XXX */ + u32 cc_rb_backend_disable = 0; + u32 
cc_gc_shader_pipe_config; + u32 gb_addr_config = 0; + u32 mc_shared_chmap, mc_arb_ramcfg; + u32 gb_backend_map; + u32 grbm_gfx_index; + u32 sx_debug_1; + u32 smx_dc_ctl0; + u32 sq_config; + u32 sq_lds_resource_mgmt; + u32 sq_gpr_resource_mgmt_1; + u32 sq_gpr_resource_mgmt_2; + u32 sq_gpr_resource_mgmt_3; + u32 sq_thread_resource_mgmt; + u32 sq_thread_resource_mgmt_2; + u32 sq_stack_resource_mgmt_1; + u32 sq_stack_resource_mgmt_2; + u32 sq_stack_resource_mgmt_3; + u32 vgt_cache_invalidation; + u32 hdp_host_path_cntl; + int i, j, num_shader_engines, ps_thread_count; + + switch (rdev->family) { + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + rdev->config.evergreen.num_ses = 2; + rdev->config.evergreen.max_pipes = 4; + rdev->config.evergreen.max_tile_pipes = 8; + rdev->config.evergreen.max_simds = 10; + rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 248; + rdev->config.evergreen.max_gs_threads = 32; + rdev->config.evergreen.max_stack_entries = 512; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 256; + rdev->config.evergreen.sx_max_export_pos_size = 64; + rdev->config.evergreen.sx_max_export_smx_size = 192; + rdev->config.evergreen.max_hw_contexts = 8; + rdev->config.evergreen.sq_num_cf_insts = 2; + + rdev->config.evergreen.sc_prim_fifo_size = 0x100; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + case CHIP_JUNIPER: + rdev->config.evergreen.num_ses = 1; + rdev->config.evergreen.max_pipes = 4; + rdev->config.evergreen.max_tile_pipes = 4; + rdev->config.evergreen.max_simds = 10; + rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 248; + rdev->config.evergreen.max_gs_threads = 32; + rdev->config.evergreen.max_stack_entries = 512; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 256; + rdev->config.evergreen.sx_max_export_pos_size = 64; + rdev->config.evergreen.sx_max_export_smx_size = 192; + rdev->config.evergreen.max_hw_contexts = 8; + rdev->config.evergreen.sq_num_cf_insts = 2; + + rdev->config.evergreen.sc_prim_fifo_size = 0x100; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + case CHIP_REDWOOD: + rdev->config.evergreen.num_ses = 1; + rdev->config.evergreen.max_pipes = 4; + rdev->config.evergreen.max_tile_pipes = 4; + rdev->config.evergreen.max_simds = 5; + rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 248; + rdev->config.evergreen.max_gs_threads = 32; + rdev->config.evergreen.max_stack_entries = 256; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 256; + rdev->config.evergreen.sx_max_export_pos_size = 64; + rdev->config.evergreen.sx_max_export_smx_size = 192; + rdev->config.evergreen.max_hw_contexts = 8; + rdev->config.evergreen.sq_num_cf_insts = 2; + + rdev->config.evergreen.sc_prim_fifo_size = 0x100; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + case CHIP_CEDAR: + default: + rdev->config.evergreen.num_ses = 1; + rdev->config.evergreen.max_pipes = 2; + rdev->config.evergreen.max_tile_pipes = 2; + rdev->config.evergreen.max_simds 
= 2; + rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 192; + rdev->config.evergreen.max_gs_threads = 16; + rdev->config.evergreen.max_stack_entries = 256; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 128; + rdev->config.evergreen.sx_max_export_pos_size = 32; + rdev->config.evergreen.sx_max_export_smx_size = 96; + rdev->config.evergreen.max_hw_contexts = 4; + rdev->config.evergreen.sq_num_cf_insts = 1; + + rdev->config.evergreen.sc_prim_fifo_size = 0x40; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + } + + /* Initialize HDP */ + for (i = 0, j = 0; i < 32; i++, j += 0x18) { + WREG32((0x2c14 + j), 0x00000000); + WREG32((0x2c18 + j), 0x00000000); + WREG32((0x2c1c + j), 0x00000000); + WREG32((0x2c20 + j), 0x00000000); + WREG32((0x2c24 + j), 0x00000000); + } + + WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); + + cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; + + cc_gc_shader_pipe_config |= + INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes) + & EVERGREEN_MAX_PIPES_MASK); + cc_gc_shader_pipe_config |= + INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds) + & EVERGREEN_MAX_SIMDS_MASK); + + cc_rb_backend_disable = + BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends) + & EVERGREEN_MAX_BACKENDS_MASK); + + + mc_shared_chmap = RREG32(MC_SHARED_CHMAP); + mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); + + switch (rdev->config.evergreen.max_tile_pipes) { + case 1: + default: + gb_addr_config |= NUM_PIPES(0); + break; + case 2: + gb_addr_config |= NUM_PIPES(1); + break; + case 4: + gb_addr_config |= NUM_PIPES(2); + break; + case 8: + gb_addr_config |= NUM_PIPES(3); + break; + } + + gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); + gb_addr_config |= BANK_INTERLEAVE_SIZE(0); + gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1); + gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1); + gb_addr_config |= NUM_GPUS(0); /* Hemlock? 
*/ + gb_addr_config |= MULTI_GPU_TILE_SIZE(2); + + if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2) + gb_addr_config |= ROW_SIZE(2); + else + gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT); + + if (rdev->ddev->pdev->device == 0x689e) { + u32 efuse_straps_4; + u32 efuse_straps_3; + u8 efuse_box_bit_131_124; + + WREG32(RCU_IND_INDEX, 0x204); + efuse_straps_4 = RREG32(RCU_IND_DATA); + WREG32(RCU_IND_INDEX, 0x203); + efuse_straps_3 = RREG32(RCU_IND_DATA); + efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28)); + + switch(efuse_box_bit_131_124) { + case 0x00: + gb_backend_map = 0x76543210; + break; + case 0x55: + gb_backend_map = 0x77553311; + break; + case 0x56: + gb_backend_map = 0x77553300; + break; + case 0x59: + gb_backend_map = 0x77552211; + break; + case 0x66: + gb_backend_map = 0x77443300; + break; + case 0x99: + gb_backend_map = 0x66552211; + break; + case 0x5a: + gb_backend_map = 0x77552200; + break; + case 0xaa: + gb_backend_map = 0x66442200; + break; + case 0x95: + gb_backend_map = 0x66553311; + break; + default: + DRM_ERROR("bad backend map, using default\n"); + gb_backend_map = + evergreen_get_tile_pipe_to_backend_map(rdev, + rdev->config.evergreen.max_tile_pipes, + rdev->config.evergreen.max_backends, + ((EVERGREEN_MAX_BACKENDS_MASK << + rdev->config.evergreen.max_backends) & + EVERGREEN_MAX_BACKENDS_MASK)); + break; + } + } else if (rdev->ddev->pdev->device == 0x68b9) { + u32 efuse_straps_3; + u8 efuse_box_bit_127_124; + + WREG32(RCU_IND_INDEX, 0x203); + efuse_straps_3 = RREG32(RCU_IND_DATA); + efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28; + + switch(efuse_box_bit_127_124) { + case 0x0: + gb_backend_map = 0x00003210; + break; + case 0x5: + case 0x6: + case 0x9: + case 0xa: + gb_backend_map = 0x00003311; + break; + default: + DRM_ERROR("bad backend map, using default\n"); + gb_backend_map = + evergreen_get_tile_pipe_to_backend_map(rdev, + rdev->config.evergreen.max_tile_pipes, + rdev->config.evergreen.max_backends, + ((EVERGREEN_MAX_BACKENDS_MASK << + rdev->config.evergreen.max_backends) & + EVERGREEN_MAX_BACKENDS_MASK)); + break; + } + } else + gb_backend_map = + evergreen_get_tile_pipe_to_backend_map(rdev, + rdev->config.evergreen.max_tile_pipes, + rdev->config.evergreen.max_backends, + ((EVERGREEN_MAX_BACKENDS_MASK << + rdev->config.evergreen.max_backends) & + EVERGREEN_MAX_BACKENDS_MASK)); + + WREG32(GB_BACKEND_MAP, gb_backend_map); + WREG32(GB_ADDR_CONFIG, gb_addr_config); + WREG32(DMIF_ADDR_CONFIG, gb_addr_config); + WREG32(HDP_ADDR_CONFIG, gb_addr_config); + + num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; + grbm_gfx_index = INSTANCE_BROADCAST_WRITES; + + for (i = 0; i < rdev->config.evergreen.num_ses; i++) { + u32 rb = cc_rb_backend_disable | (0xf0 << 16); + u32 sp = cc_gc_shader_pipe_config; + u32 gfx = grbm_gfx_index | SE_INDEX(i); + + if (i == num_shader_engines) { + rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK); + sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK); + } + + WREG32(GRBM_GFX_INDEX, gfx); + WREG32(RLC_GFX_INDEX, gfx); + + WREG32(CC_RB_BACKEND_DISABLE, rb); + WREG32(CC_SYS_RB_BACKEND_DISABLE, rb); + WREG32(GC_USER_RB_BACKEND_DISABLE, rb); + WREG32(CC_GC_SHADER_PIPE_CONFIG, sp); + } + + grbm_gfx_index |= SE_BROADCAST_WRITES; + WREG32(GRBM_GFX_INDEX, grbm_gfx_index); + WREG32(RLC_GFX_INDEX, grbm_gfx_index); + + WREG32(CGTS_SYS_TCC_DISABLE, 0); + WREG32(CGTS_TCC_DISABLE, 0); + WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); + 
WREG32(CGTS_USER_TCC_DISABLE, 0); + + /* set HW defaults for 3D engine */ + WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | + ROQ_IB2_START(0x2b))); + + WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); + + WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | + SYNC_GRADIENT | + SYNC_WALKER | + SYNC_ALIGNER)); + + sx_debug_1 = RREG32(SX_DEBUG_1); + sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; + WREG32(SX_DEBUG_1, sx_debug_1); + + + smx_dc_ctl0 = RREG32(SMX_DC_CTL0); + smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); + smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); + WREG32(SMX_DC_CTL0, smx_dc_ctl0); + + WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | + POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | + SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); + + WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | + SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | + SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); + + WREG32(VGT_NUM_INSTANCES, 1); + WREG32(SPI_CONFIG_CNTL, 0); + WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); + WREG32(CP_PERFMON_CNTL, 0); + + WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | + FETCH_FIFO_HIWATER(0x4) | + DONE_FIFO_HIWATER(0xe0) | + ALU_UPDATE_FIFO_HIWATER(0x8))); + + sq_config = RREG32(SQ_CONFIG); + sq_config &= ~(PS_PRIO(3) | + VS_PRIO(3) | + GS_PRIO(3) | + ES_PRIO(3)); + sq_config |= (VC_ENABLE | + EXPORT_SRC_C | + PS_PRIO(0) | + VS_PRIO(1) | + GS_PRIO(2) | + ES_PRIO(3)); + + if (rdev->family == CHIP_CEDAR) + /* no vertex cache */ + sq_config &= ~VC_ENABLE; + + sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); + + sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32); + sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32); + sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4); + sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); + sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); + sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); + sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); + + if (rdev->family == CHIP_CEDAR) + ps_thread_count = 96; + else + ps_thread_count = 128; + + sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); + sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; + sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; + sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; + sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; + sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; + + sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries 
* 1) / 6); + sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + + WREG32(SQ_CONFIG, sq_config); + WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1); + WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2); + WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3); + WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); + WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2); + WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1); + WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2); + WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3); + WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0); + WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt); + + WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | + FORCE_EOV_MAX_REZ_CNT(255))); + + if (rdev->family == CHIP_CEDAR) + vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); + else + vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC); + vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO); + WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); + + WREG32(VGT_GS_VERTEX_REUSE, 16); + WREG32(PA_SC_LINE_STIPPLE_STATE, 0); + + WREG32(CB_PERF_CTR0_SEL_0, 0); + WREG32(CB_PERF_CTR0_SEL_1, 0); + WREG32(CB_PERF_CTR1_SEL_0, 0); + WREG32(CB_PERF_CTR1_SEL_1, 0); + WREG32(CB_PERF_CTR2_SEL_0, 0); + WREG32(CB_PERF_CTR2_SEL_1, 0); + WREG32(CB_PERF_CTR3_SEL_0, 0); + WREG32(CB_PERF_CTR3_SEL_1, 0); + + hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); + WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); + + WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); + + udelay(50); + } int evergreen_mc_init(struct radeon_device *rdev) @@ -493,15 +1283,604 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev) return false; } +static int evergreen_gpu_soft_reset(struct radeon_device *rdev) +{ + struct evergreen_mc_save save; + u32 srbm_reset = 0; + u32 grbm_reset = 0; + + dev_info(rdev->dev, "GPU softreset \n"); + dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", + RREG32(GRBM_STATUS)); + dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", + RREG32(GRBM_STATUS_SE0)); + dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", + RREG32(GRBM_STATUS_SE1)); + dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", + RREG32(SRBM_STATUS)); + evergreen_mc_stop(rdev, &save); + if (evergreen_mc_wait_for_idle(rdev)) { + dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); + } + /* Disable CP parsing/prefetching */ + WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); + + /* reset all the gfx blocks */ + grbm_reset = (SOFT_RESET_CP | + SOFT_RESET_CB | + SOFT_RESET_DB | + SOFT_RESET_PA | + SOFT_RESET_SC | + SOFT_RESET_SPI | + SOFT_RESET_SH | + SOFT_RESET_SX | + SOFT_RESET_TC | + SOFT_RESET_TA | + SOFT_RESET_VC | + SOFT_RESET_VGT); + + dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset); + WREG32(GRBM_SOFT_RESET, grbm_reset); + (void)RREG32(GRBM_SOFT_RESET); + udelay(50); + WREG32(GRBM_SOFT_RESET, 0); + (void)RREG32(GRBM_SOFT_RESET); + + /* reset all the system blocks */ + srbm_reset = SRBM_SOFT_RESET_ALL_MASK; + + dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset); + WREG32(SRBM_SOFT_RESET, srbm_reset); + (void)RREG32(SRBM_SOFT_RESET); + udelay(50); + WREG32(SRBM_SOFT_RESET, 0); + (void)RREG32(SRBM_SOFT_RESET); + /* Wait a little for things to settle down */ + udelay(50); + dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", + RREG32(GRBM_STATUS)); + dev_info(rdev->dev, " 
GRBM_STATUS_SE0=0x%08X\n", + RREG32(GRBM_STATUS_SE0)); + dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", + RREG32(GRBM_STATUS_SE1)); + dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", + RREG32(SRBM_STATUS)); + /* After reset we need to reinit the asic as GPU often endup in an + * incoherent state. + */ + atom_asic_init(rdev->mode_info.atom_context); + evergreen_mc_resume(rdev, &save); + return 0; +} + int evergreen_asic_reset(struct radeon_device *rdev) { - /* FIXME: implement for evergreen */ + return evergreen_gpu_soft_reset(rdev); +} + +/* Interrupts */ + +u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc) +{ + switch (crtc) { + case 0: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET); + case 1: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET); + case 2: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET); + case 3: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET); + case 4: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET); + case 5: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET); + default: + return 0; + } +} + +void evergreen_disable_interrupt_state(struct radeon_device *rdev) +{ + u32 tmp; + + WREG32(CP_INT_CNTL, 0); + WREG32(GRBM_INT_CNTL, 0); + WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + + WREG32(DACA_AUTODETECT_INT_CONTROL, 0); + WREG32(DACB_AUTODETECT_INT_CONTROL, 0); + + tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD1_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD2_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD3_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD4_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD5_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD6_INT_CONTROL, tmp); + +} + +int evergreen_irq_set(struct radeon_device *rdev) +{ + u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; + u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; + u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; + + if (!rdev->irq.installed) { + WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); + return -EINVAL; + } + /* don't enable anything if the ih is disabled */ + if (!rdev->ih.enabled) { + r600_disable_interrupts(rdev); + /* force the active interrupt state to all disabled */ + evergreen_disable_interrupt_state(rdev); + return 0; + } + + hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd4 = RREG32(DC_HPD4_INT_CONTROL) & 
~DC_HPDx_INT_EN; + hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; + + if (rdev->irq.sw_int) { + DRM_DEBUG("evergreen_irq_set: sw int\n"); + cp_int_cntl |= RB_INT_ENABLE; + } + if (rdev->irq.crtc_vblank_int[0]) { + DRM_DEBUG("evergreen_irq_set: vblank 0\n"); + crtc1 |= VBLANK_INT_MASK; + } + if (rdev->irq.crtc_vblank_int[1]) { + DRM_DEBUG("evergreen_irq_set: vblank 1\n"); + crtc2 |= VBLANK_INT_MASK; + } + if (rdev->irq.crtc_vblank_int[2]) { + DRM_DEBUG("evergreen_irq_set: vblank 2\n"); + crtc3 |= VBLANK_INT_MASK; + } + if (rdev->irq.crtc_vblank_int[3]) { + DRM_DEBUG("evergreen_irq_set: vblank 3\n"); + crtc4 |= VBLANK_INT_MASK; + } + if (rdev->irq.crtc_vblank_int[4]) { + DRM_DEBUG("evergreen_irq_set: vblank 4\n"); + crtc5 |= VBLANK_INT_MASK; + } + if (rdev->irq.crtc_vblank_int[5]) { + DRM_DEBUG("evergreen_irq_set: vblank 5\n"); + crtc6 |= VBLANK_INT_MASK; + } + if (rdev->irq.hpd[0]) { + DRM_DEBUG("evergreen_irq_set: hpd 1\n"); + hpd1 |= DC_HPDx_INT_EN; + } + if (rdev->irq.hpd[1]) { + DRM_DEBUG("evergreen_irq_set: hpd 2\n"); + hpd2 |= DC_HPDx_INT_EN; + } + if (rdev->irq.hpd[2]) { + DRM_DEBUG("evergreen_irq_set: hpd 3\n"); + hpd3 |= DC_HPDx_INT_EN; + } + if (rdev->irq.hpd[3]) { + DRM_DEBUG("evergreen_irq_set: hpd 4\n"); + hpd4 |= DC_HPDx_INT_EN; + } + if (rdev->irq.hpd[4]) { + DRM_DEBUG("evergreen_irq_set: hpd 5\n"); + hpd5 |= DC_HPDx_INT_EN; + } + if (rdev->irq.hpd[5]) { + DRM_DEBUG("evergreen_irq_set: hpd 6\n"); + hpd6 |= DC_HPDx_INT_EN; + } + + WREG32(CP_INT_CNTL, cp_int_cntl); + + WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); + WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); + WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); + WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); + WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); + WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); + + WREG32(DC_HPD1_INT_CONTROL, hpd1); + WREG32(DC_HPD2_INT_CONTROL, hpd2); + WREG32(DC_HPD3_INT_CONTROL, hpd3); + WREG32(DC_HPD4_INT_CONTROL, hpd4); + WREG32(DC_HPD5_INT_CONTROL, hpd5); + WREG32(DC_HPD6_INT_CONTROL, hpd6); + return 0; } +static inline void evergreen_irq_ack(struct radeon_device *rdev, + u32 *disp_int, + u32 *disp_int_cont, + u32 *disp_int_cont2, + u32 *disp_int_cont3, + u32 *disp_int_cont4, + u32 *disp_int_cont5) +{ + u32 tmp; + + *disp_int = RREG32(DISP_INTERRUPT_STATUS); + *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); + *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); + *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); + *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); + *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); + + if (*disp_int & LB_D1_VBLANK_INTERRUPT) + WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); + if (*disp_int & LB_D1_VLINE_INTERRUPT) + WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); + + if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT) + WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); + if (*disp_int_cont & LB_D2_VLINE_INTERRUPT) + WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); + + if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) + WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); + if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT) + WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); + + if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) + WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); + if 
(*disp_int_cont3 & LB_D4_VLINE_INTERRUPT) + WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); + + if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) + WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); + if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT) + WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK); + + if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) + WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK); + if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT) + WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK); + + if (*disp_int & DC_HPD1_INTERRUPT) { + tmp = RREG32(DC_HPD1_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD1_INT_CONTROL, tmp); + } + if (*disp_int_cont & DC_HPD2_INTERRUPT) { + tmp = RREG32(DC_HPD2_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD2_INT_CONTROL, tmp); + } + if (*disp_int_cont2 & DC_HPD3_INTERRUPT) { + tmp = RREG32(DC_HPD3_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD3_INT_CONTROL, tmp); + } + if (*disp_int_cont3 & DC_HPD4_INTERRUPT) { + tmp = RREG32(DC_HPD4_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD4_INT_CONTROL, tmp); + } + if (*disp_int_cont4 & DC_HPD5_INTERRUPT) { + tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (*disp_int_cont5 & DC_HPD6_INTERRUPT) { + tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +} + +void evergreen_irq_disable(struct radeon_device *rdev) +{ + u32 disp_int, disp_int_cont, disp_int_cont2; + u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; + + r600_disable_interrupts(rdev); + /* Wait and acknowledge irq */ + mdelay(1); + evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, + &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); + evergreen_disable_interrupt_state(rdev); +} + +static void evergreen_irq_suspend(struct radeon_device *rdev) +{ + evergreen_irq_disable(rdev); + r600_rlc_stop(rdev); +} + +static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) +{ + u32 wptr, tmp; + + /* XXX use writeback */ + wptr = RREG32(IH_RB_WPTR); + + if (wptr & RB_OVERFLOW) { + /* When a ring buffer overflow happen start parsing interrupt + * from the last not overwritten vector (wptr + 16). Hopefully + * this should allow us to catchup. 
+ */ + dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", + wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); + rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; + tmp = RREG32(IH_RB_CNTL); + tmp |= IH_WPTR_OVERFLOW_CLEAR; + WREG32(IH_RB_CNTL, tmp); + } + return (wptr & rdev->ih.ptr_mask); +} + +int evergreen_irq_process(struct radeon_device *rdev) +{ + u32 wptr = evergreen_get_ih_wptr(rdev); + u32 rptr = rdev->ih.rptr; + u32 src_id, src_data; + u32 ring_index; + u32 disp_int, disp_int_cont, disp_int_cont2; + u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; + unsigned long flags; + bool queue_hotplug = false; + + DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); + if (!rdev->ih.enabled) + return IRQ_NONE; + + spin_lock_irqsave(&rdev->ih.lock, flags); + + if (rptr == wptr) { + spin_unlock_irqrestore(&rdev->ih.lock, flags); + return IRQ_NONE; + } + if (rdev->shutdown) { + spin_unlock_irqrestore(&rdev->ih.lock, flags); + return IRQ_NONE; + } + +restart_ih: + /* display interrupts */ + evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, + &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); + + rdev->ih.wptr = wptr; + while (rptr != wptr) { + /* wptr/rptr are in bytes! */ + ring_index = rptr / 4; + src_id = rdev->ih.ring[ring_index] & 0xff; + src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; + + switch (src_id) { + case 1: /* D1 vblank/vline */ + switch (src_data) { + case 0: /* D1 vblank */ + if (disp_int & LB_D1_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 0); + wake_up(&rdev->irq.vblank_queue); + disp_int &= ~LB_D1_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D1 vblank\n"); + } + break; + case 1: /* D1 vline */ + if (disp_int & LB_D1_VLINE_INTERRUPT) { + disp_int &= ~LB_D1_VLINE_INTERRUPT; + DRM_DEBUG("IH: D1 vline\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 2: /* D2 vblank/vline */ + switch (src_data) { + case 0: /* D2 vblank */ + if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 1); + wake_up(&rdev->irq.vblank_queue); + disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D2 vblank\n"); + } + break; + case 1: /* D2 vline */ + if (disp_int_cont & LB_D2_VLINE_INTERRUPT) { + disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; + DRM_DEBUG("IH: D2 vline\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 3: /* D3 vblank/vline */ + switch (src_data) { + case 0: /* D3 vblank */ + if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 2); + wake_up(&rdev->irq.vblank_queue); + disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D3 vblank\n"); + } + break; + case 1: /* D3 vline */ + if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { + disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; + DRM_DEBUG("IH: D3 vline\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 4: /* D4 vblank/vline */ + switch (src_data) { + case 0: /* D4 vblank */ + if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 3); + wake_up(&rdev->irq.vblank_queue); + disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D4 vblank\n"); + } + break; + case 1: /* D4 vline */ + if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { + disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; + DRM_DEBUG("IH: D4 vline\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; 
+ case 5: /* D5 vblank/vline */ + switch (src_data) { + case 0: /* D5 vblank */ + if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 4); + wake_up(&rdev->irq.vblank_queue); + disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D5 vblank\n"); + } + break; + case 1: /* D5 vline */ + if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { + disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; + DRM_DEBUG("IH: D5 vline\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 6: /* D6 vblank/vline */ + switch (src_data) { + case 0: /* D6 vblank */ + if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 5); + wake_up(&rdev->irq.vblank_queue); + disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D6 vblank\n"); + } + break; + case 1: /* D6 vline */ + if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { + disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; + DRM_DEBUG("IH: D6 vline\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 42: /* HPD hotplug */ + switch (src_data) { + case 0: + if (disp_int & DC_HPD1_INTERRUPT) { + disp_int &= ~DC_HPD1_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD1\n"); + } + break; + case 1: + if (disp_int_cont & DC_HPD2_INTERRUPT) { + disp_int_cont &= ~DC_HPD2_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD2\n"); + } + break; + case 2: + if (disp_int_cont2 & DC_HPD3_INTERRUPT) { + disp_int_cont2 &= ~DC_HPD3_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD3\n"); + } + break; + case 3: + if (disp_int_cont3 & DC_HPD4_INTERRUPT) { + disp_int_cont3 &= ~DC_HPD4_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD4\n"); + } + break; + case 4: + if (disp_int_cont4 & DC_HPD5_INTERRUPT) { + disp_int_cont4 &= ~DC_HPD5_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD5\n"); + } + break; + case 5: + if (disp_int_cont5 & DC_HPD6_INTERRUPT) { + disp_int_cont5 &= ~DC_HPD6_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD6\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 176: /* CP_INT in ring buffer */ + case 177: /* CP_INT in IB1 */ + case 178: /* CP_INT in IB2 */ + DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); + radeon_fence_process(rdev); + break; + case 181: /* CP EOP event */ + DRM_DEBUG("IH: CP EOP\n"); + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + + /* wptr/rptr are in bytes! 
*/ + rptr += 16; + rptr &= rdev->ih.ptr_mask; + } + /* make sure wptr hasn't changed while processing */ + wptr = evergreen_get_ih_wptr(rdev); + if (wptr != rdev->ih.wptr) + goto restart_ih; + if (queue_hotplug) + queue_work(rdev->wq, &rdev->hotplug_work); + rdev->ih.rptr = rptr; + WREG32(IH_RB_RPTR, rdev->ih.rptr); + spin_unlock_irqrestore(&rdev->ih.lock, flags); + return IRQ_HANDLED; +} + static int evergreen_startup(struct radeon_device *rdev) { -#if 0 int r; if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { @@ -511,17 +1890,15 @@ static int evergreen_startup(struct radeon_device *rdev) return r; } } -#endif + evergreen_mc_program(rdev); -#if 0 if (rdev->flags & RADEON_IS_AGP) { - evergreem_agp_enable(rdev); + evergreen_agp_enable(rdev); } else { r = evergreen_pcie_gart_enable(rdev); if (r) return r; } -#endif evergreen_gpu_init(rdev); #if 0 if (!rdev->r600_blit.shader_obj) { @@ -542,6 +1919,7 @@ static int evergreen_startup(struct radeon_device *rdev) DRM_ERROR("failed to pin blit object %d\n", r); return r; } +#endif /* Enable IRQ */ r = r600_irq_init(rdev); @@ -550,7 +1928,7 @@ static int evergreen_startup(struct radeon_device *rdev) radeon_irq_kms_fini(rdev); return r; } - r600_irq_set(rdev); + evergreen_irq_set(rdev); r = radeon_ring_init(rdev, rdev->cp.ring_size); if (r) @@ -558,12 +1936,12 @@ static int evergreen_startup(struct radeon_device *rdev) r = evergreen_cp_load_microcode(rdev); if (r) return r; - r = r600_cp_resume(rdev); + r = evergreen_cp_resume(rdev); if (r) return r; /* write back buffer are not vital so don't worry about failure */ r600_wb_enable(rdev); -#endif + return 0; } @@ -588,13 +1966,13 @@ int evergreen_resume(struct radeon_device *rdev) DRM_ERROR("r600 startup failed on resume\n"); return r; } -#if 0 + r = r600_ib_test(rdev); if (r) { DRM_ERROR("radeon: failled testing IB (%d).\n", r); return r; } -#endif + return r; } @@ -603,12 +1981,14 @@ int evergreen_suspend(struct radeon_device *rdev) { #if 0 int r; - +#endif /* FIXME: we should wait for ring to be empty */ r700_cp_stop(rdev); rdev->cp.ready = false; + evergreen_irq_suspend(rdev); r600_wb_disable(rdev); evergreen_pcie_gart_disable(rdev); +#if 0 /* unpin shaders bo */ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); if (likely(r == 0)) { @@ -708,7 +2088,7 @@ int evergreen_init(struct radeon_device *rdev) r = radeon_bo_init(rdev); if (r) return r; -#if 0 + r = radeon_irq_kms_init(rdev); if (r) return r; @@ -722,14 +2102,16 @@ int evergreen_init(struct radeon_device *rdev) r = r600_pcie_gart_init(rdev); if (r) return r; -#endif + rdev->accel_working = false; r = evergreen_startup(rdev); if (r) { - evergreen_suspend(rdev); - /*r600_wb_fini(rdev);*/ - /*radeon_ring_fini(rdev);*/ - /*evergreen_pcie_gart_fini(rdev);*/ + dev_err(rdev->dev, "disabling GPU acceleration\n"); + r700_cp_fini(rdev); + r600_wb_fini(rdev); + r600_irq_fini(rdev); + radeon_irq_kms_fini(rdev); + evergreen_pcie_gart_fini(rdev); rdev->accel_working = false; } if (rdev->accel_working) { @@ -750,15 +2132,12 @@ int evergreen_init(struct radeon_device *rdev) void evergreen_fini(struct radeon_device *rdev) { radeon_pm_fini(rdev); - evergreen_suspend(rdev); -#if 0 - r600_blit_fini(rdev); + /*r600_blit_fini(rdev);*/ + r700_cp_fini(rdev); + r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); - radeon_ring_fini(rdev); - r600_wb_fini(rdev); evergreen_pcie_gart_fini(rdev); -#endif radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); diff --git a/drivers/gpu/drm/radeon/evergreend.h 
b/drivers/gpu/drm/radeon/evergreend.h new file mode 100644 index 000000000000..93e9e17ad54a --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -0,0 +1,556 @@ +/* + * Copyright 2010 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Alex Deucher + */ +#ifndef EVERGREEND_H +#define EVERGREEND_H + +#define EVERGREEN_MAX_SH_GPRS 256 +#define EVERGREEN_MAX_TEMP_GPRS 16 +#define EVERGREEN_MAX_SH_THREADS 256 +#define EVERGREEN_MAX_SH_STACK_ENTRIES 4096 +#define EVERGREEN_MAX_FRC_EOV_CNT 16384 +#define EVERGREEN_MAX_BACKENDS 8 +#define EVERGREEN_MAX_BACKENDS_MASK 0xFF +#define EVERGREEN_MAX_SIMDS 16 +#define EVERGREEN_MAX_SIMDS_MASK 0xFFFF +#define EVERGREEN_MAX_PIPES 8 +#define EVERGREEN_MAX_PIPES_MASK 0xFF +#define EVERGREEN_MAX_LDS_NUM 0xFFFF + +/* Registers */ + +#define RCU_IND_INDEX 0x100 +#define RCU_IND_DATA 0x104 + +#define GRBM_GFX_INDEX 0x802C +#define INSTANCE_INDEX(x) ((x) << 0) +#define SE_INDEX(x) ((x) << 16) +#define INSTANCE_BROADCAST_WRITES (1 << 30) +#define SE_BROADCAST_WRITES (1 << 31) +#define RLC_GFX_INDEX 0x3fC4 +#define CC_GC_SHADER_PIPE_CONFIG 0x8950 +#define WRITE_DIS (1 << 0) +#define CC_RB_BACKEND_DISABLE 0x98F4 +#define BACKEND_DISABLE(x) ((x) << 16) +#define GB_ADDR_CONFIG 0x98F8 +#define NUM_PIPES(x) ((x) << 0) +#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4) +#define BANK_INTERLEAVE_SIZE(x) ((x) << 8) +#define NUM_SHADER_ENGINES(x) ((x) << 12) +#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16) +#define NUM_GPUS(x) ((x) << 20) +#define MULTI_GPU_TILE_SIZE(x) ((x) << 24) +#define ROW_SIZE(x) ((x) << 28) +#define GB_BACKEND_MAP 0x98FC +#define DMIF_ADDR_CONFIG 0xBD4 +#define HDP_ADDR_CONFIG 0x2F48 + +#define CC_SYS_RB_BACKEND_DISABLE 0x3F88 +#define GC_USER_RB_BACKEND_DISABLE 0x9B7C + +#define CGTS_SYS_TCC_DISABLE 0x3F90 +#define CGTS_TCC_DISABLE 0x9148 +#define CGTS_USER_SYS_TCC_DISABLE 0x3F94 +#define CGTS_USER_TCC_DISABLE 0x914C + +#define CONFIG_MEMSIZE 0x5428 + +#define CP_ME_CNTL 0x86D8 +#define CP_ME_HALT (1 << 28) +#define CP_PFP_HALT (1 << 26) +#define CP_ME_RAM_DATA 0xC160 +#define CP_ME_RAM_RADDR 0xC158 +#define CP_ME_RAM_WADDR 0xC15C +#define CP_MEQ_THRESHOLDS 0x8764 +#define STQ_SPLIT(x) ((x) << 0) +#define CP_PERFMON_CNTL 0x87FC +#define CP_PFP_UCODE_ADDR 0xC150 +#define CP_PFP_UCODE_DATA 0xC154 +#define CP_QUEUE_THRESHOLDS 0x8760 +#define ROQ_IB1_START(x) ((x) << 0) +#define ROQ_IB2_START(x) ((x) << 8) +#define CP_RB_BASE 0xC100 +#define CP_RB_CNTL 0xC104 +#define RB_BUFSZ(x) ((x) 
+#define		RB_BLKSZ(x)			((x) << 8)
+#define		RB_NO_UPDATE			(1 << 27)
+#define		RB_RPTR_WR_ENA			(1 << 31)
+#define		BUF_SWAP_32BIT			(2 << 16)
+#define CP_RB_RPTR			0x8700
+#define CP_RB_RPTR_ADDR			0xC10C
+#define CP_RB_RPTR_ADDR_HI		0xC110
+#define CP_RB_RPTR_WR			0xC108
+#define CP_RB_WPTR			0xC114
+#define CP_RB_WPTR_ADDR			0xC118
+#define CP_RB_WPTR_ADDR_HI		0xC11C
+#define CP_RB_WPTR_DELAY		0x8704
+#define CP_SEM_WAIT_TIMER		0x85BC
+#define CP_DEBUG			0xC1FC
+
+
+#define GC_USER_SHADER_PIPE_CONFIG	0x8954
+#define		INACTIVE_QD_PIPES(x)		((x) << 8)
+#define		INACTIVE_QD_PIPES_MASK		0x0000FF00
+#define		INACTIVE_SIMDS(x)		((x) << 16)
+#define		INACTIVE_SIMDS_MASK		0x00FF0000
+
+#define GRBM_CNTL			0x8000
+#define		GRBM_READ_TIMEOUT(x)		((x) << 0)
+#define GRBM_SOFT_RESET			0x8020
+#define		SOFT_RESET_CP			(1 << 0)
+#define		SOFT_RESET_CB			(1 << 1)
+#define		SOFT_RESET_DB			(1 << 3)
+#define		SOFT_RESET_PA			(1 << 5)
+#define		SOFT_RESET_SC			(1 << 6)
+#define		SOFT_RESET_SPI			(1 << 8)
+#define		SOFT_RESET_SH			(1 << 9)
+#define		SOFT_RESET_SX			(1 << 10)
+#define		SOFT_RESET_TC			(1 << 11)
+#define		SOFT_RESET_TA			(1 << 12)
+#define		SOFT_RESET_VC			(1 << 13)
+#define		SOFT_RESET_VGT			(1 << 14)
+
+#define GRBM_STATUS			0x8010
+#define		CMDFIFO_AVAIL_MASK		0x0000000F
+#define		SRBM_RQ_PENDING			(1 << 5)
+#define		CF_RQ_PENDING			(1 << 7)
+#define		PF_RQ_PENDING			(1 << 8)
+#define		GRBM_EE_BUSY			(1 << 10)
+#define		SX_CLEAN			(1 << 11)
+#define		DB_CLEAN			(1 << 12)
+#define		CB_CLEAN			(1 << 13)
+#define		TA_BUSY				(1 << 14)
+#define		VGT_BUSY_NO_DMA			(1 << 16)
+#define		VGT_BUSY			(1 << 17)
+#define		SX_BUSY				(1 << 20)
+#define		SH_BUSY				(1 << 21)
+#define		SPI_BUSY			(1 << 22)
+#define		SC_BUSY				(1 << 24)
+#define		PA_BUSY				(1 << 25)
+#define		DB_BUSY				(1 << 26)
+#define		CP_COHERENCY_BUSY		(1 << 28)
+#define		CP_BUSY				(1 << 29)
+#define		CB_BUSY				(1 << 30)
+#define		GUI_ACTIVE			(1 << 31)
+#define GRBM_STATUS_SE0			0x8014
+#define GRBM_STATUS_SE1			0x8018
+#define		SE_SX_CLEAN			(1 << 0)
+#define		SE_DB_CLEAN			(1 << 1)
+#define		SE_CB_CLEAN			(1 << 2)
+#define		SE_TA_BUSY			(1 << 25)
+#define		SE_SX_BUSY			(1 << 26)
+#define		SE_SPI_BUSY			(1 << 27)
+#define		SE_SH_BUSY			(1 << 28)
+#define		SE_SC_BUSY			(1 << 29)
+#define		SE_DB_BUSY			(1 << 30)
+#define		SE_CB_BUSY			(1 << 31)
+
+#define HDP_HOST_PATH_CNTL		0x2C00
+#define HDP_NONSURFACE_BASE		0x2C04
+#define HDP_NONSURFACE_INFO		0x2C08
+#define HDP_NONSURFACE_SIZE		0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL	0x54A0
+#define HDP_TILING_CONFIG		0x2F3C
+
+#define MC_SHARED_CHMAP			0x2004
+#define		NOOFCHAN_SHIFT			12
+#define		NOOFCHAN_MASK			0x00003000
+
+#define MC_ARB_RAMCFG			0x2760
+#define		NOOFBANK_SHIFT			0
+#define		NOOFBANK_MASK			0x00000003
+#define		NOOFRANK_SHIFT			2
+#define		NOOFRANK_MASK			0x00000004
+#define		NOOFROWS_SHIFT			3
+#define		NOOFROWS_MASK			0x00000038
+#define		NOOFCOLS_SHIFT			6
+#define		NOOFCOLS_MASK			0x000000C0
+#define		CHANSIZE_SHIFT			8
+#define		CHANSIZE_MASK			0x00000100
+#define		BURSTLENGTH_SHIFT		9
+#define		BURSTLENGTH_MASK		0x00000200
+#define		CHANSIZE_OVERRIDE		(1 << 11)
+#define MC_VM_AGP_TOP			0x2028
+#define MC_VM_AGP_BOT			0x202C
+#define MC_VM_AGP_BASE			0x2030
+#define MC_VM_FB_LOCATION		0x2024
+#define MC_VM_MB_L1_TLB0_CNTL		0x2234
+#define MC_VM_MB_L1_TLB1_CNTL		0x2238
+#define MC_VM_MB_L1_TLB2_CNTL		0x223C
+#define MC_VM_MB_L1_TLB3_CNTL		0x2240
+#define		ENABLE_L1_TLB			(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING	(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY	(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP	(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS	(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS	(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		EFFECTIVE_L1_TLB_SIZE(x)	((x)<<15)
+#define		EFFECTIVE_L1_QUEUE_SIZE(x)	((x)<<18)
+#define MC_VM_MD_L1_TLB0_CNTL		0x2654
+#define MC_VM_MD_L1_TLB1_CNTL		0x2658
+#define MC_VM_MD_L1_TLB2_CNTL		0x265C
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR	0x203C
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR		0x2038
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR		0x2034
+
+#define PA_CL_ENHANCE			0x8A14
+#define		CLIP_VTX_REORDER_ENA		(1 << 0)
+#define		NUM_CLIP_SEQ(x)			((x) << 1)
+#define PA_SC_AA_CONFIG			0x28C04
+#define PA_SC_CLIPRECT_RULE		0x2820C
+#define PA_SC_EDGERULE			0x28230
+#define PA_SC_FIFO_SIZE			0x8BCC
+#define		SC_PRIM_FIFO_SIZE(x)		((x) << 0)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)	((x) << 12)
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)	((x) << 20)
+#define PA_SC_FORCE_EOV_MAX_CNTS	0x8B24
+#define		FORCE_EOV_MAX_CLK_CNT(x)	((x) << 0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)	((x) << 16)
+#define PA_SC_LINE_STIPPLE		0x28A0C
+#define PA_SC_LINE_STIPPLE_STATE	0x8B10
+
+#define SCRATCH_REG0			0x8500
+#define SCRATCH_REG1			0x8504
+#define SCRATCH_REG2			0x8508
+#define SCRATCH_REG3			0x850C
+#define SCRATCH_REG4			0x8510
+#define SCRATCH_REG5			0x8514
+#define SCRATCH_REG6			0x8518
+#define SCRATCH_REG7			0x851C
+#define SCRATCH_UMSK			0x8540
+#define SCRATCH_ADDR			0x8544
+
+#define SMX_DC_CTL0			0xA020
+#define		USE_HASH_FUNCTION		(1 << 0)
+#define		NUMBER_OF_SETS(x)		((x) << 1)
+#define		FLUSH_ALL_ON_EVENT		(1 << 10)
+#define		STALL_ON_EVENT			(1 << 11)
+#define SMX_EVENT_CTL			0xA02C
+#define		ES_FLUSH_CTL(x)			((x) << 0)
+#define		GS_FLUSH_CTL(x)			((x) << 3)
+#define		ACK_FLUSH_CTL(x)		((x) << 6)
+#define		SYNC_FLUSH_CTL			(1 << 8)
+
+#define SPI_CONFIG_CNTL			0x9100
+#define		GPR_WRITE_PRIORITY(x)		((x) << 0)
+#define SPI_CONFIG_CNTL_1		0x913C
+#define		VTX_DONE_DELAY(x)		((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW		(1 << 4)
+#define SPI_INPUT_Z			0x286D8
+#define SPI_PS_IN_CONTROL_0		0x286CC
+#define		NUM_INTERP(x)			((x)<<0)
+#define		POSITION_ENA			(1<<8)
+#define		POSITION_CENTROID		(1<<9)
+#define		POSITION_ADDR(x)		((x)<<10)
+#define		PARAM_GEN(x)			((x)<<15)
+#define		PARAM_GEN_ADDR(x)		((x)<<19)
+#define		BARYC_SAMPLE_CNTL(x)		((x)<<26)
+#define		PERSP_GRADIENT_ENA		(1<<28)
+#define		LINEAR_GRADIENT_ENA		(1<<29)
+#define		POSITION_SAMPLE			(1<<30)
+#define		BARYC_AT_SAMPLE_ENA		(1<<31)
+
+#define SQ_CONFIG			0x8C00
+#define		VC_ENABLE			(1 << 0)
+#define		EXPORT_SRC_C			(1 << 1)
+#define		CS_PRIO(x)			((x) << 18)
+#define		LS_PRIO(x)			((x) << 20)
+#define		HS_PRIO(x)			((x) << 22)
+#define		PS_PRIO(x)			((x) << 24)
+#define		VS_PRIO(x)			((x) << 26)
+#define		GS_PRIO(x)			((x) << 28)
+#define		ES_PRIO(x)			((x) << 30)
+#define SQ_GPR_RESOURCE_MGMT_1		0x8C04
+#define		NUM_PS_GPRS(x)			((x) << 0)
+#define		NUM_VS_GPRS(x)			((x) << 16)
+#define		NUM_CLAUSE_TEMP_GPRS(x)		((x) << 28)
+#define SQ_GPR_RESOURCE_MGMT_2		0x8C08
+#define		NUM_GS_GPRS(x)			((x) << 0)
+#define		NUM_ES_GPRS(x)			((x) << 16)
+#define SQ_GPR_RESOURCE_MGMT_3		0x8C0C
+#define		NUM_HS_GPRS(x)			((x) << 0)
+#define		NUM_LS_GPRS(x)			((x) << 16)
+#define SQ_THREAD_RESOURCE_MGMT		0x8C18
+#define		NUM_PS_THREADS(x)		((x) << 0)
+#define		NUM_VS_THREADS(x)		((x) << 8)
+#define		NUM_GS_THREADS(x)		((x) << 16)
+#define		NUM_ES_THREADS(x)		((x) << 24)
+#define SQ_THREAD_RESOURCE_MGMT_2	0x8C1C
+#define		NUM_HS_THREADS(x)		((x) << 0)
+#define		NUM_LS_THREADS(x)		((x) << 8)
+#define SQ_STACK_RESOURCE_MGMT_1	0x8C20
+#define		NUM_PS_STACK_ENTRIES(x)		((x) << 0)
+#define		NUM_VS_STACK_ENTRIES(x)		((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_2	0x8C24
+#define		NUM_GS_STACK_ENTRIES(x)		((x) << 0)
+#define		NUM_ES_STACK_ENTRIES(x)		((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_3	0x8C28
+#define		NUM_HS_STACK_ENTRIES(x)		((x) << 0)
+#define		NUM_LS_STACK_ENTRIES(x)		((x) << 16)
+#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ	0x8D8C
+#define SQ_LDS_RESOURCE_MGMT		0x8E2C
+
+#define SQ_MS_FIFO_SIZES		0x8CF0
+#define		CACHE_FIFO_SIZE(x)		((x) << 0)
+#define		FETCH_FIFO_HIWATER(x)		((x) << 8)
+#define		DONE_FIFO_HIWATER(x)		((x) << 16)
+#define		ALU_UPDATE_FIFO_HIWATER(x)	((x) << 24)
+
+#define SX_DEBUG_1			0x9058
+#define		ENABLE_NEW_SMX_ADDRESS		(1 << 16)
+#define SX_EXPORT_BUFFER_SIZES		0x900C
+#define		COLOR_BUFFER_SIZE(x)		((x) << 0)
+#define		POSITION_BUFFER_SIZE(x)		((x) << 8)
+#define		SMX_BUFFER_SIZE(x)		((x) << 16)
+#define SX_MISC				0x28350
+
+#define CB_PERF_CTR0_SEL_0		0x9A20
+#define CB_PERF_CTR0_SEL_1		0x9A24
+#define CB_PERF_CTR1_SEL_0		0x9A28
+#define CB_PERF_CTR1_SEL_1		0x9A2C
+#define CB_PERF_CTR2_SEL_0		0x9A30
+#define CB_PERF_CTR2_SEL_1		0x9A34
+#define CB_PERF_CTR3_SEL_0		0x9A38
+#define CB_PERF_CTR3_SEL_1		0x9A3C
+
+#define TA_CNTL_AUX			0x9508
+#define		DISABLE_CUBE_WRAP		(1 << 0)
+#define		DISABLE_CUBE_ANISO		(1 << 1)
+#define		SYNC_GRADIENT			(1 << 24)
+#define		SYNC_WALKER			(1 << 25)
+#define		SYNC_ALIGNER			(1 << 26)
+
+#define VGT_CACHE_INVALIDATION		0x88C4
+#define		CACHE_INVALIDATION(x)		((x) << 0)
+#define			VC_ONLY				0
+#define			TC_ONLY				1
+#define			VC_AND_TC			2
+#define		AUTO_INVLD_EN(x)		((x) << 6)
+#define			NO_AUTO				0
+#define			ES_AUTO				1
+#define			GS_AUTO				2
+#define			ES_AND_GS_AUTO			3
+#define VGT_GS_VERTEX_REUSE		0x88D4
+#define VGT_NUM_INSTANCES		0x8974
+#define VGT_OUT_DEALLOC_CNTL		0x28C5C
+#define		DEALLOC_DIST_MASK		0x0000007F
+#define VGT_VERTEX_REUSE_BLOCK_CNTL	0x28C58
+#define		VTX_REUSE_DEPTH_MASK		0x000000FF
+
+#define VM_CONTEXT0_CNTL		0x1410
+#define		ENABLE_CONTEXT			(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)		(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT	(1 << 4)
+#define VM_CONTEXT1_CNTL		0x1414
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR	0x153C
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR		0x157C
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR	0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
+#define VM_CONTEXT0_REQUEST_RESPONSE	0x1470
+#define		REQUEST_TYPE(x)			(((x) & 0xf) << 0)
+#define		RESPONSE_TYPE_MASK		0x000000F0
+#define		RESPONSE_TYPE_SHIFT		4
+#define VM_L2_CNTL			0x1400
+#define		ENABLE_L2_CACHE			(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING	(1 << 1)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE	(1 << 9)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)	(((x) & 7) << 14)
+#define VM_L2_CNTL2			0x1404
+#define		INVALIDATE_ALL_L1_TLBS		(1 << 0)
+#define		INVALIDATE_L2_CACHE		(1 << 1)
+#define VM_L2_CNTL3			0x1408
+#define		BANK_SELECT(x)			((x) << 0)
+#define		CACHE_UPDATE_MODE(x)		((x) << 6)
+#define VM_L2_STATUS			0x140C
+#define		L2_BUSY				(1 << 0)
+
+#define WAIT_UNTIL			0x8040
+
+#define SRBM_STATUS			0x0E50
+#define SRBM_SOFT_RESET			0x0E60
+#define		SRBM_SOFT_RESET_ALL_MASK	0x00FEEFA6
+#define		SOFT_RESET_BIF			(1 << 1)
+#define		SOFT_RESET_CG			(1 << 2)
+#define		SOFT_RESET_DC			(1 << 5)
+#define		SOFT_RESET_GRBM			(1 << 8)
+#define		SOFT_RESET_HDP			(1 << 9)
+#define		SOFT_RESET_IH			(1 << 10)
+#define		SOFT_RESET_MC			(1 << 11)
+#define		SOFT_RESET_RLC			(1 << 13)
+#define		SOFT_RESET_ROM			(1 << 14)
+#define		SOFT_RESET_SEM			(1 << 15)
+#define		SOFT_RESET_VMC			(1 << 17)
+#define		SOFT_RESET_TST			(1 << 21)
+#define		SOFT_RESET_REGBB		(1 << 22)
+#define		SOFT_RESET_ORB			(1 << 23)
+
+#define IH_RB_CNTL			0x3e00
+#   define IH_RB_ENABLE			(1 << 0)
+#   define IH_IB_SIZE(x)		((x) << 1) /* log2 */
+#   define IH_RB_FULL_DRAIN_ENABLE	(1 << 6)
+#   define IH_WPTR_WRITEBACK_ENABLE	(1 << 8)
+#   define IH_WPTR_WRITEBACK_TIMER(x)	((x) << 9) /* log2 */
+#   define IH_WPTR_OVERFLOW_ENABLE	(1 << 16)
+#   define IH_WPTR_OVERFLOW_CLEAR	(1 << 31)
+#define IH_RB_BASE			0x3e04
+#define IH_RB_RPTR			0x3e08
+#define IH_RB_WPTR			0x3e0c
+#   define RB_OVERFLOW			(1 << 0)
+#   define WPTR_OFFSET_MASK		0x3fffc
+#define IH_RB_WPTR_ADDR_HI		0x3e10
+#define IH_RB_WPTR_ADDR_LO		0x3e14
+#define IH_CNTL				0x3e18
+#   define ENABLE_INTR			(1 << 0)
+#   define IH_MC_SWAP(x)		((x) << 2)
+#   define IH_MC_SWAP_NONE		0
+#   define IH_MC_SWAP_16BIT		1
+#   define IH_MC_SWAP_32BIT		2
+#   define IH_MC_SWAP_64BIT		3
+#   define RPTR_REARM			(1 << 4)
+#   define MC_WRREQ_CREDIT(x)		((x) << 15)
+#   define MC_WR_CLEAN_CNT(x)		((x) << 20)
+
+#define CP_INT_CNTL			0xc124
+#   define CNTX_BUSY_INT_ENABLE		(1 << 19)
+#   define CNTX_EMPTY_INT_ENABLE	(1 << 20)
+#   define SCRATCH_INT_ENABLE		(1 << 25)
+#   define TIME_STAMP_INT_ENABLE	(1 << 26)
+#   define IB2_INT_ENABLE		(1 << 29)
+#   define IB1_INT_ENABLE		(1 << 30)
+#   define RB_INT_ENABLE		(1 << 31)
+#define CP_INT_STATUS			0xc128
+#   define SCRATCH_INT_STAT		(1 << 25)
+#   define TIME_STAMP_INT_STAT		(1 << 26)
+#   define IB2_INT_STAT			(1 << 29)
+#   define IB1_INT_STAT			(1 << 30)
+#   define RB_INT_STAT			(1 << 31)
+
+#define GRBM_INT_CNTL			0x8060
+#   define RDERR_INT_ENABLE		(1 << 0)
+#   define GUI_IDLE_INT_ENABLE		(1 << 19)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT		0x6e98
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS			0x6bb8
+#   define VLINE_OCCURRED		(1 << 0)
+#   define VLINE_ACK			(1 << 4)
+#   define VLINE_STAT			(1 << 12)
+#   define VLINE_INTERRUPT		(1 << 16)
+#   define VLINE_INTERRUPT_TYPE		(1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS			0x6bbc
+#   define VBLANK_OCCURRED		(1 << 0)
+#   define VBLANK_ACK			(1 << 4)
+#   define VBLANK_STAT			(1 << 12)
+#   define VBLANK_INTERRUPT		(1 << 16)
+#   define VBLANK_INTERRUPT_TYPE	(1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK			0x6b40
+#   define VBLANK_INT_MASK		(1 << 0)
+#   define VLINE_INT_MASK		(1 << 4)
+
+#define DISP_INTERRUPT_STATUS		0x60f4
+#   define LB_D1_VLINE_INTERRUPT	(1 << 2)
+#   define LB_D1_VBLANK_INTERRUPT	(1 << 3)
+#   define DC_HPD1_INTERRUPT		(1 << 17)
+#   define DC_HPD1_RX_INTERRUPT		(1 << 18)
+#   define DACA_AUTODETECT_INTERRUPT	(1 << 22)
+#   define DACB_AUTODETECT_INTERRUPT	(1 << 23)
+#   define DC_I2C_SW_DONE_INTERRUPT	(1 << 24)
+#   define DC_I2C_HW_DONE_INTERRUPT	(1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE	0x60f8
+#   define LB_D2_VLINE_INTERRUPT	(1 << 2)
+#   define LB_D2_VBLANK_INTERRUPT	(1 << 3)
+#   define DC_HPD2_INTERRUPT		(1 << 17)
+#   define DC_HPD2_RX_INTERRUPT		(1 << 18)
+#   define DISP_TIMER_INTERRUPT		(1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2	0x60fc
+#   define LB_D3_VLINE_INTERRUPT	(1 << 2)
+#   define LB_D3_VBLANK_INTERRUPT	(1 << 3)
+#   define DC_HPD3_INTERRUPT		(1 << 17)
+#   define DC_HPD3_RX_INTERRUPT		(1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3	0x6100
+#   define LB_D4_VLINE_INTERRUPT	(1 << 2)
+#   define LB_D4_VBLANK_INTERRUPT	(1 << 3)
+#   define DC_HPD4_INTERRUPT		(1 << 17)
+#   define DC_HPD4_RX_INTERRUPT		(1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4	0x614c
+#   define LB_D5_VLINE_INTERRUPT	(1 << 2)
+#   define LB_D5_VBLANK_INTERRUPT	(1 << 3)
+#   define DC_HPD5_INTERRUPT		(1 << 17)
+#   define DC_HPD5_RX_INTERRUPT		(1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5	0x6050
+#   define LB_D6_VLINE_INTERRUPT	(1 << 2)
+#   define LB_D6_VBLANK_INTERRUPT	(1 << 3)
+#   define DC_HPD6_INTERRUPT		(1 << 17)
+#   define DC_HPD6_RX_INTERRUPT		(1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS			0x6858
+#   define GRPH_PFLIP_INT_OCCURRED	(1 << 0)
+#   define GRPH_PFLIP_INT_CLEAR		(1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL		0x685c
+#   define GRPH_PFLIP_INT_MASK		(1 << 0)
+#   define GRPH_PFLIP_INT_TYPE		(1 << 8)
+
+#define DACA_AUTODETECT_INT_CONTROL	0x66c8
+#define DACB_AUTODETECT_INT_CONTROL	0x67c8
+
+#define DC_HPD1_INT_STATUS		0x601c
+#define DC_HPD2_INT_STATUS		0x6028
+#define DC_HPD3_INT_STATUS		0x6034
+#define DC_HPD4_INT_STATUS		0x6040
+#define DC_HPD5_INT_STATUS		0x604c
+#define DC_HPD6_INT_STATUS		0x6058
+#   define DC_HPDx_INT_STATUS		(1 << 0)
+#   define DC_HPDx_SENSE		(1 << 1)
+#   define DC_HPDx_RX_INT_STATUS	(1 << 8)
+
+#define DC_HPD1_INT_CONTROL		0x6020
+#define DC_HPD2_INT_CONTROL		0x602c
+#define DC_HPD3_INT_CONTROL		0x6038
+#define DC_HPD4_INT_CONTROL		0x6044
+#define DC_HPD5_INT_CONTROL		0x6050
+#define DC_HPD6_INT_CONTROL		0x605c
+#   define DC_HPDx_INT_ACK		(1 << 0)
+#   define DC_HPDx_INT_POLARITY		(1 << 8)
+#   define DC_HPDx_INT_EN		(1 << 16)
+#   define DC_HPDx_RX_INT_ACK		(1 << 20)
+#   define DC_HPDx_RX_INT_EN		(1 << 24)
+
+#define DC_HPD1_CONTROL			0x6024
+#define DC_HPD2_CONTROL			0x6030
+#define DC_HPD3_CONTROL			0x603c
+#define DC_HPD4_CONTROL			0x6048
+#define DC_HPD5_CONTROL			0x6054
+#define DC_HPD6_CONTROL			0x6060
+#   define DC_HPDx_CONNECTION_TIMER(x)	((x) << 0)
+#   define DC_HPDx_RX_INT_TIMER(x)	((x) << 16)
+#   define DC_HPDx_EN			(1 << 28)
+
+#endif
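evergreend.h above is almost entirely single-bit flags plus shift macros that pack multi-bit fields; a register value is built by OR-ing the field macros together and writing the result once. A small stand-alone sketch using three of the CP_RB_CNTL fields defined above; the field values are illustrative, not the driver's defaults, and the semantics of the log2-encoded size field are simplified:

#include <stdint.h>
#include <stdio.h>

/* Local copies of three CP_RB_CNTL fields from the header above. */
#define RB_BUFSZ(x)	((x) << 0)	/* log2-encoded ring size */
#define RB_BLKSZ(x)	((x) << 8)
#define RB_NO_UPDATE	(1 << 27)

int main(void)
{
	/* Compose the register value field by field. */
	uint32_t rb_cntl = RB_BUFSZ(15) | RB_BLKSZ(3) | RB_NO_UPDATE;

	/* In the driver this would be WREG32(CP_RB_CNTL, rb_cntl);
	 * here we only show the encoding. */
	printf("CP_RB_CNTL = 0x%08x\n", rb_cntl);
	return 0;
}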
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6ea947d669e9..9b08c5743c86 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -44,6 +44,9 @@
 #define R700_PFP_UCODE_SIZE 848
 #define R700_PM4_UCODE_SIZE 1360
 #define R700_RLC_UCODE_SIZE 1024
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
 
 /* Firmware Names */
 MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -68,6 +71,18 @@ MODULE_FIRMWARE("radeon/RV710_pfp.bin");
 MODULE_FIRMWARE("radeon/RV710_me.bin");
 MODULE_FIRMWARE("radeon/R600_rlc.bin");
 MODULE_FIRMWARE("radeon/R700_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
+MODULE_FIRMWARE("radeon/CEDAR_me.bin");
+MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
 
 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -75,6 +90,7 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
 int r600_mc_wait_for_idle(struct radeon_device *rdev);
 void r600_gpu_init(struct radeon_device *rdev);
 void r600_fini(struct radeon_device *rdev);
+void r600_irq_disable(struct radeon_device *rdev);
 
 /* hpd for digital panel detect/disconnect */
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
@@ -1450,10 +1466,31 @@ int r600_init_microcode(struct radeon_device *rdev)
 		chip_name = "RV710";
 		rlc_chip_name = "R700";
 		break;
+	case CHIP_CEDAR:
+		chip_name = "CEDAR";
+		rlc_chip_name = "CEDAR";
+		break;
+	case CHIP_REDWOOD:
+		chip_name = "REDWOOD";
+		rlc_chip_name = "REDWOOD";
+		break;
+	case CHIP_JUNIPER:
+		chip_name = "JUNIPER";
+		rlc_chip_name = "JUNIPER";
+		break;
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		chip_name = "CYPRESS";
+		rlc_chip_name = "CYPRESS";
+		break;
 	default: BUG();
 	}
 
-	if (rdev->family >= CHIP_RV770) {
+	if (rdev->family >= CHIP_CEDAR) {
+		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+	} else if (rdev->family >= CHIP_RV770) {
 		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
 		me_req_size = R700_PM4_UCODE_SIZE * 4;
 		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
@@ -1567,12 +1604,15 @@ int r600_cp_start(struct radeon_device *rdev)
 	}
 	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
 	radeon_ring_write(rdev, 0x1);
-	if (rdev->family < CHIP_RV770) {
-		radeon_ring_write(rdev, 0x3);
-		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
-	} else {
+	if (rdev->family >= CHIP_CEDAR) {
+		radeon_ring_write(rdev, 0x0);
+		radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+	} else if (rdev->family >= CHIP_RV770) {
 		radeon_ring_write(rdev, 0x0);
 		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
+	} else {
+		radeon_ring_write(rdev, 0x3);
+		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
 	}
 	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
 	radeon_ring_write(rdev, 0);
@@ -2273,10 +2313,11 @@ static void r600_ih_ring_fini(struct radeon_device *rdev)
 	}
 }
 
-static void r600_rlc_stop(struct radeon_device *rdev)
+void r600_rlc_stop(struct radeon_device *rdev)
 {
 
-	if (rdev->family >= CHIP_RV770) {
+	if ((rdev->family >= CHIP_RV770) &&
+	    (rdev->family <= CHIP_RV740)) {
 		/* r7xx asics need to soft reset RLC before halting */
 		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
 		RREG32(SRBM_SOFT_RESET);
@@ -2313,7 +2354,12 @@ static int r600_rlc_init(struct radeon_device *rdev)
 	WREG32(RLC_UCODE_CNTL, 0);
 
 	fw_data = (const __be32 *)rdev->rlc_fw->data;
-	if (rdev->family >= CHIP_RV770) {
+	if (rdev->family >= CHIP_CEDAR) {
+		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	} else if (rdev->family >= CHIP_RV770) {
 		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
 			WREG32(RLC_UCODE_ADDR, i);
 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
@@ -2343,7 +2389,7 @@ static void r600_enable_interrupts(struct radeon_device *rdev)
 	rdev->ih.enabled = true;
 }
 
-static void r600_disable_interrupts(struct radeon_device *rdev)
+void r600_disable_interrupts(struct radeon_device *rdev)
 {
 	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
 	u32 ih_cntl = RREG32(IH_CNTL);
@@ -2458,7 +2504,10 @@ int r600_irq_init(struct radeon_device *rdev)
 	WREG32(IH_CNTL, ih_cntl);
 
 	/* force the active interrupt state to all disabled */
-	r600_disable_interrupt_state(rdev);
+	if (rdev->family >= CHIP_CEDAR)
+		evergreen_disable_interrupt_state(rdev);
+	else
+		r600_disable_interrupt_state(rdev);
 
 	/* enable irqs */
 	r600_enable_interrupts(rdev);
@@ -2468,7 +2517,7 @@ int r600_irq_init(struct radeon_device *rdev)
 
 void r600_irq_suspend(struct radeon_device *rdev)
 {
-	r600_disable_interrupts(rdev);
+	r600_irq_disable(rdev);
 	r600_rlc_stop(rdev);
 }
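r600_init_microcode() keys everything off the chip name chosen above: the three images are requested as radeon/<CHIP>_pfp.bin, _me.bin and <RLC_CHIP>_rlc.bin, and the expected byte sizes are the word counts at the top of the file times four. A reduced sketch of the naming step; build_fw_names is a hypothetical helper, where the real code snprintf's into a buffer and hands it to request_firmware():

#include <stdio.h>

/* Compose the three firmware paths from a chip name and an RLC chip name,
 * mirroring (but not reproducing) the driver's lookup scheme. */
static void build_fw_names(const char *chip, const char *rlc_chip,
			   char pfp[64], char me[64], char rlc[64])
{
	snprintf(pfp, 64, "radeon/%s_pfp.bin", chip);
	snprintf(me, 64, "radeon/%s_me.bin", chip);
	snprintf(rlc, 64, "radeon/%s_rlc.bin", rlc_chip);
}

int main(void)
{
	char pfp[64], me[64], rlc[64];

	/* Evergreen parts use one name for all three images... */
	build_fw_names("CYPRESS", "CYPRESS", pfp, me, rlc);
	printf("%s %s %s\n", pfp, me, rlc);

	/* ...while r7xx parts share a single R700 RLC image. */
	build_fw_names("RV770", "R700", pfp, me, rlc);
	printf("%s %s %s\n", pfp, me, rlc);
	return 0;
}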
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index be7570ac901c..e9120985a652 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -372,7 +372,7 @@ struct radeon_irq {
 	bool		installed;
 	bool		sw_int;
 	/* FIXME: use a define max crtc rather than hardcode it */
-	bool		crtc_vblank_int[2];
+	bool		crtc_vblank_int[6];
 	wait_queue_head_t	vblank_queue;
 	/* FIXME: use defines for max hpd/dacs */
 	bool		hpd[6];
@@ -870,11 +870,36 @@ struct rv770_asic {
 	struct r100_gpu_lockup	lockup;
 };
 
+struct evergreen_asic {
+	unsigned num_ses;
+	unsigned max_pipes;
+	unsigned max_tile_pipes;
+	unsigned max_simds;
+	unsigned max_backends;
+	unsigned max_gprs;
+	unsigned max_threads;
+	unsigned max_stack_entries;
+	unsigned max_hw_contexts;
+	unsigned max_gs_threads;
+	unsigned sx_max_export_size;
+	unsigned sx_max_export_pos_size;
+	unsigned sx_max_export_smx_size;
+	unsigned sq_num_cf_insts;
+	unsigned sx_num_of_sets;
+	unsigned sc_prim_fifo_size;
+	unsigned sc_hiz_tile_fifo_size;
+	unsigned sc_earlyz_tile_fifo_size;
+	unsigned tiling_nbanks;
+	unsigned tiling_npipes;
+	unsigned tiling_group_size;
+};
+
 union radeon_asic_config {
 	struct r300_asic	r300;
 	struct r100_asic	r100;
 	struct r600_asic	r600;
 	struct rv770_asic	rv770;
+	struct evergreen_asic	evergreen;
 };
 
 /*
@@ -1272,6 +1297,7 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
 extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 extern bool r600_card_posted(struct radeon_device *rdev);
 extern void r600_cp_stop(struct radeon_device *rdev);
+extern int r600_cp_start(struct radeon_device *rdev);
 extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
 extern int r600_cp_resume(struct radeon_device *rdev);
 extern void r600_cp_fini(struct radeon_device *rdev);
@@ -1295,6 +1321,8 @@ extern void r600_irq_fini(struct radeon_device *rdev);
 extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
 extern int r600_irq_set(struct radeon_device *rdev);
 extern void r600_irq_suspend(struct radeon_device *rdev);
+extern void r600_disable_interrupts(struct radeon_device *rdev);
+extern void r600_rlc_stop(struct radeon_device *rdev);
 /* r600 audio */
 extern int r600_audio_init(struct radeon_device *rdev);
 extern int r600_audio_tmds_index(struct drm_encoder *encoder);
@@ -1312,6 +1340,11 @@ extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
 					    uint8_t status_bits,
 					    uint8_t category_code);
 
+extern void r700_cp_stop(struct radeon_device *rdev);
+extern void r700_cp_fini(struct radeon_device *rdev);
+extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+extern int evergreen_irq_set(struct radeon_device *rdev);
+
 /* evergreen */
 struct evergreen_mc_save {
 	u32 vga_control[6];
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 0d7664b8e489..f835333c1b69 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -635,17 +635,17 @@ static struct radeon_asic evergreen_asic = {
 	.fini = &evergreen_fini,
 	.suspend = &evergreen_suspend,
 	.resume = &evergreen_resume,
-	.cp_commit = NULL,
+	.cp_commit = &r600_cp_commit,
 	.gpu_is_lockup = &evergreen_gpu_is_lockup,
 	.asic_reset = &evergreen_asic_reset,
 	.vga_set_state = &r600_vga_set_state,
-	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
-	.ring_test = NULL,
-	.ring_ib_execute = NULL,
-	.irq_set = NULL,
-	.irq_process = NULL,
-	.get_vblank_counter = NULL,
+	.ring_test = &r600_ring_test,
+	.ring_ib_execute = &r600_ring_ib_execute,
+	.irq_set = &evergreen_irq_set,
+	.irq_process = &evergreen_irq_process,
+	.get_vblank_counter = &evergreen_get_vblank_counter,
 	.fence_ring_emit = NULL,
 	.cs_parse = NULL,
 	.copy_blit = NULL,
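radeon_asic.c fills in a per-family table of function pointers; the hunk above replaces the evergreen placeholders with real entries, and a NULL left behind (fence_ring_emit, cs_parse, copy_blit) simply marks a hook that is not wired up yet. A miniature of that dispatch pattern; struct asic_ops and the stub names are invented for the example:

#include <stdio.h>

/* One struct type, one instance per GPU family; callers dispatch through
 * the pointers without knowing which family they drive. */
struct asic_ops {
	int (*irq_set)(void);
	int (*irq_process)(void);
};

static int evergreen_irq_set_stub(void)     { puts("evergreen irq_set");     return 0; }
static int evergreen_irq_process_stub(void) { puts("evergreen irq_process"); return 0; }

static const struct asic_ops evergreen_ops = {
	.irq_set = evergreen_irq_set_stub,
	.irq_process = evergreen_irq_process_stub,
};

int main(void)
{
	const struct asic_ops *asic = &evergreen_ops; /* chosen at probe time */

	/* Core code never checks the family again; a NULL entry marks an
	 * unimplemented hook, so every call site tests before calling. */
	if (asic->irq_set)
		asic->irq_set();
	if (asic->irq_process)
		asic->irq_process();
	return 0;
}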
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 77d48ba4a29a..ef2c7ba1bdc9 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -281,6 +281,7 @@ int rv770_resume(struct radeon_device *rdev);
 /*
  * evergreen
  */
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
 int evergreen_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 int evergreen_suspend(struct radeon_device *rdev);
@@ -293,4 +294,8 @@ void evergreen_hpd_fini(struct radeon_device *rdev);
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
 				enum radeon_hpd_id hpd);
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
+int evergreen_irq_set(struct radeon_device *rdev);
+int evergreen_irq_process(struct radeon_device *rdev);
+
 #endif
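In the power-table parsing that follows, each engine and memory clock is stored by ATOM as a 16-bit little-endian low word plus an 8-bit high byte, so the driver reassembles a wider value with le16_to_cpu() and a shift-or. A stand-alone sketch of that arithmetic, assuming a little-endian host so the byte swap is a no-op; the value stays in ATOM's native units, as in the driver:

#include <stdint.h>
#include <stdio.h>

/* Rebuild a clock from ATOM's split encoding: low 16 bits plus an
 * 8-bit high byte, i.e. the usEngineClockLow/ucEngineClockHigh pair. */
static uint32_t atom_clock(uint16_t low_le, uint8_t high)
{
	uint32_t clk = low_le;		/* le16_to_cpu() assumed a no-op */

	clk |= (uint32_t)high << 16;	/* splice in the top eight bits */
	return clk;
}

int main(void)
{
	/* usEngineClockLow = 0x5AA0, ucEngineClockHigh = 0x01 -> 0x15AA0 */
	printf("0x%x\n", atom_clock(0x5AA0, 0x01));
	return 0;
}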
"without" : "with"); + } else if ((power_info->info_4.sThermalController.ucType == + ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || + (power_info->info_4.sThermalController.ucType == + ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) { + DRM_INFO("Special thermal controller config\n"); } else { DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType], @@ -1763,6 +1773,36 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = clock_info->usVDDC; mode_index++; + } else if (ASIC_IS_DCE4(rdev)) { + struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info = + (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *) + (mode_info->atom_context->bios + + data_offset + + le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + + (power_state->ucClockStateIndices[j] * + power_info->info_4.ucClockInfoSize)); + sclk = le16_to_cpu(clock_info->usEngineClockLow); + sclk |= clock_info->ucEngineClockHigh << 16; + mclk = le16_to_cpu(clock_info->usMemoryClockLow); + mclk |= clock_info->ucMemoryClockHigh << 16; + rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; + rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; + /* skip invalid modes */ + if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) + continue; + /* skip overclock modes for now */ + if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk > + rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || + (rdev->pm.power_state[state_index].clock_info[mode_index].sclk > + rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) + continue; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = + VOLTAGE_SW; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = + clock_info->usVDDC; + /* XXX usVDDCI */ + mode_index++; } else { struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info = (struct _ATOM_PPLIB_R600_CLOCK_INFO *) diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 72bc57a92388..c14f3be25b4b 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -237,7 +237,6 @@ void r700_cp_stop(struct radeon_device *rdev) WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); } - static int rv770_cp_load_microcode(struct radeon_device *rdev) { const __be32 *fw_data; @@ -272,6 +271,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev) return 0; } +void r700_cp_fini(struct radeon_device *rdev) +{ + r700_cp_stop(rdev); + radeon_ring_fini(rdev); +} /* * Core functions @@ -1126,7 +1130,7 @@ int rv770_init(struct radeon_device *rdev) r = rv770_startup(rdev); if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); - r600_cp_fini(rdev); + r700_cp_fini(rdev); r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); @@ -1160,7 +1164,7 @@ void rv770_fini(struct radeon_device *rdev) { radeon_pm_fini(rdev); r600_blit_fini(rdev); - r600_cp_fini(rdev); + r700_cp_fini(rdev); r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); |