Diffstat (limited to 'drivers/gpu/drm/amd/display/dmub')
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h       78
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h   15
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h        29
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h       19
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c      2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h      6
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c       57
7 files changed, 134 insertions(+), 72 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index b10728f33f62..cd9532b4f14d 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -36,6 +36,7 @@
#define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY)
#define REG_SET_MASK 0xFFFF
+
/*
* Command IDs should be treated as stable ABI.
* Do not reuse or modify IDs.
@@ -47,6 +48,7 @@ enum dmub_cmd_type {
DMUB_CMD__REG_SEQ_FIELD_UPDATE_SEQ = 2,
DMUB_CMD__REG_SEQ_BURST_WRITE = 3,
DMUB_CMD__REG_REG_WAIT = 4,
+ DMUB_CMD__PLAT_54186_WA = 5,
DMUB_CMD__PSR = 64,
DMUB_CMD__VBIOS = 128,
};
@@ -145,6 +147,32 @@ struct dmub_rb_cmd_reg_wait {
struct dmub_cmd_reg_wait_data reg_wait;
};
+#ifndef PHYSICAL_ADDRESS_LOC
+#define PHYSICAL_ADDRESS_LOC union large_integer
+#endif
+
+struct dmub_cmd_PLAT_54186_wa {
+ uint32_t DCSURF_SURFACE_CONTROL;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ struct {
+ uint8_t hubp_inst : 4;
+ uint8_t tmz_surface : 1;
+ uint8_t immediate : 1;
+ uint8_t vmid : 4;
+ uint8_t grph_stereo : 1;
+ uint32_t reserved : 21;
+ } flip_params;
+ uint32_t reserved[9];
+};
+
+struct dmub_rb_cmd_PLAT_54186_wa {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_PLAT_54186_wa flip;
+};
+
struct dmub_cmd_digx_encoder_control_data {
union dig_encoder_control_parameters_v1_5 dig;
};
@@ -187,9 +215,28 @@ struct dmub_rb_cmd_dpphy_init {
};
struct dmub_cmd_psr_copy_settings_data {
- uint32_t reg1;
- uint32_t reg2;
- uint32_t reg3;
+ uint16_t psr_level;
+ uint8_t hubp_inst;
+ uint8_t dpp_inst;
+ uint8_t mpcc_inst;
+ uint8_t opp_inst;
+ uint8_t otg_inst;
+ uint8_t digfe_inst;
+ uint8_t digbe_inst;
+ uint8_t dpphy_inst;
+ uint8_t aux_inst;
+ uint8_t hyst_frames;
+ uint8_t hyst_lines;
+ uint8_t phy_num;
+ uint8_t phy_type;
+ uint8_t aux_repeat;
+ uint8_t smu_optimizations_en;
+ uint8_t skip_wait_for_pll_lock;
+ uint8_t frame_delay;
+ uint8_t smu_phy_id;
+ uint8_t num_of_controllers;
+ uint8_t link_rate;
+ uint8_t frame_cap_ind;
};
struct dmub_rb_cmd_psr_copy_settings {
@@ -206,31 +253,17 @@ struct dmub_rb_cmd_psr_set_level {
struct dmub_cmd_psr_set_level_data psr_set_level_data;
};
-struct dmub_rb_cmd_psr_disable {
- struct dmub_cmd_header header;
-};
-
struct dmub_rb_cmd_psr_enable {
struct dmub_cmd_header header;
};
-struct dmub_cmd_psr_notify_vblank_data {
- uint32_t vblank_int; // Which vblank interrupt was triggered
-};
-
-struct dmub_rb_cmd_notify_vblank {
- struct dmub_cmd_header header;
- struct dmub_cmd_psr_notify_vblank_data psr_notify_vblank_data;
-};
-
-struct dmub_cmd_psr_notify_static_state_data {
- uint32_t ss_int; // Which static screen interrupt was triggered
- uint32_t ss_enter; // Enter (1) or exit (0) static screen
+struct dmub_cmd_psr_setup_data {
+ enum psr_version version; // PSR version 1 or 2
};
-struct dmub_rb_cmd_psr_notify_static_state {
+struct dmub_rb_cmd_psr_setup {
struct dmub_cmd_header header;
- struct dmub_cmd_psr_notify_static_state_data psr_notify_static_state_data;
+ struct dmub_cmd_psr_setup_data psr_setup_data;
};
union dmub_rb_cmd {
@@ -245,9 +278,10 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_dpphy_init dpphy_init;
struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
struct dmub_rb_cmd_psr_enable psr_enable;
- struct dmub_rb_cmd_psr_disable psr_disable;
struct dmub_rb_cmd_psr_copy_settings psr_copy_settings;
struct dmub_rb_cmd_psr_set_level psr_set_level;
+ struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa;
+ struct dmub_rb_cmd_psr_setup psr_setup;
};
#pragma pack(pop)
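
For reference, a minimal sketch of how a caller might fill in and queue the new PSR setup command. The dmub_cmd_header field names used here (type, sub_type, payload_bytes) are assumptions taken from the surrounding driver rather than from this hunk; dmub_srv_cmd_queue()/dmub_srv_cmd_execute() are the existing submission path.

	/* Hedged sketch: build and queue a DMUB_CMD__PSR_SETUP command. */
	union dmub_rb_cmd cmd = { 0 };

	cmd.psr_setup.header.type = DMUB_CMD__PSR;
	cmd.psr_setup.header.sub_type = DMUB_CMD__PSR_SETUP;
	/* payload_bytes is an assumed header field carrying the payload size. */
	cmd.psr_setup.header.payload_bytes = sizeof(struct dmub_cmd_psr_setup_data);
	cmd.psr_setup.psr_setup_data.version = PSR_VERSION_1;

	dmub_srv_cmd_queue(dmub, &cmd.psr_setup.header);
	dmub_srv_cmd_execute(dmub);
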
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
index 14f13e8a6f3b..7b69eb37f762 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
@@ -32,10 +32,17 @@
*/
enum dmub_cmd_psr_type {
- DMUB_CMD__PSR_ENABLE = 0,
- DMUB_CMD__PSR_DISABLE = 1,
- DMUB_CMD__PSR_COPY_SETTINGS = 2,
- DMUB_CMD__PSR_SET_LEVEL = 3,
+ DMUB_CMD__PSR_SETUP = 0,
+ DMUB_CMD__PSR_COPY_SETTINGS = 1,
+ DMUB_CMD__PSR_ENABLE = 2,
+ DMUB_CMD__PSR_DISABLE = 3,
+ DMUB_CMD__PSR_SET_LEVEL = 4,
+};
+
+enum psr_version {
+ PSR_VERSION_1 = 0x10, // PSR Version 1
+ PSR_VERSION_2 = 0x20, // PSR Version 2, includes selective update
+ PSR_VERSION_2_Y_COORD = 0x21, // PSR Version 2, includes Y-coordinate support for SU
};
#endif /* _DMUB_CMD_DAL_H_ */
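
Choosing which version to program is left to the caller of DMUB_CMD__PSR_SETUP. A hypothetical helper (the capability flags are illustrative, not defined by this patch) could map panel support onto the new enum:

	/* Hypothetical helper: su_capable/su_y_coord are illustrative inputs. */
	static enum psr_version select_psr_version(bool su_capable, bool su_y_coord)
	{
		if (su_capable && su_y_coord)
			return PSR_VERSION_2_Y_COORD;
		if (su_capable)
			return PSR_VERSION_2;
		return PSR_VERSION_1;
	}
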
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
index ac22744eaa94..df875fdd2ab0 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
@@ -73,12 +73,17 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
const struct dmub_cmd_header *cmd)
{
- uint8_t *wt_ptr = (uint8_t *)(rb->base_address) + rb->wrpt;
+ uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
+ const uint64_t *src = (const uint64_t *)cmd;
+ int i;
if (dmub_rb_full(rb))
return false;
- dmub_memcpy(wt_ptr, cmd, DMUB_RB_CMD_SIZE);
+ // copy the command payload into the ring buffer 64 bits at a time
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ *dst++ = *src++;
+
rb->wrpt += DMUB_RB_CMD_SIZE;
if (rb->wrpt >= rb->capacity)
@@ -113,6 +118,26 @@ static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
return true;
}
+/* Read every queued entry back so pending CPU writes are flushed to memory. */
+static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
+{
+ uint32_t rptr = rb->rptr;
+ uint32_t wptr = rb->wrpt;
+
+ while (rptr != wptr) {
+ uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
+ uint64_t temp;
+ int i;
+
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ temp = *data++;
+
+ rptr += DMUB_RB_CMD_SIZE;
+ if (rptr >= rb->capacity)
+ rptr %= rb->capacity;
+ }
+}
+
static inline void dmub_rb_init(struct dmub_rb *rb,
struct dmub_rb_init_params *init_params)
{
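
Taken together, the 64-bit copies in dmub_rb_push_front() and the new dmub_rb_flush_pending() back the queue-then-execute flow used later in this series. A rough caller-side sketch (DMUB_STATUS_QUEUE_FULL and the set_inbox1_wptr hook are assumed from the existing service code):

	if (!dmub_rb_push_front(&dmub->inbox1_rb, cmd))
		return DMUB_STATUS_QUEUE_FULL;	/* assumed status code */

	/* Read every queued entry back so the CPU writes reach framebuffer
	 * memory before the write pointer is handed to the DMCUB. */
	dmub_rb_flush_pending(&dmub->inbox1_rb);
	dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
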
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
index f34a50dd36ea..8e23a7017588 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -445,25 +445,6 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);
/**
- * dmub_srv_cmd_submit() - submits a command to the DMUB immediately
- * @dmub: the dmub service
- * @cmd: the command to submit
- * @timeout_us: the maximum number of microseconds to wait
- *
- * Submits a command to the DMUB with an optional timeout.
- * If timeout_us is given then the service will attempt to
- * resubmit for the given number of microseconds.
- *
- * Return:
- * DMUB_STATUS_OK - success
- * DMUB_STATUS_TIMEOUT - wait for submit timed out
- * DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_cmd_submit(struct dmub_srv *dmub,
- const struct dmub_cmd_header *cmd,
- uint32_t timeout_us);
-
-/**
* dmub_srv_wait_for_auto_load() - Waits for firmware auto load to complete
* @dmub: the dmub service
* @timeout_us: the maximum number of microseconds to wait
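
With dmub_srv_cmd_submit() removed, callers queue and then execute explicitly. A hedged migration sketch, assuming hdr is the const struct dmub_cmd_header * of an already-populated command:

	enum dmub_status status;

	/* Previously: dmub_srv_cmd_submit(dmub, hdr, timeout_us); */
	status = dmub_srv_cmd_queue(dmub, hdr);
	if (status == DMUB_STATUS_OK)
		status = dmub_srv_cmd_execute(dmub);
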
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index f45e14ada685..cd51c6138894 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -66,10 +66,12 @@ void dmub_dcn20_reset(struct dmub_srv *dmub)
{
REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
}
void dmub_dcn20_reset_release(struct dmub_srv *dmub)
{
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF);
REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 0);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index 68af9b190288..53bfd4da69ad 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -91,7 +91,8 @@ struct dmub_srv;
DMUB_SR(DMCUB_SCRATCH13) \
DMUB_SR(DMCUB_SCRATCH14) \
DMUB_SR(DMCUB_SCRATCH15) \
- DMUB_SR(CC_DC_PIPE_DIS)
+ DMUB_SR(CC_DC_PIPE_DIS) \
+ DMUB_SR(MMHUBBUB_SOFT_RESET)
#define DMUB_COMMON_FIELDS() \
DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \
@@ -119,7 +120,8 @@ struct dmub_srv;
DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE) \
DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \
DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
- DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE)
+ DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
+ DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET)
struct dmub_srv_common_reg_offset {
#define DMUB_SR(reg) uint32_t reg;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 9a959f871f11..dee676335d73 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -67,6 +67,26 @@ static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
return (val + factor - 1) / factor * factor;
}
+static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
+{
+ const uint8_t *base = (const uint8_t *)fb->cpu_addr;
+ uint8_t buf[64];
+ uint32_t pos, end;
+
+ /**
+ * Read 64-byte chunks since we don't want to store a
+ * large temporary buffer for this purpose.
+ */
+ end = fb->size / sizeof(buf) * sizeof(buf);
+
+ for (pos = 0; pos < end; pos += sizeof(buf))
+ dmub_memcpy(buf, base + pos, sizeof(buf));
+
+ /* Read anything leftover into the buffer. */
+ if (end < fb->size)
+ dmub_memcpy(buf, base + pos, fb->size - end);
+}
+
static const struct dmub_fw_meta_info *
dmub_get_fw_meta_info(const uint8_t *fw_bss_data, uint32_t fw_bss_data_size)
{
@@ -329,6 +349,13 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
cw1.region.base = DMUB_CW1_BASE;
cw1.region.top = cw1.region.base + stack_fb->size - 1;
+ /**
+ * Read back all the instruction memory so we don't hang the
+ * DMCUB when backdoor loading if the write from x86 hasn't been
+ * flushed yet. This only occurs in backdoor loading.
+ */
+ dmub_flush_buffer_mem(inst_fb);
+
if (params->load_inst_const && dmub->hw_funcs.backdoor_load)
dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
}
@@ -405,33 +432,17 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
+ /**
+ * Read back all the queued commands to ensure that they've
+ * been flushed to framebuffer memory. Otherwise DMCUB might
+ * read back stale, fully invalid or partially invalid data.
+ */
+ dmub_rb_flush_pending(&dmub->inbox1_rb);
+
dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_cmd_submit(struct dmub_srv *dmub,
- const struct dmub_cmd_header *cmd,
- uint32_t timeout_us)
-{
- uint32_t i = 0;
-
- if (!dmub->hw_init)
- return DMUB_STATUS_INVALID;
-
- for (i = 0; i <= timeout_us; ++i) {
- dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
- if (dmub_rb_push_front(&dmub->inbox1_rb, cmd)) {
- dmub->hw_funcs.set_inbox1_wptr(dmub,
- dmub->inbox1_rb.wrpt);
- return DMUB_STATUS_OK;
- }
-
- udelay(1);
- }
-
- return DMUB_STATUS_TIMEOUT;
-}
-
enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
uint32_t timeout_us)
{