author		Rafael J. Wysocki <rjw@sisk.pl>	2010-06-17 23:02:27 +0000
committer	Dave Airlie <airlied@redhat.com>	2010-07-01 11:59:44 +1000
commit		3f53eb6f84545a7fc55a36657755371f42c63fca (patch)
tree		2812536734e73a67a92e0b8755931b057ec6876d /drivers/gpu/drm/radeon
parent		f892034a8ce80ed7098f667aae2eb6300e570603 (diff)
download	blackbird-op-linux-3f53eb6f84545a7fc55a36657755371f42c63fca.tar.gz
		blackbird-op-linux-3f53eb6f84545a7fc55a36657755371f42c63fca.zip
DRM / radeon / KMS: Fix hibernation regression related to radeon PM (was: Re: [Regression, post-2.6.34] Hibernation broken on machines with radeon/KMS and r300)
There is a regression from 2.6.34 related to the recent radeon power management changes, caused by attempting to cancel a delayed work item that's never been scheduled. However, the code as-is has some other issues potentially leading to visible problems.

First, the mutex around cancel_delayed_work() in radeon_pm_suspend() doesn't really serve any purpose, because cancel_delayed_work() only tries to delete the work's timer. Moreover, it doesn't prevent the work handler from running, so the handler can do some wrong things if it wins the race, and in that case it will rearm itself to do some more wrong things going forward. So, I think it's better to wait for the handler to return in case it's already been queued up for execution. Also, it should be prevented from rearming itself in that case.

Second, in radeon_set_pm_method() the cancel_delayed_work() is not sufficient to prevent the work handler from running and queuing itself up for the next run (the failure scenario is that cancel_delayed_work() returns 0, so the handler is run, it waits on the mutex and then rearms itself after the mutex has been released), so again the work handler should be prevented from rearming itself in that case.

Finally, there's a potential deadlock in radeon_pm_fini(), because cancel_delayed_work_sync() is called under rdev->pm.mutex, but the work handler tries to acquire the same mutex (if it wins the race).

Fix the issues described above.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Reviewed-by: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
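To make the stop pattern adopted by this patch concrete, here is a minimal, self-contained sketch of a self-rearming delayed work item. This is not radeon code: the demo_* names, the single demo_active flag, and the one-second period are hypothetical, standing in for dynpm_idle_work, dynpm_state, and RADEON_IDLE_LOOP_MS. The stop sequence is: clear the flag under the mutex so a racing handler cannot rearm itself, cancel the pending timer, then wait for any running handler by flushing the workqueue with the mutex released.

/*
 * Hypothetical demo_* sketch of the pattern described above; not radeon code.
 */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_work;
static DEFINE_MUTEX(demo_mutex);
static bool demo_active;		/* plays the role of dynpm_state */

static void demo_handler(struct work_struct *work)
{
	mutex_lock(&demo_mutex);
	/* ... do the periodic work under the mutex ... */
	if (demo_active)		/* rearm only while still active */
		queue_delayed_work(demo_wq, &demo_work, HZ);
	mutex_unlock(&demo_mutex);
}

static void demo_stop(void)
{
	mutex_lock(&demo_mutex);
	demo_active = false;			/* racing handler won't rearm */
	cancel_delayed_work(&demo_work);	/* deletes the timer only */
	mutex_unlock(&demo_mutex);
	flush_workqueue(demo_wq);	/* wait for a running handler, lock-free */
}

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("demo");
	if (!demo_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&demo_work, demo_handler);
	demo_active = true;
	queue_delayed_work(demo_wq, &demo_work, HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	demo_stop();
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that cancel_delayed_work() alone would not be enough here: if it returns 0 the handler is already queued or running and will requeue itself once the mutex is released, which is exactly the failure mode described for radeon_set_pm_method() above.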
Diffstat (limited to 'drivers/gpu/drm/radeon')
-rw-r--r--	drivers/gpu/drm/radeon/radeon.h		3
-rw-r--r--	drivers/gpu/drm/radeon/radeon_pm.c	41
2 files changed, 36 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5bbf97e26d87..ab61aaa887bb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -620,7 +620,8 @@ enum radeon_dynpm_state {
 	DYNPM_STATE_DISABLED,
 	DYNPM_STATE_MINIMUM,
 	DYNPM_STATE_PAUSED,
-	DYNPM_STATE_ACTIVE
+	DYNPM_STATE_ACTIVE,
+	DYNPM_STATE_SUSPENDED,
 };
 enum radeon_dynpm_action {
 	DYNPM_ACTION_NONE,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 63f679a04b25..115d26b762cc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -397,13 +397,20 @@ static ssize_t radeon_set_pm_method(struct device *dev,
 		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
 		mutex_unlock(&rdev->pm.mutex);
 	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
+		bool flush_wq = false;
+
 		mutex_lock(&rdev->pm.mutex);
-		rdev->pm.pm_method = PM_METHOD_PROFILE;
+		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+			cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+			flush_wq = true;
+		}
 		/* disable dynpm */
 		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
 		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
-		cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+		rdev->pm.pm_method = PM_METHOD_PROFILE;
 		mutex_unlock(&rdev->pm.mutex);
+		if (flush_wq)
+			flush_workqueue(rdev->wq);
 	} else {
 		DRM_ERROR("invalid power method!\n");
 		goto fail;
@@ -418,9 +425,18 @@ static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon
 void radeon_pm_suspend(struct radeon_device *rdev)
 {
+	bool flush_wq = false;
+
 	mutex_lock(&rdev->pm.mutex);
-	cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+		cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
+			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
+		flush_wq = true;
+	}
 	mutex_unlock(&rdev->pm.mutex);
+	if (flush_wq)
+		flush_workqueue(rdev->wq);
 }

 void radeon_pm_resume(struct radeon_device *rdev)
@@ -432,6 +448,12 @@ void radeon_pm_resume(struct radeon_device *rdev)
 	rdev->pm.current_sclk = rdev->clock.default_sclk;
 	rdev->pm.current_mclk = rdev->clock.default_mclk;
 	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+	if (rdev->pm.pm_method == PM_METHOD_DYNPM
+	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
+		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+		queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+				   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+	}
 	mutex_unlock(&rdev->pm.mutex);
 	radeon_pm_compute_clocks(rdev);
 }
@@ -486,6 +508,8 @@ int radeon_pm_init(struct radeon_device *rdev)
 void radeon_pm_fini(struct radeon_device *rdev)
 {
 	if (rdev->pm.num_power_states > 1) {
+		bool flush_wq = false;
+
 		mutex_lock(&rdev->pm.mutex);
 		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
 			rdev->pm.profile = PM_PROFILE_DEFAULT;
@@ -493,13 +517,16 @@ void radeon_pm_fini(struct radeon_device *rdev)
 			radeon_pm_set_clocks(rdev);
 		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
 			/* cancel work */
-			cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+			cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+			flush_wq = true;
 			/* reset default clocks */
 			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
 			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
 			radeon_pm_set_clocks(rdev);
 		}
 		mutex_unlock(&rdev->pm.mutex);
+		if (flush_wq)
+			flush_workqueue(rdev->wq);

 		device_remove_file(rdev->dev, &dev_attr_power_profile);
 		device_remove_file(rdev->dev, &dev_attr_power_method);
@@ -720,12 +747,12 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
 			radeon_pm_get_dynpm_state(rdev);
 			radeon_pm_set_clocks(rdev);
 		}
+
+		queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+				   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
 	}
 	mutex_unlock(&rdev->pm.mutex);
 	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
-
-	queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
-			   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
 }
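The radeon_pm_fini() hunk above also sidesteps a subtler trap. Continuing the hypothetical demo_* sketch from the commit message (same demo_wq, demo_work, demo_mutex, demo_active; not radeon code), the two teardown orderings compare as follows: cancel_delayed_work_sync() under the handler's own mutex is exactly the deadlock this patch removes.

/* DEADLOCKS if demo_handler() is already running: the handler blocks on
 * demo_mutex, while we hold demo_mutex and sleep until it completes. */
static void demo_fini_broken(void)
{
	mutex_lock(&demo_mutex);
	demo_active = false;
	cancel_delayed_work_sync(&demo_work);	/* waits for the handler */
	mutex_unlock(&demo_mutex);
}

/* Safe ordering, mirroring the radeon_pm_fini() hunk: only the non-blocking
 * cancel runs under the mutex; the blocking wait happens after dropping it. */
static void demo_fini_fixed(void)
{
	mutex_lock(&demo_mutex);
	demo_active = false;			/* handler won't rearm */
	cancel_delayed_work(&demo_work);	/* never blocks */
	mutex_unlock(&demo_mutex);
	flush_workqueue(demo_wq);	/* handler is free to take the mutex */
}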