Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c | 181
1 file changed, 170 insertions, 11 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index ada73e13d1af..da57c8a60608 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Red Hat Inc.
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -14,17 +14,138 @@
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
  */
 #include <core/tegra.h>
 #ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
 #include "priv.h"
 
+static int
+nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
+{
+	int ret;
+
+	ret = regulator_enable(tdev->vdd);
+	if (ret)
+		goto err_power;
+
+	ret = clk_prepare_enable(tdev->clk);
+	if (ret)
+		goto err_clk;
+	ret = clk_prepare_enable(tdev->clk_pwr);
+	if (ret)
+		goto err_clk_pwr;
+	clk_set_rate(tdev->clk_pwr, 204000000);
+	udelay(10);
+
+	reset_control_assert(tdev->rst);
+	udelay(10);
+
+	ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
+	if (ret)
+		goto err_clamp;
+	udelay(10);
+
+	reset_control_deassert(tdev->rst);
+	udelay(10);
+
+	return 0;
+
+err_clamp:
+	clk_disable_unprepare(tdev->clk_pwr);
+err_clk_pwr:
+	clk_disable_unprepare(tdev->clk);
+err_clk:
+	regulator_disable(tdev->vdd);
+err_power:
+	return ret;
+}
+
+static int
+nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
+{
+	reset_control_assert(tdev->rst);
+	udelay(10);
+
+	clk_disable_unprepare(tdev->clk_pwr);
+	clk_disable_unprepare(tdev->clk);
+	udelay(10);
+
+	return regulator_disable(tdev->vdd);
+}
+
+static void
+nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
+{
+#if IS_ENABLED(CONFIG_IOMMU_API)
+	struct device *dev = &tdev->pdev->dev;
+	unsigned long pgsize_bitmap;
+	int ret;
+
+	mutex_init(&tdev->iommu.mutex);
+
+	if (iommu_present(&platform_bus_type)) {
+		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
+		if (IS_ERR(tdev->iommu.domain))
+			goto error;
+
+		/*
+		 * A IOMMU is only usable if it supports page sizes smaller
+		 * or equal to the system's PAGE_SIZE, with a preference if
+		 * both are equal.
+		 */
+		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
+		if (pgsize_bitmap & PAGE_SIZE) {
+			tdev->iommu.pgshift = PAGE_SHIFT;
+		} else {
+			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
+			if (tdev->iommu.pgshift == 0) {
+				dev_warn(dev, "unsupported IOMMU page size\n");
+				goto free_domain;
+			}
+			tdev->iommu.pgshift -= 1;
+		}
+
+		ret = iommu_attach_device(tdev->iommu.domain, dev);
+		if (ret)
+			goto free_domain;
+
+		ret = nvkm_mm_init(&tdev->iommu.mm, 0,
+				   (1ULL << 40) >> tdev->iommu.pgshift, 1);
+		if (ret)
+			goto detach_device;
+	}
+
+	return;
+
+detach_device:
+	iommu_detach_device(tdev->iommu.domain, dev);
+
+free_domain:
+	iommu_domain_free(tdev->iommu.domain);
+
+error:
+	tdev->iommu.domain = NULL;
+	tdev->iommu.pgshift = 0;
+	dev_err(dev, "cannot initialize IOMMU MM\n");
+#endif
+}
+
+static void
+nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
+{
+#if IS_ENABLED(CONFIG_IOMMU_API)
+	if (tdev->iommu.domain) {
+		nvkm_mm_fini(&tdev->iommu.mm);
+		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
+		iommu_domain_free(tdev->iommu.domain);
+	}
+#endif
+}
+
 static struct nvkm_device_tegra *
 nvkm_device_tegra(struct nvkm_device *device)
 {
@@ -95,9 +216,19 @@ nvkm_device_tegra_init(struct nvkm_device *device)
 	return 0;
 }
 
+static void *
+nvkm_device_tegra_dtor(struct nvkm_device *device)
+{
+	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+	nvkm_device_tegra_power_down(tdev);
+	nvkm_device_tegra_remove_iommu(tdev);
+	return tdev;
+}
+
 static const struct nvkm_device_func
 nvkm_device_tegra_func = {
 	.tegra = nvkm_device_tegra,
+	.dtor = nvkm_device_tegra_dtor,
 	.init = nvkm_device_tegra_init,
 	.fini = nvkm_device_tegra_fini,
 	.resource_addr = nvkm_device_tegra_resource_addr,
@@ -112,6 +243,7 @@ nvkm_device_tegra_new(struct platform_device *pdev,
 		      struct nvkm_device **pdevice)
 {
 	struct nvkm_device_tegra *tdev;
+	int ret;
 
 	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
 		return -ENOMEM;
@@ -119,10 +251,37 @@ nvkm_device_tegra_new(struct platform_device *pdev,
 	tdev->pdev = pdev;
 	tdev->irq = -1;
 
-	return nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
-				NVKM_DEVICE_TEGRA, pdev->id, NULL,
-				cfg, dbg, detect, mmio, subdev_mask,
-				&tdev->device);
+	tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(tdev->vdd))
+		return PTR_ERR(tdev->vdd);
+
+	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
+	if (IS_ERR(tdev->rst))
+		return PTR_ERR(tdev->rst);
+
+	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
+	if (IS_ERR(tdev->clk))
+		return PTR_ERR(tdev->clk);
+
+	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
+	if (IS_ERR(tdev->clk_pwr))
+		return PTR_ERR(tdev->clk_pwr);
+
+	nvkm_device_tegra_probe_iommu(tdev);
+
+	ret = nvkm_device_tegra_power_up(tdev);
+	if (ret)
+		return ret;
+
+	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
+	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
+			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
+			       cfg, dbg, detect, mmio, subdev_mask,
+			       &tdev->device);
+	if (ret)
+		return ret;
+
+	return 0;
 }
 #else
 int
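
The new nvkm_device_tegra_power_up() relies on the usual kernel unwind idiom: each step that can fail jumps to a label that undoes only the steps already completed, so the PWR clock, GPU clock, and VDD regulator are released in reverse order of acquisition. The following is a minimal standalone sketch of that pattern, not driver code: the step()/undo() stubs and power_up_sketch() names are illustrative stand-ins for the regulator, clock, and powergate calls in the patch.

#include <stdio.h>

/* Stub standing in for regulator_enable(), clk_prepare_enable(), etc. */
static int step(const char *name, int fail)
{
	printf("%s%s\n", name, fail ? " -> failed" : "");
	return fail ? -1 : 0;
}

static void undo(const char *name)
{
	printf("undo %s\n", name);
}

/* Same shape as nvkm_device_tegra_power_up(): unwind completed steps only. */
static int power_up_sketch(int fail_at)
{
	int ret;

	ret = step("enable vdd regulator", fail_at == 1);
	if (ret)
		goto err_power;
	ret = step("enable gpu clock", fail_at == 2);
	if (ret)
		goto err_clk;
	ret = step("enable pwr clock", fail_at == 3);
	if (ret)
		goto err_clk_pwr;
	ret = step("unclamp 3D partition", fail_at == 4);
	if (ret)
		goto err_clamp;
	return 0;

err_clamp:
	undo("pwr clock");
err_clk_pwr:
	undo("gpu clock");
err_clk:
	undo("vdd regulator");
err_power:
	return ret;
}

int main(void)
{
	return power_up_sketch(3) ? 1 : 0;	/* simulate a pwr-clock failure */
}

Simulating a failure at step 3 releases the GPU clock and the regulator but never touches the PWR clock, since it was never enabled; that is exactly the property the goto ladder in the patch preserves.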
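The subtle part of nvkm_device_tegra_probe_iommu() is how it reduces the IOMMU's pgsize_bitmap to a single page shift (prefer a page equal to PAGE_SIZE, otherwise the largest supported page below it) and then sizes a 40-bit IOVA space for nvkm_mm_init() in units of that page. The sketch below reproduces that selection logic as a standalone userspace C program for reference; it assumes a 4 KiB PAGE_SIZE, and the SKETCH_* macros and sketch_* helpers are illustrative, not part of the driver.

#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12UL                       /* assumed 4 KiB pages */
#define SKETCH_PAGE_SIZE  (1UL << SKETCH_PAGE_SHIFT)
#define SKETCH_PAGE_MASK  (~(SKETCH_PAGE_SIZE - 1))

/* fls(): 1-based index of the most significant set bit; 0 when x == 0. */
static unsigned int sketch_fls(unsigned long x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/*
 * Mirror the driver's choice: prefer an IOMMU page equal to PAGE_SIZE,
 * otherwise fall back to the largest supported page smaller than PAGE_SIZE.
 * Returns 0 when no usable page size exists.
 */
static unsigned int sketch_pick_pgshift(unsigned long pgsize_bitmap)
{
	unsigned int shift;

	if (pgsize_bitmap & SKETCH_PAGE_SIZE)
		return SKETCH_PAGE_SHIFT;

	shift = sketch_fls(pgsize_bitmap & ~SKETCH_PAGE_MASK);
	return shift ? shift - 1 : 0;
}

int main(void)
{
	/* e.g. an IOMMU advertising 4 KiB and 4 MiB pages */
	unsigned long pgsize_bitmap = (1UL << 12) | (1UL << 22);
	unsigned int pgshift = sketch_pick_pgshift(pgsize_bitmap);

	/* Same arithmetic as the nvkm_mm_init() call in the patch: a 40-bit
	 * IOVA space in IOMMU-page units (1 << 28 blocks for 4 KiB pages). */
	printf("pgshift = %u, mm blocks = %llu\n", pgshift,
	       (unsigned long long)((1ULL << 40) >> pgshift));
	return 0;
}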