Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/clk')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild          1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c        176
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c          41
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c       318
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c       326
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c       356
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c       344
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h         6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c       282
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c         56
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c        173
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c        294
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h         24
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c      2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c       6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h         26
16 files changed, 1232 insertions, 1199 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
index 9c2f688c9602..ed7717bcc3a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
@@ -8,5 +8,6 @@ nvkm-y += nvkm/subdev/clk/mcp77.o
nvkm-y += nvkm/subdev/clk/gf100.o
nvkm-y += nvkm/subdev/clk/gk104.o
nvkm-y += nvkm/subdev/clk/gk20a.o
+
nvkm-y += nvkm/subdev/clk/pllnv04.o
nvkm-y += nvkm/subdev/clk/pllgt215.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 39a83d82e0cd..dc8682c91cc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -21,7 +21,8 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/clk.h>
+#include "priv.h"
+
#include <subdev/bios.h>
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
@@ -30,7 +31,6 @@
#include <subdev/therm.h>
#include <subdev/volt.h>
-#include <core/device.h>
#include <core/option.h>
/******************************************************************************
@@ -40,7 +40,7 @@ static u32
nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
u8 pstate, u8 domain, u32 input)
{
- struct nvkm_bios *bios = nvkm_bios(clk);
+ struct nvkm_bios *bios = clk->subdev.device->bios;
struct nvbios_boostE boostE;
u8 ver, hdr, cnt, len;
u16 data;
@@ -77,8 +77,10 @@ nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
- struct nvkm_therm *ptherm = nvkm_therm(clk);
- struct nvkm_volt *volt = nvkm_volt(clk);
+ struct nvkm_subdev *subdev = &clk->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_therm *therm = device->therm;
+ struct nvkm_volt *volt = device->volt;
struct nvkm_cstate *cstate;
int ret;
@@ -88,41 +90,41 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
cstate = &pstate->base;
}
- if (ptherm) {
- ret = nvkm_therm_cstate(ptherm, pstate->fanspeed, +1);
+ if (therm) {
+ ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
if (ret && ret != -ENODEV) {
- nv_error(clk, "failed to raise fan speed: %d\n", ret);
+ nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
return ret;
}
}
if (volt) {
- ret = volt->set_id(volt, cstate->voltage, +1);
+ ret = nvkm_volt_set_id(volt, cstate->voltage, +1);
if (ret && ret != -ENODEV) {
- nv_error(clk, "failed to raise voltage: %d\n", ret);
+ nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
return ret;
}
}
- ret = clk->calc(clk, cstate);
+ ret = clk->func->calc(clk, cstate);
if (ret == 0) {
- ret = clk->prog(clk);
- clk->tidy(clk);
+ ret = clk->func->prog(clk);
+ clk->func->tidy(clk);
}
if (volt) {
- ret = volt->set_id(volt, cstate->voltage, -1);
+ ret = nvkm_volt_set_id(volt, cstate->voltage, -1);
if (ret && ret != -ENODEV)
- nv_error(clk, "failed to lower voltage: %d\n", ret);
+ nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
}
- if (ptherm) {
- ret = nvkm_therm_cstate(ptherm, pstate->fanspeed, -1);
+ if (therm) {
+ ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
if (ret && ret != -ENODEV)
- nv_error(clk, "failed to lower fan speed: %d\n", ret);
+ nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
}
- return 0;
+ return ret;
}
static void
@@ -135,8 +137,8 @@ nvkm_cstate_del(struct nvkm_cstate *cstate)
static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
- struct nvkm_bios *bios = nvkm_bios(clk);
- struct nvkm_domain *domain = clk->domains;
+ struct nvkm_bios *bios = clk->subdev.device->bios;
+ const struct nvkm_domain *domain = clk->domains;
struct nvkm_cstate *cstate = NULL;
struct nvbios_cstepX cstepX;
u8 ver, hdr;
@@ -172,7 +174,8 @@ nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
- struct nvkm_fb *pfb = nvkm_fb(clk);
+ struct nvkm_subdev *subdev = &clk->subdev;
+ struct nvkm_ram *ram = subdev->device->fb->ram;
struct nvkm_pstate *pstate;
int ret, idx = 0;
@@ -181,17 +184,17 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
break;
}
- nv_debug(clk, "setting performance state %d\n", pstatei);
+ nvkm_debug(subdev, "setting performance state %d\n", pstatei);
clk->pstate = pstatei;
- if (pfb->ram && pfb->ram->calc) {
+ if (ram && ram->func->calc) {
int khz = pstate->base.domain[nv_clk_src_mem];
do {
- ret = pfb->ram->calc(pfb, khz);
+ ret = ram->func->calc(ram, khz);
if (ret == 0)
- ret = pfb->ram->prog(pfb);
+ ret = ram->func->prog(ram);
} while (ret > 0);
- pfb->ram->tidy(pfb);
+ ram->func->tidy(ram);
}
return nvkm_cstate_prog(clk, pstate, 0);
@@ -201,31 +204,32 @@ static void
nvkm_pstate_work(struct work_struct *work)
{
struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
+ struct nvkm_subdev *subdev = &clk->subdev;
int pstate;
if (!atomic_xchg(&clk->waiting, 0))
return;
clk->pwrsrc = power_supply_is_system_supplied();
- nv_trace(clk, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
- clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
- clk->astate, clk->tstate, clk->dstate);
+ nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
+ clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
+ clk->astate, clk->tstate, clk->dstate);
pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
if (clk->state_nr && pstate != -1) {
pstate = (pstate < 0) ? clk->astate : pstate;
- pstate = min(pstate, clk->state_nr - 1 - clk->tstate);
+ pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
pstate = max(pstate, clk->dstate);
} else {
pstate = clk->pstate = -1;
}
- nv_trace(clk, "-> %d\n", pstate);
+ nvkm_trace(subdev, "-> %d\n", pstate);
if (pstate != clk->pstate) {
int ret = nvkm_pstate_prog(clk, pstate);
if (ret) {
- nv_error(clk, "error setting pstate %d: %d\n",
- pstate, ret);
+ nvkm_error(subdev, "error setting pstate %d: %d\n",
+ pstate, ret);
}
}
@@ -246,8 +250,9 @@ nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
static void
nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
{
- struct nvkm_domain *clock = clk->domains - 1;
+ const struct nvkm_domain *clock = clk->domains - 1;
struct nvkm_cstate *cstate;
+ struct nvkm_subdev *subdev = &clk->subdev;
char info[3][32] = { "", "", "" };
char name[4] = "--";
int i = -1;
@@ -261,12 +266,12 @@ nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
if (hi == 0)
continue;
- nv_debug(clk, "%02x: %10d KHz\n", clock->name, lo);
+ nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
list_for_each_entry(cstate, &pstate->list, head) {
u32 freq = cstate->domain[clock->name];
lo = min(lo, freq);
hi = max(hi, freq);
- nv_debug(clk, "%10d KHz\n", freq);
+ nvkm_debug(subdev, "%10d KHz\n", freq);
}
if (clock->mname && ++i < ARRAY_SIZE(info)) {
@@ -282,7 +287,7 @@ nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
}
}
- nv_info(clk, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
+ nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
}
static void
@@ -301,8 +306,8 @@ nvkm_pstate_del(struct nvkm_pstate *pstate)
static int
nvkm_pstate_new(struct nvkm_clk *clk, int idx)
{
- struct nvkm_bios *bios = nvkm_bios(clk);
- struct nvkm_domain *domain = clk->domains - 1;
+ struct nvkm_bios *bios = clk->subdev.device->bios;
+ const struct nvkm_domain *domain = clk->domains - 1;
struct nvkm_pstate *pstate;
struct nvkm_cstate *cstate;
struct nvbios_cstepE cstepE;
@@ -471,32 +476,37 @@ nvkm_clk_pwrsrc(struct nvkm_notify *notify)
*****************************************************************************/
int
-_nvkm_clk_fini(struct nvkm_object *object, bool suspend)
+nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+{
+ return clk->func->read(clk, src);
+}
+
+static int
+nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
{
- struct nvkm_clk *clk = (void *)object;
+ struct nvkm_clk *clk = nvkm_clk(subdev);
nvkm_notify_put(&clk->pwrsrc_ntfy);
- return nvkm_subdev_fini(&clk->base, suspend);
+ flush_work(&clk->work);
+ if (clk->func->fini)
+ clk->func->fini(clk);
+ return 0;
}
-int
-_nvkm_clk_init(struct nvkm_object *object)
+static int
+nvkm_clk_init(struct nvkm_subdev *subdev)
{
- struct nvkm_clk *clk = (void *)object;
- struct nvkm_domain *clock = clk->domains;
+ struct nvkm_clk *clk = nvkm_clk(subdev);
+ const struct nvkm_domain *clock = clk->domains;
int ret;
- ret = nvkm_subdev_init(&clk->base);
- if (ret)
- return ret;
-
memset(&clk->bstate, 0x00, sizeof(clk->bstate));
INIT_LIST_HEAD(&clk->bstate.list);
clk->bstate.pstate = 0xff;
while (clock->name != nv_clk_src_max) {
- ret = clk->read(clk, clock->name);
+ ret = nvkm_clk_read(clk, clock->name);
if (ret < 0) {
- nv_error(clk, "%02x freq unknown\n", clock->name);
+ nvkm_error(subdev, "%02x freq unknown\n", clock->name);
return ret;
}
clk->bstate.base.domain[clock->name] = ret;
@@ -505,6 +515,9 @@ _nvkm_clk_init(struct nvkm_object *object)
nvkm_pstate_info(clk, &clk->bstate);
+ if (clk->func->init)
+ return clk->func->init(clk);
+
clk->astate = clk->state_nr - 1;
clk->tstate = 0;
clk->dstate = 0;
@@ -513,61 +526,63 @@ _nvkm_clk_init(struct nvkm_object *object)
return 0;
}
-void
-_nvkm_clk_dtor(struct nvkm_object *object)
+static void *
+nvkm_clk_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_clk *clk = (void *)object;
+ struct nvkm_clk *clk = nvkm_clk(subdev);
struct nvkm_pstate *pstate, *temp;
nvkm_notify_fini(&clk->pwrsrc_ntfy);
+ /* Early return if the pstates have been provided statically */
+ if (clk->func->pstates)
+ return clk;
+
list_for_each_entry_safe(pstate, temp, &clk->states, head) {
nvkm_pstate_del(pstate);
}
- nvkm_subdev_destroy(&clk->base);
+ return clk;
}
+static const struct nvkm_subdev_func
+nvkm_clk = {
+ .dtor = nvkm_clk_dtor,
+ .init = nvkm_clk_init,
+ .fini = nvkm_clk_fini,
+};
+
int
-nvkm_clk_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, struct nvkm_domain *clocks,
- struct nvkm_pstate *pstates, int nb_pstates,
- bool allow_reclock, int length, void **object)
+nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
+ int index, bool allow_reclock, struct nvkm_clk *clk)
{
- struct nvkm_device *device = nv_device(parent);
- struct nvkm_clk *clk;
int ret, idx, arglen;
const char *mode;
- ret = nvkm_subdev_create_(parent, engine, oclass, 0, "CLK",
- "clock", length, object);
- clk = *object;
- if (ret)
- return ret;
-
+ nvkm_subdev_ctor(&nvkm_clk, device, index, 0, &clk->subdev);
+ clk->func = func;
INIT_LIST_HEAD(&clk->states);
- clk->domains = clocks;
+ clk->domains = func->domains;
clk->ustate_ac = -1;
clk->ustate_dc = -1;
+ clk->allow_reclock = allow_reclock;
INIT_WORK(&clk->work, nvkm_pstate_work);
init_waitqueue_head(&clk->wait);
atomic_set(&clk->waiting, 0);
/* If no pstates are provided, try and fetch them from the BIOS */
- if (!pstates) {
+ if (!func->pstates) {
idx = 0;
do {
ret = nvkm_pstate_new(clk, idx++);
} while (ret == 0);
} else {
- for (idx = 0; idx < nb_pstates; idx++)
- list_add_tail(&pstates[idx].head, &clk->states);
- clk->state_nr = nb_pstates;
+ for (idx = 0; idx < func->nr_pstates; idx++)
+ list_add_tail(&func->pstates[idx].head, &clk->states);
+ clk->state_nr = func->nr_pstates;
}
- clk->allow_reclock = allow_reclock;
-
ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
NULL, 0, 0, &clk->pwrsrc_ntfy);
if (ret)
@@ -589,3 +604,12 @@ nvkm_clk_create_(struct nvkm_object *parent, struct nvkm_object *engine,
return 0;
}
+
+int
+nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
+ int index, bool allow_reclock, struct nvkm_clk **pclk)
+{
+ if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
+ return -ENOMEM;
+ return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
index 4c90b9769d64..347da9ee20f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
@@ -23,25 +23,26 @@
*/
#include "nv50.h"
-static struct nvkm_domain
-g84_domains[] = {
- { nv_clk_src_crystal, 0xff },
- { nv_clk_src_href , 0xff },
- { nv_clk_src_core , 0xff, 0, "core", 1000 },
- { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
- { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
- { nv_clk_src_vdec , 0xff },
- { nv_clk_src_max }
+static const struct nvkm_clk_func
+g84_clk = {
+ .read = nv50_clk_read,
+ .calc = nv50_clk_calc,
+ .prog = nv50_clk_prog,
+ .tidy = nv50_clk_tidy,
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_core , 0xff, 0, "core", 1000 },
+ { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+ { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
+ { nv_clk_src_vdec , 0xff },
+ { nv_clk_src_max }
+ }
};
-struct nvkm_oclass *
-g84_clk_oclass = &(struct nv50_clk_oclass) {
- .base.handle = NV_SUBDEV(CLK, 0x84),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
- .domains = g84_domains,
-}.base;
+int
+g84_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+ return nv50_clk_new_(&g84_clk, device, index,
+ (device->chipset == 0xa0), pclk);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 3d7330d54b02..a52b7e7fce41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -21,10 +21,10 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/clk.h>
+#define gf100_clk(p) container_of((p), struct gf100_clk, base)
+#include "priv.h"
#include "pll.h"
-#include <core/device.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
@@ -38,29 +38,29 @@ struct gf100_clk_info {
u32 coef;
};
-struct gf100_clk_priv {
+struct gf100_clk {
struct nvkm_clk base;
struct gf100_clk_info eng[16];
};
-static u32 read_div(struct gf100_clk_priv *, int, u32, u32);
+static u32 read_div(struct gf100_clk *, int, u32, u32);
static u32
-read_vco(struct gf100_clk_priv *priv, u32 dsrc)
+read_vco(struct gf100_clk *clk, u32 dsrc)
{
- struct nvkm_clk *clk = &priv->base;
- u32 ssrc = nv_rd32(priv, dsrc);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ssrc = nvkm_rd32(device, dsrc);
if (!(ssrc & 0x00000100))
- return clk->read(clk, nv_clk_src_sppll0);
- return clk->read(clk, nv_clk_src_sppll1);
+ return nvkm_clk_read(&clk->base, nv_clk_src_sppll0);
+ return nvkm_clk_read(&clk->base, nv_clk_src_sppll1);
}
static u32
-read_pll(struct gf100_clk_priv *priv, u32 pll)
+read_pll(struct gf100_clk *clk, u32 pll)
{
- struct nvkm_clk *clk = &priv->base;
- u32 ctrl = nv_rd32(priv, pll + 0x00);
- u32 coef = nv_rd32(priv, pll + 0x04);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ctrl = nvkm_rd32(device, pll + 0x00);
+ u32 coef = nvkm_rd32(device, pll + 0x04);
u32 P = (coef & 0x003f0000) >> 16;
u32 N = (coef & 0x0000ff00) >> 8;
u32 M = (coef & 0x000000ff) >> 0;
@@ -72,20 +72,20 @@ read_pll(struct gf100_clk_priv *priv, u32 pll)
switch (pll) {
case 0x00e800:
case 0x00e820:
- sclk = nv_device(priv)->crystal;
+ sclk = device->crystal;
P = 1;
break;
case 0x132000:
- sclk = clk->read(clk, nv_clk_src_mpllsrc);
+ sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrc);
break;
case 0x132020:
- sclk = clk->read(clk, nv_clk_src_mpllsrcref);
+ sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrcref);
break;
case 0x137000:
case 0x137020:
case 0x137040:
case 0x1370e0:
- sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+ sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
break;
default:
return 0;
@@ -95,46 +95,48 @@ read_pll(struct gf100_clk_priv *priv, u32 pll)
}
static u32
-read_div(struct gf100_clk_priv *priv, int doff, u32 dsrc, u32 dctl)
+read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
{
- u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
- u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+ u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
switch (ssrc & 0x00000003) {
case 0:
if ((ssrc & 0x00030000) != 0x00030000)
- return nv_device(priv)->crystal;
+ return device->crystal;
return 108000;
case 2:
return 100000;
case 3:
if (sctl & 0x80000000) {
- u32 sclk = read_vco(priv, dsrc + (doff * 4));
+ u32 sclk = read_vco(clk, dsrc + (doff * 4));
u32 sdiv = (sctl & 0x0000003f) + 2;
return (sclk * 2) / sdiv;
}
- return read_vco(priv, dsrc + (doff * 4));
+ return read_vco(clk, dsrc + (doff * 4));
default:
return 0;
}
}
static u32
-read_clk(struct gf100_clk_priv *priv, int clk)
+read_clk(struct gf100_clk *clk, int idx)
{
- u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
- u32 ssel = nv_rd32(priv, 0x137100);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
+ u32 ssel = nvkm_rd32(device, 0x137100);
u32 sclk, sdiv;
- if (ssel & (1 << clk)) {
- if (clk < 7)
- sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+ if (ssel & (1 << idx)) {
+ if (idx < 7)
+ sclk = read_pll(clk, 0x137000 + (idx * 0x20));
else
- sclk = read_pll(priv, 0x1370e0);
+ sclk = read_pll(clk, 0x1370e0);
sdiv = ((sctl & 0x00003f00) >> 8) + 2;
} else {
- sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+ sclk = read_div(clk, idx, 0x137160, 0x1371d0);
sdiv = ((sctl & 0x0000003f) >> 0) + 2;
}
@@ -145,10 +147,11 @@ read_clk(struct gf100_clk_priv *priv, int clk)
}
static int
-gf100_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gf100_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
- struct nvkm_device *device = nv_device(clk);
- struct gf100_clk_priv *priv = (void *)clk;
+ struct gf100_clk *clk = gf100_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
switch (src) {
case nv_clk_src_crystal:
@@ -156,47 +159,47 @@ gf100_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
case nv_clk_src_href:
return 100000;
case nv_clk_src_sppll0:
- return read_pll(priv, 0x00e800);
+ return read_pll(clk, 0x00e800);
case nv_clk_src_sppll1:
- return read_pll(priv, 0x00e820);
+ return read_pll(clk, 0x00e820);
case nv_clk_src_mpllsrcref:
- return read_div(priv, 0, 0x137320, 0x137330);
+ return read_div(clk, 0, 0x137320, 0x137330);
case nv_clk_src_mpllsrc:
- return read_pll(priv, 0x132020);
+ return read_pll(clk, 0x132020);
case nv_clk_src_mpll:
- return read_pll(priv, 0x132000);
+ return read_pll(clk, 0x132000);
case nv_clk_src_mdiv:
- return read_div(priv, 0, 0x137300, 0x137310);
+ return read_div(clk, 0, 0x137300, 0x137310);
case nv_clk_src_mem:
- if (nv_rd32(priv, 0x1373f0) & 0x00000002)
- return clk->read(clk, nv_clk_src_mpll);
- return clk->read(clk, nv_clk_src_mdiv);
+ if (nvkm_rd32(device, 0x1373f0) & 0x00000002)
+ return nvkm_clk_read(&clk->base, nv_clk_src_mpll);
+ return nvkm_clk_read(&clk->base, nv_clk_src_mdiv);
case nv_clk_src_gpc:
- return read_clk(priv, 0x00);
+ return read_clk(clk, 0x00);
case nv_clk_src_rop:
- return read_clk(priv, 0x01);
+ return read_clk(clk, 0x01);
case nv_clk_src_hubk07:
- return read_clk(priv, 0x02);
+ return read_clk(clk, 0x02);
case nv_clk_src_hubk06:
- return read_clk(priv, 0x07);
+ return read_clk(clk, 0x07);
case nv_clk_src_hubk01:
- return read_clk(priv, 0x08);
+ return read_clk(clk, 0x08);
case nv_clk_src_copy:
- return read_clk(priv, 0x09);
+ return read_clk(clk, 0x09);
case nv_clk_src_daemon:
- return read_clk(priv, 0x0c);
+ return read_clk(clk, 0x0c);
case nv_clk_src_vdec:
- return read_clk(priv, 0x0e);
+ return read_clk(clk, 0x0e);
default:
- nv_error(clk, "invalid clock source %d\n", src);
+ nvkm_error(subdev, "invalid clock source %d\n", src);
return -EINVAL;
}
}
static u32
-calc_div(struct gf100_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+calc_div(struct gf100_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
{
u32 div = min((ref * 2) / freq, (u32)65);
if (div < 2)
@@ -207,7 +210,7 @@ calc_div(struct gf100_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
}
static u32
-calc_src(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+calc_src(struct gf100_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
{
u32 sclk;
@@ -229,28 +232,29 @@ calc_src(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
}
/* otherwise, calculate the closest divider */
- sclk = read_vco(priv, 0x137160 + (clk * 4));
- if (clk < 7)
- sclk = calc_div(priv, clk, sclk, freq, ddiv);
+ sclk = read_vco(clk, 0x137160 + (idx * 4));
+ if (idx < 7)
+ sclk = calc_div(clk, idx, sclk, freq, ddiv);
return sclk;
}
static u32
-calc_pll(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *coef)
+calc_pll(struct gf100_clk *clk, int idx, u32 freq, u32 *coef)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_bios *bios = subdev->device->bios;
struct nvbios_pll limits;
int N, M, P, ret;
- ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+ ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
if (ret)
return 0;
- limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+ limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
if (!limits.refclk)
return 0;
- ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+ ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
if (ret <= 0)
return 0;
@@ -259,10 +263,9 @@ calc_pll(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *coef)
}
static int
-calc_clk(struct gf100_clk_priv *priv,
- struct nvkm_cstate *cstate, int clk, int dom)
+calc_clk(struct gf100_clk *clk, struct nvkm_cstate *cstate, int idx, int dom)
{
- struct gf100_clk_info *info = &priv->eng[clk];
+ struct gf100_clk_info *info = &clk->eng[idx];
u32 freq = cstate->domain[dom];
u32 src0, div0, div1D, div1P = 0;
u32 clk0, clk1 = 0;
@@ -272,16 +275,16 @@ calc_clk(struct gf100_clk_priv *priv,
return 0;
/* first possible path, using only dividers */
- clk0 = calc_src(priv, clk, freq, &src0, &div0);
- clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+ clk0 = calc_src(clk, idx, freq, &src0, &div0);
+ clk0 = calc_div(clk, idx, clk0, freq, &div1D);
/* see if we can get any closer using PLLs */
- if (clk0 != freq && (0x00004387 & (1 << clk))) {
- if (clk <= 7)
- clk1 = calc_pll(priv, clk, freq, &info->coef);
+ if (clk0 != freq && (0x00004387 & (1 << idx))) {
+ if (idx <= 7)
+ clk1 = calc_pll(clk, idx, freq, &info->coef);
else
clk1 = cstate->domain[nv_clk_src_hubk06];
- clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+ clk1 = calc_div(clk, idx, clk1, freq, &div1P);
}
/* select the method which gets closest to target freq */
@@ -303,7 +306,7 @@ calc_clk(struct gf100_clk_priv *priv,
info->mdiv |= 0x80000000;
info->mdiv |= div1P << 8;
}
- info->ssel = (1 << clk);
+ info->ssel = (1 << idx);
info->freq = clk1;
}
@@ -311,81 +314,96 @@ calc_clk(struct gf100_clk_priv *priv,
}
static int
-gf100_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gf100_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
- struct gf100_clk_priv *priv = (void *)clk;
+ struct gf100_clk *clk = gf100_clk(base);
int ret;
- if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
- (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
- (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
- (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
- (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
- (ret = calc_clk(priv, cstate, 0x09, nv_clk_src_copy)) ||
- (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
- (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+ if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
+ (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
+ (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
+ (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
+ (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
+ (ret = calc_clk(clk, cstate, 0x09, nv_clk_src_copy)) ||
+ (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) ||
+ (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
return ret;
return 0;
}
static void
-gf100_clk_prog_0(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_0(struct gf100_clk *clk, int idx)
{
- struct gf100_clk_info *info = &priv->eng[clk];
- if (clk < 7 && !info->ssel) {
- nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
- nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+ struct gf100_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
+ if (idx < 7 && !info->ssel) {
+ nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
+ nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
}
}
static void
-gf100_clk_prog_1(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_1(struct gf100_clk *clk, int idx)
{
- nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
- nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
+ break;
+ );
}
static void
-gf100_clk_prog_2(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_2(struct gf100_clk *clk, int idx)
{
- struct gf100_clk_info *info = &priv->eng[clk];
- const u32 addr = 0x137000 + (clk * 0x20);
- if (clk <= 7) {
- nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
- nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+ struct gf100_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
+ const u32 addr = 0x137000 + (idx * 0x20);
+ if (idx <= 7) {
+ nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+ nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
if (info->coef) {
- nv_wr32(priv, addr + 0x04, info->coef);
- nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
- nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
- nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+ nvkm_wr32(device, addr + 0x04, info->coef);
+ nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
+ break;
+ );
+ nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
}
}
}
static void
-gf100_clk_prog_3(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_3(struct gf100_clk *clk, int idx)
{
- struct gf100_clk_info *info = &priv->eng[clk];
+ struct gf100_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
if (info->ssel) {
- nv_mask(priv, 0x137100, (1 << clk), info->ssel);
- nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+ nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
+ if (tmp == info->ssel)
+ break;
+ );
}
}
static void
-gf100_clk_prog_4(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_4(struct gf100_clk *clk, int idx)
{
- struct gf100_clk_info *info = &priv->eng[clk];
- nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+ struct gf100_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
}
static int
-gf100_clk_prog(struct nvkm_clk *clk)
+gf100_clk_prog(struct nvkm_clk *base)
{
- struct gf100_clk_priv *priv = (void *)clk;
+ struct gf100_clk *clk = gf100_clk(base);
struct {
- void (*exec)(struct gf100_clk_priv *, int);
+ void (*exec)(struct gf100_clk *, int);
} stage[] = {
{ gf100_clk_prog_0 }, /* div programming */
{ gf100_clk_prog_1 }, /* select div mode */
@@ -396,10 +414,10 @@ gf100_clk_prog(struct nvkm_clk *clk)
int i, j;
for (i = 0; i < ARRAY_SIZE(stage); i++) {
- for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
- if (!priv->eng[j].freq)
+ for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
+ if (!clk->eng[j].freq)
continue;
- stage[i].exec(priv, j);
+ stage[i].exec(clk, j);
}
}
@@ -407,56 +425,42 @@ gf100_clk_prog(struct nvkm_clk *clk)
}
static void
-gf100_clk_tidy(struct nvkm_clk *clk)
+gf100_clk_tidy(struct nvkm_clk *base)
{
- struct gf100_clk_priv *priv = (void *)clk;
- memset(priv->eng, 0x00, sizeof(priv->eng));
+ struct gf100_clk *clk = gf100_clk(base);
+ memset(clk->eng, 0x00, sizeof(clk->eng));
}
-static struct nvkm_domain
-gf100_domain[] = {
- { nv_clk_src_crystal, 0xff },
- { nv_clk_src_href , 0xff },
- { nv_clk_src_hubk06 , 0x00 },
- { nv_clk_src_hubk01 , 0x01 },
- { nv_clk_src_copy , 0x02 },
- { nv_clk_src_gpc , 0x03, 0, "core", 2000 },
- { nv_clk_src_rop , 0x04 },
- { nv_clk_src_mem , 0x05, 0, "memory", 1000 },
- { nv_clk_src_vdec , 0x06 },
- { nv_clk_src_daemon , 0x0a },
- { nv_clk_src_hubk07 , 0x0b },
- { nv_clk_src_max }
+static const struct nvkm_clk_func
+gf100_clk = {
+ .read = gf100_clk_read,
+ .calc = gf100_clk_calc,
+ .prog = gf100_clk_prog,
+ .tidy = gf100_clk_tidy,
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_hubk06 , 0x00 },
+ { nv_clk_src_hubk01 , 0x01 },
+ { nv_clk_src_copy , 0x02 },
+ { nv_clk_src_gpc , 0x03, 0, "core", 2000 },
+ { nv_clk_src_rop , 0x04 },
+ { nv_clk_src_mem , 0x05, 0, "memory", 1000 },
+ { nv_clk_src_vdec , 0x06 },
+ { nv_clk_src_daemon , 0x0a },
+ { nv_clk_src_hubk07 , 0x0b },
+ { nv_clk_src_max }
+ }
};
-static int
-gf100_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
- struct gf100_clk_priv *priv;
- int ret;
+ struct gf100_clk *clk;
- ret = nvkm_clk_create(parent, engine, oclass, gf100_domain,
- NULL, 0, false, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+ return -ENOMEM;
+ *pclk = &clk->base;
- priv->base.read = gf100_clk_read;
- priv->base.calc = gf100_clk_calc;
- priv->base.prog = gf100_clk_prog;
- priv->base.tidy = gf100_clk_tidy;
- return 0;
+ return nvkm_clk_ctor(&gf100_clk, device, index, false, &clk->base);
}
-
-struct nvkm_oclass
-gf100_clk_oclass = {
- .handle = NV_SUBDEV(CLK, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index e9b2310bdfbb..396f7e4dad0a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -21,10 +21,10 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/clk.h>
+#define gk104_clk(p) container_of((p), struct gk104_clk, base)
+#include "priv.h"
#include "pll.h"
-#include <core/device.h>
#include <subdev/timer.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
@@ -38,28 +38,30 @@ struct gk104_clk_info {
u32 coef;
};
-struct gk104_clk_priv {
+struct gk104_clk {
struct nvkm_clk base;
struct gk104_clk_info eng[16];
};
-static u32 read_div(struct gk104_clk_priv *, int, u32, u32);
-static u32 read_pll(struct gk104_clk_priv *, u32);
+static u32 read_div(struct gk104_clk *, int, u32, u32);
+static u32 read_pll(struct gk104_clk *, u32);
static u32
-read_vco(struct gk104_clk_priv *priv, u32 dsrc)
+read_vco(struct gk104_clk *clk, u32 dsrc)
{
- u32 ssrc = nv_rd32(priv, dsrc);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ssrc = nvkm_rd32(device, dsrc);
if (!(ssrc & 0x00000100))
- return read_pll(priv, 0x00e800);
- return read_pll(priv, 0x00e820);
+ return read_pll(clk, 0x00e800);
+ return read_pll(clk, 0x00e820);
}
static u32
-read_pll(struct gk104_clk_priv *priv, u32 pll)
+read_pll(struct gk104_clk *clk, u32 pll)
{
- u32 ctrl = nv_rd32(priv, pll + 0x00);
- u32 coef = nv_rd32(priv, pll + 0x04);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ctrl = nvkm_rd32(device, pll + 0x00);
+ u32 coef = nvkm_rd32(device, pll + 0x04);
u32 P = (coef & 0x003f0000) >> 16;
u32 N = (coef & 0x0000ff00) >> 8;
u32 M = (coef & 0x000000ff) >> 0;
@@ -72,22 +74,22 @@ read_pll(struct gk104_clk_priv *priv, u32 pll)
switch (pll) {
case 0x00e800:
case 0x00e820:
- sclk = nv_device(priv)->crystal;
+ sclk = device->crystal;
P = 1;
break;
case 0x132000:
- sclk = read_pll(priv, 0x132020);
+ sclk = read_pll(clk, 0x132020);
P = (coef & 0x10000000) ? 2 : 1;
break;
case 0x132020:
- sclk = read_div(priv, 0, 0x137320, 0x137330);
- fN = nv_rd32(priv, pll + 0x10) >> 16;
+ sclk = read_div(clk, 0, 0x137320, 0x137330);
+ fN = nvkm_rd32(device, pll + 0x10) >> 16;
break;
case 0x137000:
case 0x137020:
case 0x137040:
case 0x1370e0:
- sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+ sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
break;
default:
return 0;
@@ -101,70 +103,73 @@ read_pll(struct gk104_clk_priv *priv, u32 pll)
}
static u32
-read_div(struct gk104_clk_priv *priv, int doff, u32 dsrc, u32 dctl)
+read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
{
- u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
- u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+ u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
switch (ssrc & 0x00000003) {
case 0:
if ((ssrc & 0x00030000) != 0x00030000)
- return nv_device(priv)->crystal;
+ return device->crystal;
return 108000;
case 2:
return 100000;
case 3:
if (sctl & 0x80000000) {
- u32 sclk = read_vco(priv, dsrc + (doff * 4));
+ u32 sclk = read_vco(clk, dsrc + (doff * 4));
u32 sdiv = (sctl & 0x0000003f) + 2;
return (sclk * 2) / sdiv;
}
- return read_vco(priv, dsrc + (doff * 4));
+ return read_vco(clk, dsrc + (doff * 4));
default:
return 0;
}
}
static u32
-read_mem(struct gk104_clk_priv *priv)
+read_mem(struct gk104_clk *clk)
{
- switch (nv_rd32(priv, 0x1373f4) & 0x0000000f) {
- case 1: return read_pll(priv, 0x132020);
- case 2: return read_pll(priv, 0x132000);
+ struct nvkm_device *device = clk->base.subdev.device;
+ switch (nvkm_rd32(device, 0x1373f4) & 0x0000000f) {
+ case 1: return read_pll(clk, 0x132020);
+ case 2: return read_pll(clk, 0x132000);
default:
return 0;
}
}
static u32
-read_clk(struct gk104_clk_priv *priv, int clk)
+read_clk(struct gk104_clk *clk, int idx)
{
- u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
u32 sclk, sdiv;
- if (clk < 7) {
- u32 ssel = nv_rd32(priv, 0x137100);
- if (ssel & (1 << clk)) {
- sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+ if (idx < 7) {
+ u32 ssel = nvkm_rd32(device, 0x137100);
+ if (ssel & (1 << idx)) {
+ sclk = read_pll(clk, 0x137000 + (idx * 0x20));
sdiv = 1;
} else {
- sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+ sclk = read_div(clk, idx, 0x137160, 0x1371d0);
sdiv = 0;
}
} else {
- u32 ssrc = nv_rd32(priv, 0x137160 + (clk * 0x04));
+ u32 ssrc = nvkm_rd32(device, 0x137160 + (idx * 0x04));
if ((ssrc & 0x00000003) == 0x00000003) {
- sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+ sclk = read_div(clk, idx, 0x137160, 0x1371d0);
if (ssrc & 0x00000100) {
if (ssrc & 0x40000000)
- sclk = read_pll(priv, 0x1370e0);
+ sclk = read_pll(clk, 0x1370e0);
sdiv = 1;
} else {
sdiv = 0;
}
} else {
- sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+ sclk = read_div(clk, idx, 0x137160, 0x1371d0);
sdiv = 0;
}
}
@@ -181,10 +186,11 @@ read_clk(struct gk104_clk_priv *priv, int clk)
}
static int
-gk104_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gk104_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
- struct nvkm_device *device = nv_device(clk);
- struct gk104_clk_priv *priv = (void *)clk;
+ struct gk104_clk *clk = gk104_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
switch (src) {
case nv_clk_src_crystal:
@@ -192,29 +198,29 @@ gk104_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
case nv_clk_src_href:
return 100000;
case nv_clk_src_mem:
- return read_mem(priv);
+ return read_mem(clk);
case nv_clk_src_gpc:
- return read_clk(priv, 0x00);
+ return read_clk(clk, 0x00);
case nv_clk_src_rop:
- return read_clk(priv, 0x01);
+ return read_clk(clk, 0x01);
case nv_clk_src_hubk07:
- return read_clk(priv, 0x02);
+ return read_clk(clk, 0x02);
case nv_clk_src_hubk06:
- return read_clk(priv, 0x07);
+ return read_clk(clk, 0x07);
case nv_clk_src_hubk01:
- return read_clk(priv, 0x08);
+ return read_clk(clk, 0x08);
case nv_clk_src_daemon:
- return read_clk(priv, 0x0c);
+ return read_clk(clk, 0x0c);
case nv_clk_src_vdec:
- return read_clk(priv, 0x0e);
+ return read_clk(clk, 0x0e);
default:
- nv_error(clk, "invalid clock source %d\n", src);
+ nvkm_error(subdev, "invalid clock source %d\n", src);
return -EINVAL;
}
}
static u32
-calc_div(struct gk104_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+calc_div(struct gk104_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
{
u32 div = min((ref * 2) / freq, (u32)65);
if (div < 2)
@@ -225,7 +231,7 @@ calc_div(struct gk104_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
}
static u32
-calc_src(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+calc_src(struct gk104_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
{
u32 sclk;
@@ -247,28 +253,29 @@ calc_src(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
}
/* otherwise, calculate the closest divider */
- sclk = read_vco(priv, 0x137160 + (clk * 4));
- if (clk < 7)
- sclk = calc_div(priv, clk, sclk, freq, ddiv);
+ sclk = read_vco(clk, 0x137160 + (idx * 4));
+ if (idx < 7)
+ sclk = calc_div(clk, idx, sclk, freq, ddiv);
return sclk;
}
static u32
-calc_pll(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *coef)
+calc_pll(struct gk104_clk *clk, int idx, u32 freq, u32 *coef)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_bios *bios = subdev->device->bios;
struct nvbios_pll limits;
int N, M, P, ret;
- ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+ ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
if (ret)
return 0;
- limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+ limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
if (!limits.refclk)
return 0;
- ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+ ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
if (ret <= 0)
return 0;
@@ -277,10 +284,10 @@ calc_pll(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *coef)
}
static int
-calc_clk(struct gk104_clk_priv *priv,
- struct nvkm_cstate *cstate, int clk, int dom)
+calc_clk(struct gk104_clk *clk,
+ struct nvkm_cstate *cstate, int idx, int dom)
{
- struct gk104_clk_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &clk->eng[idx];
u32 freq = cstate->domain[dom];
u32 src0, div0, div1D, div1P = 0;
u32 clk0, clk1 = 0;
@@ -290,16 +297,16 @@ calc_clk(struct gk104_clk_priv *priv,
return 0;
/* first possible path, using only dividers */
- clk0 = calc_src(priv, clk, freq, &src0, &div0);
- clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+ clk0 = calc_src(clk, idx, freq, &src0, &div0);
+ clk0 = calc_div(clk, idx, clk0, freq, &div1D);
/* see if we can get any closer using PLLs */
- if (clk0 != freq && (0x0000ff87 & (1 << clk))) {
- if (clk <= 7)
- clk1 = calc_pll(priv, clk, freq, &info->coef);
+ if (clk0 != freq && (0x0000ff87 & (1 << idx))) {
+ if (idx <= 7)
+ clk1 = calc_pll(clk, idx, freq, &info->coef);
else
clk1 = cstate->domain[nv_clk_src_hubk06];
- clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+ clk1 = calc_div(clk, idx, clk1, freq, &div1P);
}
/* select the method which gets closest to target freq */
@@ -320,7 +327,7 @@ calc_clk(struct gk104_clk_priv *priv,
info->mdiv |= 0x80000000;
info->mdiv |= div1P << 8;
}
- info->ssel = (1 << clk);
+ info->ssel = (1 << idx);
info->dsrc = 0x40000100;
info->freq = clk1;
}
@@ -329,98 +336,115 @@ calc_clk(struct gk104_clk_priv *priv,
}
static int
-gk104_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gk104_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
- struct gk104_clk_priv *priv = (void *)clk;
+ struct gk104_clk *clk = gk104_clk(base);
int ret;
- if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
- (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
- (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
- (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
- (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
- (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
- (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+ if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
+ (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
+ (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
+ (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
+ (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
+ (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) ||
+ (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
return ret;
return 0;
}
static void
-gk104_clk_prog_0(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_0(struct gk104_clk *clk, int idx)
{
- struct gk104_clk_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
if (!info->ssel) {
- nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x8000003f, info->ddiv);
- nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+ nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
+ nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
}
}
static void
-gk104_clk_prog_1_0(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_1_0(struct gk104_clk *clk, int idx)
{
- nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
- nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
+ break;
+ );
}
static void
-gk104_clk_prog_1_1(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_1_1(struct gk104_clk *clk, int idx)
{
- nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000000);
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
}
static void
-gk104_clk_prog_2(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_2(struct gk104_clk *clk, int idx)
{
- struct gk104_clk_info *info = &priv->eng[clk];
- const u32 addr = 0x137000 + (clk * 0x20);
- nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
- nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+ struct gk104_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
+ const u32 addr = 0x137000 + (idx * 0x20);
+ nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+ nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
if (info->coef) {
- nv_wr32(priv, addr + 0x04, info->coef);
- nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
- nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
- nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+ nvkm_wr32(device, addr + 0x04, info->coef);
+ nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
+ break;
+ );
+ nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
}
}
static void
-gk104_clk_prog_3(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_3(struct gk104_clk *clk, int idx)
{
- struct gk104_clk_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
if (info->ssel)
- nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f00, info->mdiv);
+ nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
else
- nv_mask(priv, 0x137250 + (clk * 0x04), 0x0000003f, info->mdiv);
+ nvkm_mask(device, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
}
static void
-gk104_clk_prog_4_0(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_4_0(struct gk104_clk *clk, int idx)
{
- struct gk104_clk_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
if (info->ssel) {
- nv_mask(priv, 0x137100, (1 << clk), info->ssel);
- nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+ nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
+ if (tmp == info->ssel)
+ break;
+ );
}
}
static void
-gk104_clk_prog_4_1(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_4_1(struct gk104_clk *clk, int idx)
{
- struct gk104_clk_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
if (info->ssel) {
- nv_mask(priv, 0x137160 + (clk * 0x04), 0x40000000, 0x40000000);
- nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000100);
+ nvkm_mask(device, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
+ nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
}
}
static int
-gk104_clk_prog(struct nvkm_clk *clk)
+gk104_clk_prog(struct nvkm_clk *base)
{
- struct gk104_clk_priv *priv = (void *)clk;
+ struct gk104_clk *clk = gk104_clk(base);
struct {
u32 mask;
- void (*exec)(struct gk104_clk_priv *, int);
+ void (*exec)(struct gk104_clk *, int);
} stage[] = {
{ 0x007f, gk104_clk_prog_0 }, /* div programming */
{ 0x007f, gk104_clk_prog_1_0 }, /* select div mode */
@@ -433,12 +457,12 @@ gk104_clk_prog(struct nvkm_clk *clk)
int i, j;
for (i = 0; i < ARRAY_SIZE(stage); i++) {
- for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
+ for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
if (!(stage[i].mask & (1 << j)))
continue;
- if (!priv->eng[j].freq)
+ if (!clk->eng[j].freq)
continue;
- stage[i].exec(priv, j);
+ stage[i].exec(clk, j);
}
}
@@ -446,55 +470,41 @@ gk104_clk_prog(struct nvkm_clk *clk)
}
static void
-gk104_clk_tidy(struct nvkm_clk *clk)
+gk104_clk_tidy(struct nvkm_clk *base)
{
- struct gk104_clk_priv *priv = (void *)clk;
- memset(priv->eng, 0x00, sizeof(priv->eng));
+ struct gk104_clk *clk = gk104_clk(base);
+ memset(clk->eng, 0x00, sizeof(clk->eng));
}
-static struct nvkm_domain
-gk104_domain[] = {
- { nv_clk_src_crystal, 0xff },
- { nv_clk_src_href , 0xff },
- { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
- { nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
- { nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE },
- { nv_clk_src_mem , 0x03, 0, "memory", 500 },
- { nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
- { nv_clk_src_hubk01 , 0x05 },
- { nv_clk_src_vdec , 0x06 },
- { nv_clk_src_daemon , 0x07 },
- { nv_clk_src_max }
+static const struct nvkm_clk_func
+gk104_clk = {
+ .read = gk104_clk_read,
+ .calc = gk104_clk_calc,
+ .prog = gk104_clk_prog,
+ .tidy = gk104_clk_tidy,
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
+ { nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
+ { nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE },
+ { nv_clk_src_mem , 0x03, 0, "memory", 500 },
+ { nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
+ { nv_clk_src_hubk01 , 0x05 },
+ { nv_clk_src_vdec , 0x06 },
+ { nv_clk_src_daemon , 0x07 },
+ { nv_clk_src_max }
+ }
};
-static int
-gk104_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
- struct gk104_clk_priv *priv;
- int ret;
+ struct gk104_clk *clk;
- ret = nvkm_clk_create(parent, engine, oclass, gk104_domain,
- NULL, 0, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+ return -ENOMEM;
+ *pclk = &clk->base;
- priv->base.read = gk104_clk_read;
- priv->base.calc = gk104_clk_calc;
- priv->base.prog = gk104_clk_prog;
- priv->base.tidy = gk104_clk_tidy;
- return 0;
+ return nvkm_clk_ctor(&gk104_clk, device, index, true, &clk->base);
}
-
-struct nvkm_oclass
-gk104_clk_oclass = {
- .handle = NV_SUBDEV(CLK, 0xe0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index 65c532742b08..254094ab7fb8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -22,14 +22,11 @@
* Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
*
*/
-#include <subdev/clk.h>
-#include <subdev/timer.h>
-
-#include <core/device.h>
+#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
+#include "priv.h"
-#ifdef __KERNEL__
-#include <nouveau_platform.h>
-#endif
+#include <core/tegra.h>
+#include <subdev/timer.h>
#define MHZ (1000 * 1000)
@@ -117,41 +114,42 @@ static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
.min_pl = 1, .max_pl = 32,
};
-struct gk20a_clk_priv {
+struct gk20a_clk {
struct nvkm_clk base;
const struct gk20a_clk_pllg_params *params;
u32 m, n, pl;
u32 parent_rate;
};
-#define to_gk20a_clk(base) container_of(base, struct gk20a_clk_priv, base)
static void
-gk20a_pllg_read_mnp(struct gk20a_clk_priv *priv)
+gk20a_pllg_read_mnp(struct gk20a_clk *clk)
{
+ struct nvkm_device *device = clk->base.subdev.device;
u32 val;
- val = nv_rd32(priv, GPCPLL_COEFF);
- priv->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
- priv->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
- priv->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
+ val = nvkm_rd32(device, GPCPLL_COEFF);
+ clk->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
+ clk->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
+ clk->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
}
static u32
-gk20a_pllg_calc_rate(struct gk20a_clk_priv *priv)
+gk20a_pllg_calc_rate(struct gk20a_clk *clk)
{
u32 rate;
u32 divider;
- rate = priv->parent_rate * priv->n;
- divider = priv->m * pl_to_div[priv->pl];
+ rate = clk->parent_rate * clk->n;
+ divider = clk->m * pl_to_div[clk->pl];
do_div(rate, divider);
return rate / 2;
}
static int
-gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
+gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
{
+ struct nvkm_subdev *subdev = &clk->base.subdev;
u32 target_clk_f, ref_clk_f, target_freq;
u32 min_vco_f, max_vco_f;
u32 low_pl, high_pl, best_pl;
@@ -163,13 +161,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
u32 pl;
target_clk_f = rate * 2 / MHZ;
- ref_clk_f = priv->parent_rate / MHZ;
+ ref_clk_f = clk->parent_rate / MHZ;
- max_vco_f = priv->params->max_vco;
- min_vco_f = priv->params->min_vco;
- best_m = priv->params->max_m;
- best_n = priv->params->min_n;
- best_pl = priv->params->min_pl;
+ max_vco_f = clk->params->max_vco;
+ min_vco_f = clk->params->min_vco;
+ best_m = clk->params->max_m;
+ best_n = clk->params->min_n;
+ best_pl = clk->params->min_pl;
target_vco_f = target_clk_f + target_clk_f / 50;
if (max_vco_f < target_vco_f)
@@ -177,13 +175,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
/* min_pl <= high_pl <= max_pl */
high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
- high_pl = min(high_pl, priv->params->max_pl);
- high_pl = max(high_pl, priv->params->min_pl);
+ high_pl = min(high_pl, clk->params->max_pl);
+ high_pl = max(high_pl, clk->params->min_pl);
/* min_pl <= low_pl <= max_pl */
low_pl = min_vco_f / target_vco_f;
- low_pl = min(low_pl, priv->params->max_pl);
- low_pl = max(low_pl, priv->params->min_pl);
+ low_pl = min(low_pl, clk->params->max_pl);
+ low_pl = max(low_pl, clk->params->min_pl);
/* Find Indices of high_pl and low_pl */
for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
@@ -199,30 +197,30 @@ gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
}
}
- nv_debug(priv, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
- pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
+ nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
+ pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
/* Select lowest possible VCO */
for (pl = low_pl; pl <= high_pl; pl++) {
target_vco_f = target_clk_f * pl_to_div[pl];
- for (m = priv->params->min_m; m <= priv->params->max_m; m++) {
+ for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
u_f = ref_clk_f / m;
- if (u_f < priv->params->min_u)
+ if (u_f < clk->params->min_u)
break;
- if (u_f > priv->params->max_u)
+ if (u_f > clk->params->max_u)
continue;
n = (target_vco_f * m) / ref_clk_f;
n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;
- if (n > priv->params->max_n)
+ if (n > clk->params->max_n)
break;
for (; n <= n2; n++) {
- if (n < priv->params->min_n)
+ if (n < clk->params->min_n)
continue;
- if (n > priv->params->max_n)
+ if (n > clk->params->max_n)
break;
vco_f = ref_clk_f * n / m;
@@ -250,71 +248,75 @@ found_match:
WARN_ON(best_delta == ~0);
if (best_delta != 0)
- nv_debug(priv, "no best match for target @ %dMHz on gpc_pll",
- target_clk_f);
+ nvkm_debug(subdev,
+ "no best match for target @ %dMHz on gpc_pll",
+ target_clk_f);
- priv->m = best_m;
- priv->n = best_n;
- priv->pl = best_pl;
+ clk->m = best_m;
+ clk->n = best_n;
+ clk->pl = best_pl;
- target_freq = gk20a_pllg_calc_rate(priv) / MHZ;
+ target_freq = gk20a_pllg_calc_rate(clk) / MHZ;
- nv_debug(priv, "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
- target_freq, priv->m, priv->n, priv->pl, pl_to_div[priv->pl]);
+ nvkm_debug(subdev,
+ "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
+ target_freq, clk->m, clk->n, clk->pl, pl_to_div[clk->pl]);
return 0;
}
static int
-gk20a_pllg_slide(struct gk20a_clk_priv *priv, u32 n)
+gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
{
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
u32 val;
int ramp_timeout;
/* get old coefficients */
- val = nv_rd32(priv, GPCPLL_COEFF);
+ val = nvkm_rd32(device, GPCPLL_COEFF);
/* do nothing if NDIV is the same */
if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
return 0;
/* setup */
- nv_mask(priv, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
+ nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
- nv_mask(priv, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
+ nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
/* pll slowdown mode */
- nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
/* new ndiv ready for ramp */
- val = nv_rd32(priv, GPCPLL_COEFF);
+ val = nvkm_rd32(device, GPCPLL_COEFF);
val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
udelay(1);
- nv_wr32(priv, GPCPLL_COEFF, val);
+ nvkm_wr32(device, GPCPLL_COEFF, val);
/* dynamic ramp to new ndiv */
- val = nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
+ val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
udelay(1);
- nv_wr32(priv, GPCPLL_NDIV_SLOWDOWN, val);
+ nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);
for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
udelay(1);
- val = nv_rd32(priv, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
+ val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
break;
}
/* exit slowdown mode */
- nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
- nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
+ nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
if (ramp_timeout <= 0) {
- nv_error(priv, "gpcpll dynamic ramp timeout\n");
+ nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
return -ETIMEDOUT;
}
@@ -322,149 +324,147 @@ gk20a_pllg_slide(struct gk20a_clk_priv *priv, u32 n)
}
static void
-_gk20a_pllg_enable(struct gk20a_clk_priv *priv)
+_gk20a_pllg_enable(struct gk20a_clk *clk)
{
- nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
- nv_rd32(priv, GPCPLL_CFG);
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
+ nvkm_rd32(device, GPCPLL_CFG);
}
static void
-_gk20a_pllg_disable(struct gk20a_clk_priv *priv)
+_gk20a_pllg_disable(struct gk20a_clk *clk)
{
- nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
- nv_rd32(priv, GPCPLL_CFG);
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
+ nvkm_rd32(device, GPCPLL_CFG);
}
static int
-_gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv, bool allow_slide)
+_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
{
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
u32 val, cfg;
u32 m_old, pl_old, n_lo;
/* get old coefficients */
- val = nv_rd32(priv, GPCPLL_COEFF);
+ val = nvkm_rd32(device, GPCPLL_COEFF);
m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
/* do NDIV slide if there is no change in M and PL */
- cfg = nv_rd32(priv, GPCPLL_CFG);
- if (allow_slide && priv->m == m_old && priv->pl == pl_old &&
+ cfg = nvkm_rd32(device, GPCPLL_CFG);
+ if (allow_slide && clk->m == m_old && clk->pl == pl_old &&
(cfg & GPCPLL_CFG_ENABLE)) {
- return gk20a_pllg_slide(priv, priv->n);
+ return gk20a_pllg_slide(clk, clk->n);
}
/* slide down to NDIV_LO */
- n_lo = DIV_ROUND_UP(m_old * priv->params->min_vco,
- priv->parent_rate / MHZ);
+ n_lo = DIV_ROUND_UP(m_old * clk->params->min_vco,
+ clk->parent_rate / MHZ);
if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
- int ret = gk20a_pllg_slide(priv, n_lo);
+ int ret = gk20a_pllg_slide(clk, n_lo);
if (ret)
return ret;
}
/* split FO-to-bypass jump in halves by setting out divider 1:2 */
- nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
/* put PLL in bypass before programming it */
- val = nv_rd32(priv, SEL_VCO);
+ val = nvkm_rd32(device, SEL_VCO);
val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
udelay(2);
- nv_wr32(priv, SEL_VCO, val);
+ nvkm_wr32(device, SEL_VCO, val);
/* get out from IDDQ */
- val = nv_rd32(priv, GPCPLL_CFG);
+ val = nvkm_rd32(device, GPCPLL_CFG);
if (val & GPCPLL_CFG_IDDQ) {
val &= ~GPCPLL_CFG_IDDQ;
- nv_wr32(priv, GPCPLL_CFG, val);
- nv_rd32(priv, GPCPLL_CFG);
+ nvkm_wr32(device, GPCPLL_CFG, val);
+ nvkm_rd32(device, GPCPLL_CFG);
udelay(2);
}
- _gk20a_pllg_disable(priv);
+ _gk20a_pllg_disable(clk);
- nv_debug(priv, "%s: m=%d n=%d pl=%d\n", __func__, priv->m, priv->n,
- priv->pl);
+ nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
+ clk->m, clk->n, clk->pl);
- n_lo = DIV_ROUND_UP(priv->m * priv->params->min_vco,
- priv->parent_rate / MHZ);
- val = priv->m << GPCPLL_COEFF_M_SHIFT;
- val |= (allow_slide ? n_lo : priv->n) << GPCPLL_COEFF_N_SHIFT;
- val |= priv->pl << GPCPLL_COEFF_P_SHIFT;
- nv_wr32(priv, GPCPLL_COEFF, val);
+ n_lo = DIV_ROUND_UP(clk->m * clk->params->min_vco,
+ clk->parent_rate / MHZ);
+ val = clk->m << GPCPLL_COEFF_M_SHIFT;
+ val |= (allow_slide ? n_lo : clk->n) << GPCPLL_COEFF_N_SHIFT;
+ val |= clk->pl << GPCPLL_COEFF_P_SHIFT;
+ nvkm_wr32(device, GPCPLL_COEFF, val);
- _gk20a_pllg_enable(priv);
+ _gk20a_pllg_enable(clk);
- val = nv_rd32(priv, GPCPLL_CFG);
+ val = nvkm_rd32(device, GPCPLL_CFG);
if (val & GPCPLL_CFG_LOCK_DET_OFF) {
val &= ~GPCPLL_CFG_LOCK_DET_OFF;
- nv_wr32(priv, GPCPLL_CFG, val);
+ nvkm_wr32(device, GPCPLL_CFG, val);
}
- if (!nvkm_timer_wait_eq(priv, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
- GPCPLL_CFG_LOCK)) {
- nv_error(priv, "%s: timeout waiting for pllg lock\n", __func__);
+ if (nvkm_usec(device, 300,
+ if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
+ break;
+ ) < 0)
return -ETIMEDOUT;
- }
/* switch to VCO mode */
- nv_mask(priv, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+ nvkm_mask(device, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
/* restore out divider 1:1 */
- val = nv_rd32(priv, GPC2CLK_OUT);
+ val = nvkm_rd32(device, GPC2CLK_OUT);
val &= ~GPC2CLK_OUT_VCODIV_MASK;
udelay(2);
- nv_wr32(priv, GPC2CLK_OUT, val);
+ nvkm_wr32(device, GPC2CLK_OUT, val);
/* slide up to new NDIV */
- return allow_slide ? gk20a_pllg_slide(priv, priv->n) : 0;
+ return allow_slide ? gk20a_pllg_slide(clk, clk->n) : 0;
}
static int
-gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv)
+gk20a_pllg_program_mnp(struct gk20a_clk *clk)
{
int err;
- err = _gk20a_pllg_program_mnp(priv, true);
+ err = _gk20a_pllg_program_mnp(clk, true);
if (err)
- err = _gk20a_pllg_program_mnp(priv, false);
+ err = _gk20a_pllg_program_mnp(clk, false);
return err;
}
static void
-gk20a_pllg_disable(struct gk20a_clk_priv *priv)
+gk20a_pllg_disable(struct gk20a_clk *clk)
{
+ struct nvkm_device *device = clk->base.subdev.device;
u32 val;
/* slide to VCO min */
- val = nv_rd32(priv, GPCPLL_CFG);
+ val = nvkm_rd32(device, GPCPLL_CFG);
if (val & GPCPLL_CFG_ENABLE) {
u32 coeff, m, n_lo;
- coeff = nv_rd32(priv, GPCPLL_COEFF);
+ coeff = nvkm_rd32(device, GPCPLL_COEFF);
m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
- n_lo = DIV_ROUND_UP(m * priv->params->min_vco,
- priv->parent_rate / MHZ);
- gk20a_pllg_slide(priv, n_lo);
+ n_lo = DIV_ROUND_UP(m * clk->params->min_vco,
+ clk->parent_rate / MHZ);
+ gk20a_pllg_slide(clk, n_lo);
}
/* put PLL in bypass before disabling it */
- nv_mask(priv, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
- _gk20a_pllg_disable(priv);
+ _gk20a_pllg_disable(clk);
}
#define GK20A_CLK_GPC_MDIV 1000
-static struct nvkm_domain
-gk20a_domains[] = {
- { nv_clk_src_crystal, 0xff },
- { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
- { nv_clk_src_max }
-};
-
static struct nvkm_pstate
gk20a_pstates[] = {
{
@@ -560,87 +560,99 @@ gk20a_pstates[] = {
};
static int
-gk20a_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
- struct gk20a_clk_priv *priv = (void *)clk;
+ struct gk20a_clk *clk = gk20a_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
switch (src) {
case nv_clk_src_crystal:
- return nv_device(clk)->crystal;
+ return device->crystal;
case nv_clk_src_gpc:
- gk20a_pllg_read_mnp(priv);
- return gk20a_pllg_calc_rate(priv) / GK20A_CLK_GPC_MDIV;
+ gk20a_pllg_read_mnp(clk);
+ return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
default:
- nv_error(clk, "invalid clock source %d\n", src);
+ nvkm_error(subdev, "invalid clock source %d\n", src);
return -EINVAL;
}
}
static int
-gk20a_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
- struct gk20a_clk_priv *priv = (void *)clk;
+ struct gk20a_clk *clk = gk20a_clk(base);
- return gk20a_pllg_calc_mnp(priv, cstate->domain[nv_clk_src_gpc] *
+ return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
GK20A_CLK_GPC_MDIV);
}
static int
-gk20a_clk_prog(struct nvkm_clk *clk)
+gk20a_clk_prog(struct nvkm_clk *base)
{
- struct gk20a_clk_priv *priv = (void *)clk;
+ struct gk20a_clk *clk = gk20a_clk(base);
- return gk20a_pllg_program_mnp(priv);
+ return gk20a_pllg_program_mnp(clk);
}
static void
-gk20a_clk_tidy(struct nvkm_clk *clk)
+gk20a_clk_tidy(struct nvkm_clk *base)
{
}
-static int
-gk20a_clk_fini(struct nvkm_object *object, bool suspend)
+static void
+gk20a_clk_fini(struct nvkm_clk *base)
{
- struct gk20a_clk_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_clk_fini(&priv->base, false);
-
- gk20a_pllg_disable(priv);
-
- return ret;
+ struct gk20a_clk *clk = gk20a_clk(base);
+ gk20a_pllg_disable(clk);
}
static int
-gk20a_clk_init(struct nvkm_object *object)
+gk20a_clk_init(struct nvkm_clk *base)
{
- struct gk20a_clk_priv *priv = (void *)object;
+ struct gk20a_clk *clk = gk20a_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
int ret;
- nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
-
- ret = nvkm_clk_init(&priv->base);
- if (ret)
- return ret;
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
- ret = gk20a_clk_prog(&priv->base);
+ ret = gk20a_clk_prog(&clk->base);
if (ret) {
- nv_error(priv, "cannot initialize clock\n");
+ nvkm_error(subdev, "cannot initialize clock\n");
return ret;
}
return 0;
}
-static int
-gk20a_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static const struct nvkm_clk_func
+gk20a_clk = {
+ .init = gk20a_clk_init,
+ .fini = gk20a_clk_fini,
+ .read = gk20a_clk_read,
+ .calc = gk20a_clk_calc,
+ .prog = gk20a_clk_prog,
+ .tidy = gk20a_clk_tidy,
+ .pstates = gk20a_pstates,
+ .nr_pstates = ARRAY_SIZE(gk20a_pstates),
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+ { nv_clk_src_max }
+ }
+};
+
+int
+gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
- struct gk20a_clk_priv *priv;
- struct nouveau_platform_device *plat;
- int ret;
- int i;
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
+ struct gk20a_clk *clk;
+ int ret, i;
+
+ if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+ return -ENOMEM;
+ *pclk = &clk->base;
/* Finish initializing the pstates */
for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) {
@@ -648,33 +660,11 @@ gk20a_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
gk20a_pstates[i].pstate = i + 1;
}
- ret = nvkm_clk_create(parent, engine, oclass, gk20a_domains,
- gk20a_pstates, ARRAY_SIZE(gk20a_pstates),
- true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ clk->params = &gk20a_pllg_params;
+ clk->parent_rate = clk_get_rate(tdev->clk);
- priv->params = &gk20a_pllg_params;
-
- plat = nv_device_to_platform(nv_device(parent));
- priv->parent_rate = clk_get_rate(plat->gpu->clk);
- nv_info(priv, "parent clock rate: %d Mhz\n", priv->parent_rate / MHZ);
-
- priv->base.read = gk20a_clk_read;
- priv->base.calc = gk20a_clk_calc;
- priv->base.prog = gk20a_clk_prog;
- priv->base.tidy = gk20a_clk_tidy;
- return 0;
+ ret = nvkm_clk_ctor(&gk20a_clk, device, index, true, &clk->base);
+ nvkm_info(&clk->base.subdev, "parent clock rate: %d Mhz\n",
+ clk->parent_rate / MHZ);
+ return ret;
}
-
-struct nvkm_oclass
-gk20a_clk_oclass = {
- .handle = NV_SUBDEV(CLK, 0xea),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk20a_clk_ctor,
- .dtor = _nvkm_subdev_dtor,
- .init = gk20a_clk_init,
- .fini = gk20a_clk_fini,
- },
-};
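Editor's sketch (not part of the patch): several hunks above replace the old boolean nv_wait()/nvkm_timer_wait_eq() helpers with the nvkm_usec()/nvkm_msec() macros, whose last argument is a statement block that is re-polled until it executes break, and which evaluate to a negative value on timeout. A minimal example of the resulting idiom, assuming <subdev/timer.h> is included and using a placeholder register offset and lock bit:

static int
example_wait_lock(struct nvkm_device *device)
{
	/* poll for up to 2ms until the (placeholder) lock bit is set */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x004080) & 0x00000300)
			break;
	) < 0)
		return -ETIMEDOUT;	/* the macro goes negative on timeout */
	return 0;
}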
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 065e9f5c8db9..07feae620c8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -22,56 +22,58 @@
* Authors: Ben Skeggs
* Roy Spliet
*/
+#define gt215_clk(p) container_of((p), struct gt215_clk, base)
#include "gt215.h"
#include "pll.h"
-#include <core/device.h>
#include <engine/fifo.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
-struct gt215_clk_priv {
+struct gt215_clk {
struct nvkm_clk base;
struct gt215_clk_info eng[nv_clk_src_max];
};
-static u32 read_clk(struct gt215_clk_priv *, int, bool);
-static u32 read_pll(struct gt215_clk_priv *, int, u32);
+static u32 read_clk(struct gt215_clk *, int, bool);
+static u32 read_pll(struct gt215_clk *, int, u32);
static u32
-read_vco(struct gt215_clk_priv *priv, int clk)
+read_vco(struct gt215_clk *clk, int idx)
{
- u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
switch (sctl & 0x00000030) {
case 0x00000000:
- return nv_device(priv)->crystal;
+ return device->crystal;
case 0x00000020:
- return read_pll(priv, 0x41, 0x00e820);
+ return read_pll(clk, 0x41, 0x00e820);
case 0x00000030:
- return read_pll(priv, 0x42, 0x00e8a0);
+ return read_pll(clk, 0x42, 0x00e8a0);
default:
return 0;
}
}
static u32
-read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
+read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
{
+ struct nvkm_device *device = clk->base.subdev.device;
u32 sctl, sdiv, sclk;
/* refclk for the 0xe8xx plls is a fixed frequency */
- if (clk >= 0x40) {
- if (nv_device(priv)->chipset == 0xaf) {
+ if (idx >= 0x40) {
+ if (device->chipset == 0xaf) {
/* no joke.. seriously.. sigh.. */
- return nv_rd32(priv, 0x00471c) * 1000;
+ return nvkm_rd32(device, 0x00471c) * 1000;
}
- return nv_device(priv)->crystal;
+ return device->crystal;
}
- sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+ sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
if (!ignore_en && !(sctl & 0x00000100))
return 0;
@@ -83,7 +85,7 @@ read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
switch (sctl & 0x00003000) {
case 0x00000000:
if (!(sctl & 0x00000200))
- return nv_device(priv)->crystal;
+ return device->crystal;
return 0;
case 0x00002000:
if (sctl & 0x00000040)
@@ -94,7 +96,7 @@ read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
if (!(sctl & 0x00000001))
return 0;
- sclk = read_vco(priv, clk);
+ sclk = read_vco(clk, idx);
sdiv = ((sctl & 0x003f0000) >> 16) + 2;
return (sclk * 2) / sdiv;
default:
@@ -103,14 +105,15 @@ read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
}
static u32
-read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
+read_pll(struct gt215_clk *clk, int idx, u32 pll)
{
- u32 ctrl = nv_rd32(priv, pll + 0);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ctrl = nvkm_rd32(device, pll + 0);
u32 sclk = 0, P = 1, N = 1, M = 1;
if (!(ctrl & 0x00000008)) {
if (ctrl & 0x00000001) {
- u32 coef = nv_rd32(priv, pll + 4);
+ u32 coef = nvkm_rd32(device, pll + 4);
M = (coef & 0x000000ff) >> 0;
N = (coef & 0x0000ff00) >> 8;
P = (coef & 0x003f0000) >> 16;
@@ -121,10 +124,10 @@ read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
if ((pll & 0x00ff00) == 0x00e800)
P = 1;
- sclk = read_clk(priv, 0x00 + clk, false);
+ sclk = read_clk(clk, 0x00 + idx, false);
}
} else {
- sclk = read_clk(priv, 0x10 + clk, false);
+ sclk = read_clk(clk, 0x10 + idx, false);
}
if (M * P)
@@ -134,41 +137,43 @@ read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
}
static int
-gt215_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gt215_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
- struct gt215_clk_priv *priv = (void *)clk;
+ struct gt215_clk *clk = gt215_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
u32 hsrc;
switch (src) {
case nv_clk_src_crystal:
- return nv_device(priv)->crystal;
+ return device->crystal;
case nv_clk_src_core:
case nv_clk_src_core_intm:
- return read_pll(priv, 0x00, 0x4200);
+ return read_pll(clk, 0x00, 0x4200);
case nv_clk_src_shader:
- return read_pll(priv, 0x01, 0x4220);
+ return read_pll(clk, 0x01, 0x4220);
case nv_clk_src_mem:
- return read_pll(priv, 0x02, 0x4000);
+ return read_pll(clk, 0x02, 0x4000);
case nv_clk_src_disp:
- return read_clk(priv, 0x20, false);
+ return read_clk(clk, 0x20, false);
case nv_clk_src_vdec:
- return read_clk(priv, 0x21, false);
+ return read_clk(clk, 0x21, false);
case nv_clk_src_daemon:
- return read_clk(priv, 0x25, false);
+ return read_clk(clk, 0x25, false);
case nv_clk_src_host:
- hsrc = (nv_rd32(priv, 0xc040) & 0x30000000) >> 28;
+ hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28;
switch (hsrc) {
case 0:
- return read_clk(priv, 0x1d, false);
+ return read_clk(clk, 0x1d, false);
case 2:
case 3:
return 277000;
default:
- nv_error(clk, "unknown HOST clock source %d\n", hsrc);
+ nvkm_error(subdev, "unknown HOST clock source %d\n", hsrc);
return -EINVAL;
}
default:
- nv_error(clk, "invalid clock source %d\n", src);
+ nvkm_error(subdev, "invalid clock source %d\n", src);
return -EINVAL;
}
@@ -176,10 +181,10 @@ gt215_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
}
int
-gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
+gt215_clk_info(struct nvkm_clk *base, int idx, u32 khz,
struct gt215_clk_info *info)
{
- struct gt215_clk_priv *priv = (void *)clock;
+ struct gt215_clk *clk = gt215_clk(base);
u32 oclk, sclk, sdiv;
s32 diff;
@@ -196,7 +201,7 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
info->clk = 0x00002140;
return khz;
default:
- sclk = read_vco(priv, clk);
+ sclk = read_vco(clk, idx);
sdiv = min((sclk * 2) / khz, (u32)65);
oclk = (sclk * 2) / sdiv;
diff = ((khz + 3000) - oclk);
@@ -224,11 +229,11 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
}
int
-gt215_pll_info(struct nvkm_clk *clock, int clk, u32 pll, u32 khz,
+gt215_pll_info(struct nvkm_clk *base, int idx, u32 pll, u32 khz,
struct gt215_clk_info *info)
{
- struct nvkm_bios *bios = nvkm_bios(clock);
- struct gt215_clk_priv *priv = (void *)clock;
+ struct gt215_clk *clk = gt215_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvbios_pll limits;
int P, N, M, diff;
int ret;
@@ -237,22 +242,22 @@ gt215_pll_info(struct nvkm_clk *clock, int clk, u32 pll, u32 khz,
/* If we can get within [-2, 3) MHz of a divider, we'll disable the
* PLL and use the divider instead. */
- ret = gt215_clk_info(clock, clk, khz, info);
+ ret = gt215_clk_info(&clk->base, idx, khz, info);
diff = khz - ret;
if (!pll || (diff >= -2000 && diff < 3000)) {
goto out;
}
/* Try with PLL */
- ret = nvbios_pll_parse(bios, pll, &limits);
+ ret = nvbios_pll_parse(subdev->device->bios, pll, &limits);
if (ret)
return ret;
- ret = gt215_clk_info(clock, clk - 0x10, limits.refclk, info);
+ ret = gt215_clk_info(&clk->base, idx - 0x10, limits.refclk, info);
if (ret != limits.refclk)
return -EINVAL;
- ret = gt215_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P);
+ ret = gt215_pll_calc(subdev, &limits, khz, &N, NULL, &M, &P);
if (ret >= 0) {
info->pll = (P << 16) | (N << 8) | M;
}
@@ -263,22 +268,22 @@ out:
}
static int
-calc_clk(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate,
- int clk, u32 pll, int idx)
+calc_clk(struct gt215_clk *clk, struct nvkm_cstate *cstate,
+ int idx, u32 pll, int dom)
{
- int ret = gt215_pll_info(&priv->base, clk, pll, cstate->domain[idx],
- &priv->eng[idx]);
+ int ret = gt215_pll_info(&clk->base, idx, pll, cstate->domain[dom],
+ &clk->eng[dom]);
if (ret >= 0)
return 0;
return ret;
}
static int
-calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
+calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate)
{
int ret = 0;
u32 kHz = cstate->domain[nv_clk_src_host];
- struct gt215_clk_info *info = &priv->eng[nv_clk_src_host];
+ struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
if (kHz == 277000) {
info->clk = 0;
@@ -288,7 +293,7 @@ calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
info->host_out = NVA3_HOST_CLK;
- ret = gt215_clk_info(&priv->base, 0x1d, kHz, info);
+ ret = gt215_clk_info(&clk->base, 0x1d, kHz, info);
if (ret >= 0)
return 0;
@@ -298,21 +303,33 @@ calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
int
gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
{
- struct nvkm_fifo *pfifo = nvkm_fifo(clk);
+ struct nvkm_device *device = clk->subdev.device;
+ struct nvkm_fifo *fifo = device->fifo;
/* halt and idle execution engines */
- nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
- nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x020060, 0x00070000, 0x00000000);
+ nvkm_mask(device, 0x002504, 0x00000001, 0x00000001);
/* Wait until the interrupt handler is finished */
- if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
+ if (nvkm_msec(device, 2000,
+ if (!nvkm_rd32(device, 0x000100))
+ break;
+ ) < 0)
return -EBUSY;
- if (pfifo)
- pfifo->pause(pfifo, flags);
+ if (fifo)
+ nvkm_fifo_pause(fifo, flags);
- if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010))
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x002504) & 0x00000010)
+ break;
+ ) < 0)
return -EIO;
- if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f))
+
+ if (nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x002504) & 0x0000003f;
+ if (tmp == 0x0000003f)
+ break;
+ ) < 0)
return -EIO;
return 0;
@@ -321,86 +338,94 @@ gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
void
gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags)
{
- struct nvkm_fifo *pfifo = nvkm_fifo(clk);
+ struct nvkm_device *device = clk->subdev.device;
+ struct nvkm_fifo *fifo = device->fifo;
- if (pfifo && flags)
- pfifo->start(pfifo, flags);
+ if (fifo && flags)
+ nvkm_fifo_start(fifo, flags);
- nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
- nv_mask(clk, 0x020060, 0x00070000, 0x00040000);
+ nvkm_mask(device, 0x002504, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x020060, 0x00070000, 0x00040000);
}
static void
-disable_clk_src(struct gt215_clk_priv *priv, u32 src)
+disable_clk_src(struct gt215_clk *clk, u32 src)
{
- nv_mask(priv, src, 0x00000100, 0x00000000);
- nv_mask(priv, src, 0x00000001, 0x00000000);
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, src, 0x00000100, 0x00000000);
+ nvkm_mask(device, src, 0x00000001, 0x00000000);
}
static void
-prog_pll(struct gt215_clk_priv *priv, int clk, u32 pll, int idx)
+prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
{
- struct gt215_clk_info *info = &priv->eng[idx];
- const u32 src0 = 0x004120 + (clk * 4);
- const u32 src1 = 0x004160 + (clk * 4);
+ struct gt215_clk_info *info = &clk->eng[dom];
+ struct nvkm_device *device = clk->base.subdev.device;
+ const u32 src0 = 0x004120 + (idx * 4);
+ const u32 src1 = 0x004160 + (idx * 4);
const u32 ctrl = pll + 0;
const u32 coef = pll + 4;
u32 bypass;
if (info->pll) {
/* Always start from a non-PLL clock */
- bypass = nv_rd32(priv, ctrl) & 0x00000008;
+ bypass = nvkm_rd32(device, ctrl) & 0x00000008;
if (!bypass) {
- nv_mask(priv, src1, 0x00000101, 0x00000101);
- nv_mask(priv, ctrl, 0x00000008, 0x00000008);
+ nvkm_mask(device, src1, 0x00000101, 0x00000101);
+ nvkm_mask(device, ctrl, 0x00000008, 0x00000008);
udelay(20);
}
- nv_mask(priv, src0, 0x003f3141, 0x00000101 | info->clk);
- nv_wr32(priv, coef, info->pll);
- nv_mask(priv, ctrl, 0x00000015, 0x00000015);
- nv_mask(priv, ctrl, 0x00000010, 0x00000000);
- if (!nv_wait(priv, ctrl, 0x00020000, 0x00020000)) {
- nv_mask(priv, ctrl, 0x00000010, 0x00000010);
- nv_mask(priv, src0, 0x00000101, 0x00000000);
+ nvkm_mask(device, src0, 0x003f3141, 0x00000101 | info->clk);
+ nvkm_wr32(device, coef, info->pll);
+ nvkm_mask(device, ctrl, 0x00000015, 0x00000015);
+ nvkm_mask(device, ctrl, 0x00000010, 0x00000000);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, ctrl) & 0x00020000)
+ break;
+ ) < 0) {
+ nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+ nvkm_mask(device, src0, 0x00000101, 0x00000000);
return;
}
- nv_mask(priv, ctrl, 0x00000010, 0x00000010);
- nv_mask(priv, ctrl, 0x00000008, 0x00000000);
- disable_clk_src(priv, src1);
+ nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+ nvkm_mask(device, ctrl, 0x00000008, 0x00000000);
+ disable_clk_src(clk, src1);
} else {
- nv_mask(priv, src1, 0x003f3141, 0x00000101 | info->clk);
- nv_mask(priv, ctrl, 0x00000018, 0x00000018);
+ nvkm_mask(device, src1, 0x003f3141, 0x00000101 | info->clk);
+ nvkm_mask(device, ctrl, 0x00000018, 0x00000018);
udelay(20);
- nv_mask(priv, ctrl, 0x00000001, 0x00000000);
- disable_clk_src(priv, src0);
+ nvkm_mask(device, ctrl, 0x00000001, 0x00000000);
+ disable_clk_src(clk, src0);
}
}
static void
-prog_clk(struct gt215_clk_priv *priv, int clk, int idx)
+prog_clk(struct gt215_clk *clk, int idx, int dom)
{
- struct gt215_clk_info *info = &priv->eng[idx];
- nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk);
+ struct gt215_clk_info *info = &clk->eng[dom];
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
}
static void
-prog_host(struct gt215_clk_priv *priv)
+prog_host(struct gt215_clk *clk)
{
- struct gt215_clk_info *info = &priv->eng[nv_clk_src_host];
- u32 hsrc = (nv_rd32(priv, 0xc040));
+ struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 hsrc = (nvkm_rd32(device, 0xc040));
switch (info->host_out) {
case NVA3_HOST_277:
if ((hsrc & 0x30000000) == 0) {
- nv_wr32(priv, 0xc040, hsrc | 0x20000000);
- disable_clk_src(priv, 0x4194);
+ nvkm_wr32(device, 0xc040, hsrc | 0x20000000);
+ disable_clk_src(clk, 0x4194);
}
break;
case NVA3_HOST_CLK:
- prog_clk(priv, 0x1d, nv_clk_src_host);
+ prog_clk(clk, 0x1d, nv_clk_src_host);
if ((hsrc & 0x30000000) >= 0x20000000) {
- nv_wr32(priv, 0xc040, hsrc & ~0x30000000);
+ nvkm_wr32(device, 0xc040, hsrc & ~0x30000000);
}
break;
default:
@@ -408,44 +433,45 @@ prog_host(struct gt215_clk_priv *priv)
}
/* This seems to be a clock gating factor on idle, always set to 64 */
- nv_wr32(priv, 0xc044, 0x3e);
+ nvkm_wr32(device, 0xc044, 0x3e);
}
static void
-prog_core(struct gt215_clk_priv *priv, int idx)
+prog_core(struct gt215_clk *clk, int dom)
{
- struct gt215_clk_info *info = &priv->eng[idx];
- u32 fb_delay = nv_rd32(priv, 0x10002c);
+ struct gt215_clk_info *info = &clk->eng[dom];
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 fb_delay = nvkm_rd32(device, 0x10002c);
if (fb_delay < info->fb_delay)
- nv_wr32(priv, 0x10002c, info->fb_delay);
+ nvkm_wr32(device, 0x10002c, info->fb_delay);
- prog_pll(priv, 0x00, 0x004200, idx);
+ prog_pll(clk, 0x00, 0x004200, dom);
if (fb_delay > info->fb_delay)
- nv_wr32(priv, 0x10002c, info->fb_delay);
+ nvkm_wr32(device, 0x10002c, info->fb_delay);
}
static int
-gt215_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gt215_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
- struct gt215_clk_priv *priv = (void *)clk;
- struct gt215_clk_info *core = &priv->eng[nv_clk_src_core];
+ struct gt215_clk *clk = gt215_clk(base);
+ struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
int ret;
- if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
- (ret = calc_clk(priv, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
- (ret = calc_clk(priv, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
- (ret = calc_clk(priv, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
- (ret = calc_host(priv, cstate)))
+ if ((ret = calc_clk(clk, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
+ (ret = calc_clk(clk, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
+ (ret = calc_clk(clk, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
+ (ret = calc_clk(clk, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
+ (ret = calc_host(clk, cstate)))
return ret;
/* XXX: Should be reading the highest bit in the VBIOS clock to decide
* whether to use a PLL or not... but using a PLL defeats the purpose */
if (core->pll) {
- ret = gt215_clk_info(clk, 0x10,
+ ret = gt215_clk_info(&clk->base, 0x10,
cstate->domain[nv_clk_src_core_intm],
- &priv->eng[nv_clk_src_core_intm]);
+ &clk->eng[nv_clk_src_core_intm]);
if (ret < 0)
return ret;
}
@@ -454,81 +480,67 @@ gt215_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
}
static int
-gt215_clk_prog(struct nvkm_clk *clk)
+gt215_clk_prog(struct nvkm_clk *base)
{
- struct gt215_clk_priv *priv = (void *)clk;
- struct gt215_clk_info *core = &priv->eng[nv_clk_src_core];
+ struct gt215_clk *clk = gt215_clk(base);
+ struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
int ret = 0;
unsigned long flags;
unsigned long *f = &flags;
- ret = gt215_clk_pre(clk, f);
+ ret = gt215_clk_pre(&clk->base, f);
if (ret)
goto out;
if (core->pll)
- prog_core(priv, nv_clk_src_core_intm);
+ prog_core(clk, nv_clk_src_core_intm);
- prog_core(priv, nv_clk_src_core);
- prog_pll(priv, 0x01, 0x004220, nv_clk_src_shader);
- prog_clk(priv, 0x20, nv_clk_src_disp);
- prog_clk(priv, 0x21, nv_clk_src_vdec);
- prog_host(priv);
+ prog_core(clk, nv_clk_src_core);
+ prog_pll(clk, 0x01, 0x004220, nv_clk_src_shader);
+ prog_clk(clk, 0x20, nv_clk_src_disp);
+ prog_clk(clk, 0x21, nv_clk_src_vdec);
+ prog_host(clk);
out:
if (ret == -EBUSY)
f = NULL;
- gt215_clk_post(clk, f);
+ gt215_clk_post(&clk->base, f);
return ret;
}
static void
-gt215_clk_tidy(struct nvkm_clk *clk)
+gt215_clk_tidy(struct nvkm_clk *base)
{
}
-static struct nvkm_domain
-gt215_domain[] = {
- { nv_clk_src_crystal , 0xff },
- { nv_clk_src_core , 0x00, 0, "core", 1000 },
- { nv_clk_src_shader , 0x01, 0, "shader", 1000 },
- { nv_clk_src_mem , 0x02, 0, "memory", 1000 },
- { nv_clk_src_vdec , 0x03 },
- { nv_clk_src_disp , 0x04 },
- { nv_clk_src_host , 0x05 },
- { nv_clk_src_core_intm, 0x06 },
- { nv_clk_src_max }
+static const struct nvkm_clk_func
+gt215_clk = {
+ .read = gt215_clk_read,
+ .calc = gt215_clk_calc,
+ .prog = gt215_clk_prog,
+ .tidy = gt215_clk_tidy,
+ .domains = {
+ { nv_clk_src_crystal , 0xff },
+ { nv_clk_src_core , 0x00, 0, "core", 1000 },
+ { nv_clk_src_shader , 0x01, 0, "shader", 1000 },
+ { nv_clk_src_mem , 0x02, 0, "memory", 1000 },
+ { nv_clk_src_vdec , 0x03 },
+ { nv_clk_src_disp , 0x04 },
+ { nv_clk_src_host , 0x05 },
+ { nv_clk_src_core_intm, 0x06 },
+ { nv_clk_src_max }
+ }
};
-static int
-gt215_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
- struct gt215_clk_priv *priv;
- int ret;
+ struct gt215_clk *clk;
- ret = nvkm_clk_create(parent, engine, oclass, gt215_domain,
- NULL, 0, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+ return -ENOMEM;
+ *pclk = &clk->base;
- priv->base.read = gt215_clk_read;
- priv->base.calc = gt215_clk_calc;
- priv->base.prog = gt215_clk_prog;
- priv->base.tidy = gt215_clk_tidy;
- return 0;
+ return nvkm_clk_ctor(&gt215_clk, device, index, true, &clk->base);
}
-
-struct nvkm_oclass
-gt215_clk_oclass = {
- .handle = NV_SUBDEV(CLK, 0xa3),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gt215_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
index b447d9cd4d37..8865b59fe575 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
@@ -1,6 +1,6 @@
#ifndef __NVKM_CLK_NVA3_H__
#define __NVKM_CLK_NVA3_H__
-#include <subdev/clk.h>
+#include "priv.h"
struct gt215_clk_info {
u32 clk;
@@ -13,6 +13,6 @@ struct gt215_clk_info {
};
int gt215_pll_info(struct nvkm_clk *, int, u32, u32, struct gt215_clk_info *);
-int gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags);
-void gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags);
+int gt215_clk_pre(struct nvkm_clk *, unsigned long *flags);
+void gt215_clk_post(struct nvkm_clk *, unsigned long *flags);
#endif
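Editor's sketch (not part of the patch): each per-chipset file in this diff is converted to the same shape -- a private struct embedding struct nvkm_clk, a container_of() downcast macro, a const struct nvkm_clk_func table carrying the hooks and clock domains, and a *_clk_new() constructor that allocates the object and hands it to nvkm_clk_ctor(). The gxxx_* names below are placeholders, and the read/calc/prog/tidy implementations are assumed to be defined elsewhere:

#define gxxx_clk(p) container_of((p), struct gxxx_clk, base)

struct gxxx_clk {
	struct nvkm_clk base;
	/* chip-specific reclocking state goes here */
};

static const struct nvkm_clk_func
gxxx_clk_func = {
	.read = gxxx_clk_read,		/* chip-specific, defined elsewhere */
	.calc = gxxx_clk_calc,
	.prog = gxxx_clk_prog,
	.tidy = gxxx_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_max }
	}
};

int
gxxx_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
	struct gxxx_clk *clk;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	*pclk = &clk->base;

	return nvkm_clk_ctor(&gxxx_clk_func, device, index, true, &clk->base);
}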
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
index c54417b146c7..1c21b8b53b78 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
@@ -21,15 +21,15 @@
*
* Authors: Ben Skeggs
*/
+#define mcp77_clk(p) container_of((p), struct mcp77_clk, base)
#include "gt215.h"
#include "pll.h"
-#include <core/device.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
-struct mcp77_clk_priv {
+struct mcp77_clk {
struct nvkm_clk base;
enum nv_clk_src csrc, ssrc, vsrc;
u32 cctrl, sctrl;
@@ -39,27 +39,29 @@ struct mcp77_clk_priv {
};
static u32
-read_div(struct nvkm_clk *clk)
+read_div(struct mcp77_clk *clk)
{
- return nv_rd32(clk, 0x004600);
+ struct nvkm_device *device = clk->base.subdev.device;
+ return nvkm_rd32(device, 0x004600);
}
static u32
-read_pll(struct nvkm_clk *clk, u32 base)
+read_pll(struct mcp77_clk *clk, u32 base)
{
- u32 ctrl = nv_rd32(clk, base + 0);
- u32 coef = nv_rd32(clk, base + 4);
- u32 ref = clk->read(clk, nv_clk_src_href);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ctrl = nvkm_rd32(device, base + 0);
+ u32 coef = nvkm_rd32(device, base + 4);
+ u32 ref = nvkm_clk_read(&clk->base, nv_clk_src_href);
u32 post_div = 0;
u32 clock = 0;
int N1, M1;
switch (base){
case 0x4020:
- post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
+ post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
break;
case 0x4028:
- post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
+ post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
break;
default:
break;
@@ -76,59 +78,61 @@ read_pll(struct nvkm_clk *clk, u32 base)
}
static int
-mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
- struct mcp77_clk_priv *priv = (void *)clk;
- u32 mast = nv_rd32(clk, 0x00c054);
+ struct mcp77_clk *clk = mcp77_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mast = nvkm_rd32(device, 0x00c054);
u32 P = 0;
switch (src) {
case nv_clk_src_crystal:
- return nv_device(priv)->crystal;
+ return device->crystal;
case nv_clk_src_href:
return 100000; /* PCIE reference clock */
case nv_clk_src_hclkm4:
- return clk->read(clk, nv_clk_src_href) * 4;
+ return nvkm_clk_read(&clk->base, nv_clk_src_href) * 4;
case nv_clk_src_hclkm2d3:
- return clk->read(clk, nv_clk_src_href) * 2 / 3;
+ return nvkm_clk_read(&clk->base, nv_clk_src_href) * 2 / 3;
case nv_clk_src_host:
switch (mast & 0x000c0000) {
- case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
+ case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
case 0x00040000: break;
- case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
- case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
+ case 0x00080000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
+ case 0x000c0000: return nvkm_clk_read(&clk->base, nv_clk_src_cclk);
}
break;
case nv_clk_src_core:
- P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
+ P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
switch (mast & 0x00000003) {
- case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
+ case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
case 0x00000001: return 0;
- case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
+ case 0x00000002: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4) >> P;
case 0x00000003: return read_pll(clk, 0x004028) >> P;
}
break;
case nv_clk_src_cclk:
if ((mast & 0x03000000) != 0x03000000)
- return clk->read(clk, nv_clk_src_core);
+ return nvkm_clk_read(&clk->base, nv_clk_src_core);
if ((mast & 0x00000200) == 0x00000000)
- return clk->read(clk, nv_clk_src_core);
+ return nvkm_clk_read(&clk->base, nv_clk_src_core);
switch (mast & 0x00000c00) {
- case 0x00000000: return clk->read(clk, nv_clk_src_href);
- case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
- case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
+ case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
+ case 0x00000400: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
+ case 0x00000800: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
default: return 0;
}
case nv_clk_src_shader:
- P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
+ P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
switch (mast & 0x00000030) {
case 0x00000000:
if (mast & 0x00000040)
- return clk->read(clk, nv_clk_src_href) >> P;
- return clk->read(clk, nv_clk_src_crystal) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
case 0x00000010: break;
case 0x00000020: return read_pll(clk, 0x004028) >> P;
case 0x00000030: return read_pll(clk, 0x004020) >> P;
@@ -142,7 +146,7 @@ mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
switch (mast & 0x00400000) {
case 0x00400000:
- return clk->read(clk, nv_clk_src_core) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
break;
default:
return 500000 >> P;
@@ -153,29 +157,28 @@ mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
break;
}
- nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+ nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
return 0;
}
static u32
-calc_pll(struct mcp77_clk_priv *priv, u32 reg,
+calc_pll(struct mcp77_clk *clk, u32 reg,
u32 clock, int *N, int *M, int *P)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvbios_pll pll;
- struct nvkm_clk *clk = &priv->base;
int ret;
- ret = nvbios_pll_parse(bios, reg, &pll);
+ ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
if (ret)
return 0;
pll.vco2.max_freq = 0;
- pll.refclk = clk->read(clk, nv_clk_src_href);
+ pll.refclk = nvkm_clk_read(&clk->base, nv_clk_src_href);
if (!pll.refclk)
return 0;
- return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
+ return nv04_pll_calc(subdev, &pll, clock, N, M, NULL, NULL, P);
}
static inline u32
@@ -197,26 +200,27 @@ calc_P(u32 src, u32 target, int *div)
}
static int
-mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+mcp77_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
- struct mcp77_clk_priv *priv = (void *)clk;
+ struct mcp77_clk *clk = mcp77_clk(base);
const int shader = cstate->domain[nv_clk_src_shader];
const int core = cstate->domain[nv_clk_src_core];
const int vdec = cstate->domain[nv_clk_src_vdec];
+ struct nvkm_subdev *subdev = &clk->base.subdev;
u32 out = 0, clock = 0;
int N, M, P1, P2 = 0;
int divs = 0;
/* cclk: find suitable source, disable PLL if we can */
- if (core < clk->read(clk, nv_clk_src_hclkm4))
- out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);
+ if (core < nvkm_clk_read(&clk->base, nv_clk_src_hclkm4))
+ out = calc_P(nvkm_clk_read(&clk->base, nv_clk_src_hclkm4), core, &divs);
/* Calculate clock * 2, so shader clock can use it too */
- clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
+ clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);
if (abs(core - out) <= abs(core - (clock >> 1))) {
- priv->csrc = nv_clk_src_hclkm4;
- priv->cctrl = divs << 16;
+ clk->csrc = nv_clk_src_hclkm4;
+ clk->cctrl = divs << 16;
} else {
/* NVCTRL is actually used _after_ NVPOST, and after what we
* call NVPLL. To make matters worse, NVPOST is an integer
@@ -226,31 +230,31 @@ mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
P1 = 2;
}
- priv->csrc = nv_clk_src_core;
- priv->ccoef = (N << 8) | M;
+ clk->csrc = nv_clk_src_core;
+ clk->ccoef = (N << 8) | M;
- priv->cctrl = (P2 + 1) << 16;
- priv->cpost = (1 << P1) << 16;
+ clk->cctrl = (P2 + 1) << 16;
+ clk->cpost = (1 << P1) << 16;
}
/* sclk: nvpll + divisor, href or spll */
out = 0;
- if (shader == clk->read(clk, nv_clk_src_href)) {
- priv->ssrc = nv_clk_src_href;
+ if (shader == nvkm_clk_read(&clk->base, nv_clk_src_href)) {
+ clk->ssrc = nv_clk_src_href;
} else {
- clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
- if (priv->csrc == nv_clk_src_core)
+ clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
+ if (clk->csrc == nv_clk_src_core)
out = calc_P((core << 1), shader, &divs);
if (abs(shader - out) <=
abs(shader - clock) &&
(divs + P2) <= 7) {
- priv->ssrc = nv_clk_src_core;
- priv->sctrl = (divs + P2) << 16;
+ clk->ssrc = nv_clk_src_core;
+ clk->sctrl = (divs + P2) << 16;
} else {
- priv->ssrc = nv_clk_src_shader;
- priv->scoef = (N << 8) | M;
- priv->sctrl = P1 << 16;
+ clk->ssrc = nv_clk_src_shader;
+ clk->scoef = (N << 8) | M;
+ clk->sctrl = P1 << 16;
}
}
@@ -258,172 +262,162 @@ mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
out = calc_P(core, vdec, &divs);
clock = calc_P(500000, vdec, &P1);
if(abs(vdec - out) <= abs(vdec - clock)) {
- priv->vsrc = nv_clk_src_cclk;
- priv->vdiv = divs << 16;
+ clk->vsrc = nv_clk_src_cclk;
+ clk->vdiv = divs << 16;
} else {
- priv->vsrc = nv_clk_src_vdec;
- priv->vdiv = P1 << 16;
+ clk->vsrc = nv_clk_src_vdec;
+ clk->vdiv = P1 << 16;
}
/* Print strategy! */
- nv_debug(priv, "nvpll: %08x %08x %08x\n",
- priv->ccoef, priv->cpost, priv->cctrl);
- nv_debug(priv, " spll: %08x %08x %08x\n",
- priv->scoef, priv->spost, priv->sctrl);
- nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
- if (priv->csrc == nv_clk_src_hclkm4)
- nv_debug(priv, "core: hrefm4\n");
+ nvkm_debug(subdev, "nvpll: %08x %08x %08x\n",
+ clk->ccoef, clk->cpost, clk->cctrl);
+ nvkm_debug(subdev, " spll: %08x %08x %08x\n",
+ clk->scoef, clk->spost, clk->sctrl);
+ nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv);
+ if (clk->csrc == nv_clk_src_hclkm4)
+ nvkm_debug(subdev, "core: hrefm4\n");
else
- nv_debug(priv, "core: nvpll\n");
+ nvkm_debug(subdev, "core: nvpll\n");
- if (priv->ssrc == nv_clk_src_hclkm4)
- nv_debug(priv, "shader: hrefm4\n");
- else if (priv->ssrc == nv_clk_src_core)
- nv_debug(priv, "shader: nvpll\n");
+ if (clk->ssrc == nv_clk_src_hclkm4)
+ nvkm_debug(subdev, "shader: hrefm4\n");
+ else if (clk->ssrc == nv_clk_src_core)
+ nvkm_debug(subdev, "shader: nvpll\n");
else
- nv_debug(priv, "shader: spll\n");
+ nvkm_debug(subdev, "shader: spll\n");
- if (priv->vsrc == nv_clk_src_hclkm4)
- nv_debug(priv, "vdec: 500MHz\n");
+ if (clk->vsrc == nv_clk_src_hclkm4)
+ nvkm_debug(subdev, "vdec: 500MHz\n");
else
- nv_debug(priv, "vdec: core\n");
+ nvkm_debug(subdev, "vdec: core\n");
return 0;
}
static int
-mcp77_clk_prog(struct nvkm_clk *clk)
+mcp77_clk_prog(struct nvkm_clk *base)
{
- struct mcp77_clk_priv *priv = (void *)clk;
+ struct mcp77_clk *clk = mcp77_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
u32 pllmask = 0, mast;
unsigned long flags;
unsigned long *f = &flags;
int ret = 0;
- ret = gt215_clk_pre(clk, f);
+ ret = gt215_clk_pre(&clk->base, f);
if (ret)
goto out;
/* First switch to safe clocks: href */
- mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
+ mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
mast &= ~0x00400e73;
mast |= 0x03000000;
- switch (priv->csrc) {
+ switch (clk->csrc) {
case nv_clk_src_hclkm4:
- nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
+ nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
mast |= 0x00000002;
break;
case nv_clk_src_core:
- nv_wr32(clk, 0x402c, priv->ccoef);
- nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
- nv_wr32(clk, 0x4040, priv->cpost);
+ nvkm_wr32(device, 0x402c, clk->ccoef);
+ nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
+ nvkm_wr32(device, 0x4040, clk->cpost);
pllmask |= (0x3 << 8);
mast |= 0x00000003;
break;
default:
- nv_warn(priv,"Reclocking failed: unknown core clock\n");
+ nvkm_warn(subdev, "Reclocking failed: unknown core clock\n");
goto resume;
}
- switch (priv->ssrc) {
+ switch (clk->ssrc) {
case nv_clk_src_href:
- nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
+ nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
/* mast |= 0x00000000; */
break;
case nv_clk_src_core:
- nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
+ nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
mast |= 0x00000020;
break;
case nv_clk_src_shader:
- nv_wr32(clk, 0x4024, priv->scoef);
- nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
- nv_wr32(clk, 0x4070, priv->spost);
+ nvkm_wr32(device, 0x4024, clk->scoef);
+ nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
+ nvkm_wr32(device, 0x4070, clk->spost);
pllmask |= (0x3 << 12);
mast |= 0x00000030;
break;
default:
- nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
+ nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n");
goto resume;
}
- if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
- nv_warn(priv,"Reclocking failed: unstable PLLs\n");
+ if (nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
+ if (tmp == pllmask)
+ break;
+ ) < 0)
goto resume;
- }
- switch (priv->vsrc) {
+ switch (clk->vsrc) {
case nv_clk_src_cclk:
mast |= 0x00400000;
default:
- nv_wr32(clk, 0x4600, priv->vdiv);
+ nvkm_wr32(device, 0x4600, clk->vdiv);
}
- nv_wr32(clk, 0xc054, mast);
+ nvkm_wr32(device, 0xc054, mast);
resume:
/* Disable some PLLs and dividers when unused */
- if (priv->csrc != nv_clk_src_core) {
- nv_wr32(clk, 0x4040, 0x00000000);
- nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
+ if (clk->csrc != nv_clk_src_core) {
+ nvkm_wr32(device, 0x4040, 0x00000000);
+ nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
}
- if (priv->ssrc != nv_clk_src_shader) {
- nv_wr32(clk, 0x4070, 0x00000000);
- nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
+ if (clk->ssrc != nv_clk_src_shader) {
+ nvkm_wr32(device, 0x4070, 0x00000000);
+ nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
}
out:
if (ret == -EBUSY)
f = NULL;
- gt215_clk_post(clk, f);
+ gt215_clk_post(&clk->base, f);
return ret;
}
static void
-mcp77_clk_tidy(struct nvkm_clk *clk)
+mcp77_clk_tidy(struct nvkm_clk *base)
{
}
-static struct nvkm_domain
-mcp77_domains[] = {
- { nv_clk_src_crystal, 0xff },
- { nv_clk_src_href , 0xff },
- { nv_clk_src_core , 0xff, 0, "core", 1000 },
- { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
- { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 },
- { nv_clk_src_max }
+static const struct nvkm_clk_func
+mcp77_clk = {
+ .read = mcp77_clk_read,
+ .calc = mcp77_clk_calc,
+ .prog = mcp77_clk_prog,
+ .tidy = mcp77_clk_tidy,
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_core , 0xff, 0, "core", 1000 },
+ { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+ { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 },
+ { nv_clk_src_max }
+ }
};
-static int
-mcp77_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
- struct mcp77_clk_priv *priv;
- int ret;
+ struct mcp77_clk *clk;
- ret = nvkm_clk_create(parent, engine, oclass, mcp77_domains,
- NULL, 0, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+ return -ENOMEM;
+ *pclk = &clk->base;
- priv->base.read = mcp77_clk_read;
- priv->base.calc = mcp77_clk_calc;
- priv->base.prog = mcp77_clk_prog;
- priv->base.tidy = mcp77_clk_tidy;
- return 0;
+ return nvkm_clk_ctor(&mcp77_clk, device, index, true, &clk->base);
}
-
-struct nvkm_oclass *
-mcp77_clk_oclass = &(struct nvkm_oclass) {
- .handle = NV_SUBDEV(CLK, 0xaa),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = mcp77_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
-};
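Editor's sketch (not part of the patch): the other recurring change in this series is that register accessors and log helpers no longer take the subdev object itself; the device is fetched through clk->base.subdev.device and passed to nvkm_rd32()/nvkm_wr32()/nvkm_mask(), while nvkm_debug()/nvkm_error() take the subdev. A hypothetical helper written against the mcp77_clk structure above, reading the same clock-source selection register used in mcp77_clk_read():

static u32
example_read_mast(struct mcp77_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mast;

	/* old style would have been: mast = nv_rd32(clk, 0x00c054); */
	mast = nvkm_rd32(device, 0x00c054);
	nvkm_debug(subdev, "mast %08x\n", mast);
	return mast;
}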
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
index 63dbbb575228..b280f85e8827 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
@@ -21,23 +21,19 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/clk.h>
+#include "priv.h"
#include "pll.h"
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit/nv04.h>
-struct nv04_clk_priv {
- struct nvkm_clk base;
-};
-
int
nv04_clk_pll_calc(struct nvkm_clk *clock, struct nvbios_pll *info,
int clk, struct nvkm_pll_vals *pv)
{
int N1, M1, N2, M2, P;
- int ret = nv04_pll_calc(nv_subdev(clock), info, clk, &N1, &M1, &N2, &M2, &P);
+ int ret = nv04_pll_calc(&clock->subdev, info, clk, &N1, &M1, &N2, &M2, &P);
if (ret) {
pv->refclk = info->refclk;
pv->N1 = N1;
@@ -52,8 +48,9 @@ nv04_clk_pll_calc(struct nvkm_clk *clock, struct nvbios_pll *info,
int
nv04_clk_pll_prog(struct nvkm_clk *clk, u32 reg1, struct nvkm_pll_vals *pv)
{
- struct nvkm_devinit *devinit = nvkm_devinit(clk);
- int cv = nvkm_bios(clk)->version.chip;
+ struct nvkm_device *device = clk->subdev.device;
+ struct nvkm_devinit *devinit = device->devinit;
+ int cv = device->bios->version.chip;
if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
cv >= 0x40) {
@@ -67,37 +64,20 @@ nv04_clk_pll_prog(struct nvkm_clk *clk, u32 reg1, struct nvkm_pll_vals *pv)
return 0;
}
-static struct nvkm_domain
-nv04_domain[] = {
- { nv_clk_src_max }
+static const struct nvkm_clk_func
+nv04_clk = {
+ .domains = {
+ { nv_clk_src_max }
+ }
};
-static int
-nv04_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv04_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
- struct nv04_clk_priv *priv;
- int ret;
-
- ret = nvkm_clk_create(parent, engine, oclass, nv04_domain,
- NULL, 0, false, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.pll_calc = nv04_clk_pll_calc;
- priv->base.pll_prog = nv04_clk_pll_prog;
- return 0;
+ int ret = nvkm_clk_new_(&nv04_clk, device, index, false, pclk);
+ if (ret == 0) {
+ (*pclk)->pll_calc = nv04_clk_pll_calc;
+ (*pclk)->pll_prog = nv04_clk_pll_prog;
+ }
+ return ret;
}
-
-struct nvkm_oclass
-nv04_clk_oclass = {
- .handle = NV_SUBDEV(CLK, 0x04),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
index ed838130c89d..2ab9b9b84018 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
@@ -21,14 +21,14 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/clk.h>
+#define nv40_clk(p) container_of((p), struct nv40_clk, base)
+#include "priv.h"
#include "pll.h"
-#include <core/device.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
-struct nv40_clk_priv {
+struct nv40_clk {
struct nvkm_clk base;
u32 ctrl;
u32 npll_ctrl;
@@ -36,64 +36,56 @@ struct nv40_clk_priv {
u32 spll;
};
-static struct nvkm_domain
-nv40_domain[] = {
- { nv_clk_src_crystal, 0xff },
- { nv_clk_src_href , 0xff },
- { nv_clk_src_core , 0xff, 0, "core", 1000 },
- { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
- { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
- { nv_clk_src_max }
-};
-
static u32
-read_pll_1(struct nv40_clk_priv *priv, u32 reg)
+read_pll_1(struct nv40_clk *clk, u32 reg)
{
- u32 ctrl = nv_rd32(priv, reg + 0x00);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ctrl = nvkm_rd32(device, reg + 0x00);
int P = (ctrl & 0x00070000) >> 16;
int N = (ctrl & 0x0000ff00) >> 8;
int M = (ctrl & 0x000000ff) >> 0;
- u32 ref = 27000, clk = 0;
+ u32 ref = 27000, khz = 0;
if (ctrl & 0x80000000)
- clk = ref * N / M;
+ khz = ref * N / M;
- return clk >> P;
+ return khz >> P;
}
static u32
-read_pll_2(struct nv40_clk_priv *priv, u32 reg)
+read_pll_2(struct nv40_clk *clk, u32 reg)
{
- u32 ctrl = nv_rd32(priv, reg + 0x00);
- u32 coef = nv_rd32(priv, reg + 0x04);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ctrl = nvkm_rd32(device, reg + 0x00);
+ u32 coef = nvkm_rd32(device, reg + 0x04);
int N2 = (coef & 0xff000000) >> 24;
int M2 = (coef & 0x00ff0000) >> 16;
int N1 = (coef & 0x0000ff00) >> 8;
int M1 = (coef & 0x000000ff) >> 0;
int P = (ctrl & 0x00070000) >> 16;
- u32 ref = 27000, clk = 0;
+ u32 ref = 27000, khz = 0;
if ((ctrl & 0x80000000) && M1) {
- clk = ref * N1 / M1;
+ khz = ref * N1 / M1;
if ((ctrl & 0x40000100) == 0x40000000) {
if (M2)
- clk = clk * N2 / M2;
+ khz = khz * N2 / M2;
else
- clk = 0;
+ khz = 0;
}
}
- return clk >> P;
+ return khz >> P;
}
static u32
-read_clk(struct nv40_clk_priv *priv, u32 src)
+read_clk(struct nv40_clk *clk, u32 src)
{
switch (src) {
case 3:
- return read_pll_2(priv, 0x004000);
+ return read_pll_2(clk, 0x004000);
case 2:
- return read_pll_1(priv, 0x004008);
+ return read_pll_1(clk, 0x004008);
default:
break;
}
@@ -102,46 +94,48 @@ read_clk(struct nv40_clk_priv *priv, u32 src)
}
static int
-nv40_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+nv40_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
- struct nv40_clk_priv *priv = (void *)clk;
- u32 mast = nv_rd32(priv, 0x00c040);
+ struct nv40_clk *clk = nv40_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mast = nvkm_rd32(device, 0x00c040);
switch (src) {
case nv_clk_src_crystal:
- return nv_device(priv)->crystal;
+ return device->crystal;
case nv_clk_src_href:
return 100000; /*XXX: PCIE/AGP differ*/
case nv_clk_src_core:
- return read_clk(priv, (mast & 0x00000003) >> 0);
+ return read_clk(clk, (mast & 0x00000003) >> 0);
case nv_clk_src_shader:
- return read_clk(priv, (mast & 0x00000030) >> 4);
+ return read_clk(clk, (mast & 0x00000030) >> 4);
case nv_clk_src_mem:
- return read_pll_2(priv, 0x4020);
+ return read_pll_2(clk, 0x4020);
default:
break;
}
- nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+ nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
return -EINVAL;
}
static int
-nv40_clk_calc_pll(struct nv40_clk_priv *priv, u32 reg, u32 clk,
+nv40_clk_calc_pll(struct nv40_clk *clk, u32 reg, u32 khz,
int *N1, int *M1, int *N2, int *M2, int *log2P)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvbios_pll pll;
int ret;
- ret = nvbios_pll_parse(bios, reg, &pll);
+ ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
if (ret)
return ret;
- if (clk < pll.vco1.max_freq)
+ if (khz < pll.vco1.max_freq)
pll.vco2.max_freq = 0;
- ret = nv04_pll_calc(nv_subdev(priv), &pll, clk, N1, M1, N2, M2, log2P);
+ ret = nv04_pll_calc(subdev, &pll, khz, N1, M1, N2, M2, log2P);
if (ret == 0)
return -ERANGE;
@@ -149,93 +143,90 @@ nv40_clk_calc_pll(struct nv40_clk_priv *priv, u32 reg, u32 clk,
}
static int
-nv40_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+nv40_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
- struct nv40_clk_priv *priv = (void *)clk;
+ struct nv40_clk *clk = nv40_clk(base);
int gclk = cstate->domain[nv_clk_src_core];
int sclk = cstate->domain[nv_clk_src_shader];
int N1, M1, N2, M2, log2P;
int ret;
/* core/geometric clock */
- ret = nv40_clk_calc_pll(priv, 0x004000, gclk,
+ ret = nv40_clk_calc_pll(clk, 0x004000, gclk,
&N1, &M1, &N2, &M2, &log2P);
if (ret < 0)
return ret;
if (N2 == M2) {
- priv->npll_ctrl = 0x80000100 | (log2P << 16);
- priv->npll_coef = (N1 << 8) | M1;
+ clk->npll_ctrl = 0x80000100 | (log2P << 16);
+ clk->npll_coef = (N1 << 8) | M1;
} else {
- priv->npll_ctrl = 0xc0000000 | (log2P << 16);
- priv->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
+ clk->npll_ctrl = 0xc0000000 | (log2P << 16);
+ clk->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
}
/* use the second pll for shader/rop clock, if it differs from core */
if (sclk && sclk != gclk) {
- ret = nv40_clk_calc_pll(priv, 0x004008, sclk,
+ ret = nv40_clk_calc_pll(clk, 0x004008, sclk,
&N1, &M1, NULL, NULL, &log2P);
if (ret < 0)
return ret;
- priv->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
- priv->ctrl = 0x00000223;
+ clk->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
+ clk->ctrl = 0x00000223;
} else {
- priv->spll = 0x00000000;
- priv->ctrl = 0x00000333;
+ clk->spll = 0x00000000;
+ clk->ctrl = 0x00000333;
}
return 0;
}
static int
-nv40_clk_prog(struct nvkm_clk *clk)
+nv40_clk_prog(struct nvkm_clk *base)
{
- struct nv40_clk_priv *priv = (void *)clk;
- nv_mask(priv, 0x00c040, 0x00000333, 0x00000000);
- nv_wr32(priv, 0x004004, priv->npll_coef);
- nv_mask(priv, 0x004000, 0xc0070100, priv->npll_ctrl);
- nv_mask(priv, 0x004008, 0xc007ffff, priv->spll);
+ struct nv40_clk *clk = nv40_clk(base);
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, 0x00c040, 0x00000333, 0x00000000);
+ nvkm_wr32(device, 0x004004, clk->npll_coef);
+ nvkm_mask(device, 0x004000, 0xc0070100, clk->npll_ctrl);
+ nvkm_mask(device, 0x004008, 0xc007ffff, clk->spll);
mdelay(5);
- nv_mask(priv, 0x00c040, 0x00000333, priv->ctrl);
+ nvkm_mask(device, 0x00c040, 0x00000333, clk->ctrl);
return 0;
}
static void
-nv40_clk_tidy(struct nvkm_clk *clk)
+nv40_clk_tidy(struct nvkm_clk *obj)
{
}
-static int
-nv40_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static const struct nvkm_clk_func
+nv40_clk = {
+ .read = nv40_clk_read,
+ .calc = nv40_clk_calc,
+ .prog = nv40_clk_prog,
+ .tidy = nv40_clk_tidy,
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_core , 0xff, 0, "core", 1000 },
+ { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+ { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
+ { nv_clk_src_max }
+ }
+};
+
+int
+nv40_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
- struct nv40_clk_priv *priv;
- int ret;
+ struct nv40_clk *clk;
- ret = nvkm_clk_create(parent, engine, oclass, nv40_domain,
- NULL, 0, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+ return -ENOMEM;
+ clk->base.pll_calc = nv04_clk_pll_calc;
+ clk->base.pll_prog = nv04_clk_pll_prog;
+ *pclk = &clk->base;
- priv->base.pll_calc = nv04_clk_pll_calc;
- priv->base.pll_prog = nv04_clk_pll_prog;
- priv->base.read = nv40_clk_read;
- priv->base.calc = nv40_clk_calc;
- priv->base.prog = nv40_clk_prog;
- priv->base.tidy = nv40_clk_tidy;
- return 0;
+ return nvkm_clk_ctor(&nv40_clk, device, index, true, &clk->base);
}
-
-struct nvkm_oclass
-nv40_clk_oclass = {
- .handle = NV_SUBDEV(CLK, 0x40),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv40_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
index 9b4ffd6347ce..5841f297973c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
@@ -25,38 +25,39 @@
#include "pll.h"
#include "seq.h"
-#include <core/device.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
static u32
-read_div(struct nv50_clk_priv *priv)
+read_div(struct nv50_clk *clk)
{
- switch (nv_device(priv)->chipset) {
+ struct nvkm_device *device = clk->base.subdev.device;
+ switch (device->chipset) {
case 0x50: /* it exists, but only has bit 31, not the dividers.. */
case 0x84:
case 0x86:
case 0x98:
case 0xa0:
- return nv_rd32(priv, 0x004700);
+ return nvkm_rd32(device, 0x004700);
case 0x92:
case 0x94:
case 0x96:
- return nv_rd32(priv, 0x004800);
+ return nvkm_rd32(device, 0x004800);
default:
return 0x00000000;
}
}
static u32
-read_pll_src(struct nv50_clk_priv *priv, u32 base)
+read_pll_src(struct nv50_clk *clk, u32 base)
{
- struct nvkm_clk *clk = &priv->base;
- u32 coef, ref = clk->read(clk, nv_clk_src_crystal);
- u32 rsel = nv_rd32(priv, 0x00e18c);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 coef, ref = nvkm_clk_read(&clk->base, nv_clk_src_crystal);
+ u32 rsel = nvkm_rd32(device, 0x00e18c);
int P, N, M, id;
- switch (nv_device(priv)->chipset) {
+ switch (device->chipset) {
case 0x50:
case 0xa0:
switch (base) {
@@ -65,11 +66,11 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
case 0x4008: id = !!(rsel & 0x00000008); break;
case 0x4030: id = 0; break;
default:
- nv_error(priv, "ref: bad pll 0x%06x\n", base);
+ nvkm_error(subdev, "ref: bad pll %06x\n", base);
return 0;
}
- coef = nv_rd32(priv, 0x00e81c + (id * 0x0c));
+ coef = nvkm_rd32(device, 0x00e81c + (id * 0x0c));
ref *= (coef & 0x01000000) ? 2 : 4;
P = (coef & 0x00070000) >> 16;
N = ((coef & 0x0000ff00) >> 8) + 1;
@@ -78,7 +79,7 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
case 0x84:
case 0x86:
case 0x92:
- coef = nv_rd32(priv, 0x00e81c);
+ coef = nvkm_rd32(device, 0x00e81c);
P = (coef & 0x00070000) >> 16;
N = (coef & 0x0000ff00) >> 8;
M = (coef & 0x000000ff) >> 0;
@@ -86,26 +87,26 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
case 0x94:
case 0x96:
case 0x98:
- rsel = nv_rd32(priv, 0x00c050);
+ rsel = nvkm_rd32(device, 0x00c050);
switch (base) {
case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
case 0x4030: rsel = 3; break;
default:
- nv_error(priv, "ref: bad pll 0x%06x\n", base);
+ nvkm_error(subdev, "ref: bad pll %06x\n", base);
return 0;
}
switch (rsel) {
case 0: id = 1; break;
- case 1: return clk->read(clk, nv_clk_src_crystal);
- case 2: return clk->read(clk, nv_clk_src_href);
+ case 1: return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
+ case 2: return nvkm_clk_read(&clk->base, nv_clk_src_href);
case 3: id = 0; break;
}
- coef = nv_rd32(priv, 0x00e81c + (id * 0x28));
- P = (nv_rd32(priv, 0x00e824 + (id * 0x28)) >> 16) & 7;
+ coef = nvkm_rd32(device, 0x00e81c + (id * 0x28));
+ P = (nvkm_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
P += (coef & 0x00070000) >> 16;
N = (coef & 0x0000ff00) >> 8;
M = (coef & 0x000000ff) >> 0;
@@ -121,10 +122,11 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
}
static u32
-read_pll_ref(struct nv50_clk_priv *priv, u32 base)
+read_pll_ref(struct nv50_clk *clk, u32 base)
{
- struct nvkm_clk *clk = &priv->base;
- u32 src, mast = nv_rd32(priv, 0x00c040);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 src, mast = nvkm_rd32(device, 0x00c040);
switch (base) {
case 0x004028:
@@ -140,33 +142,33 @@ read_pll_ref(struct nv50_clk_priv *priv, u32 base)
src = !!(mast & 0x02000000);
break;
case 0x00e810:
- return clk->read(clk, nv_clk_src_crystal);
+ return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
default:
- nv_error(priv, "bad pll 0x%06x\n", base);
+ nvkm_error(subdev, "bad pll %06x\n", base);
return 0;
}
if (src)
- return clk->read(clk, nv_clk_src_href);
+ return nvkm_clk_read(&clk->base, nv_clk_src_href);
- return read_pll_src(priv, base);
+ return read_pll_src(clk, base);
}
static u32
-read_pll(struct nv50_clk_priv *priv, u32 base)
+read_pll(struct nv50_clk *clk, u32 base)
{
- struct nvkm_clk *clk = &priv->base;
- u32 mast = nv_rd32(priv, 0x00c040);
- u32 ctrl = nv_rd32(priv, base + 0);
- u32 coef = nv_rd32(priv, base + 4);
- u32 ref = read_pll_ref(priv, base);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 mast = nvkm_rd32(device, 0x00c040);
+ u32 ctrl = nvkm_rd32(device, base + 0);
+ u32 coef = nvkm_rd32(device, base + 4);
+ u32 ref = read_pll_ref(clk, base);
u32 freq = 0;
int N1, N2, M1, M2;
if (base == 0x004028 && (mast & 0x00100000)) {
/* wtf, appears to only disable post-divider on gt200 */
- if (nv_device(priv)->chipset != 0xa0)
- return clk->read(clk, nv_clk_src_dom6);
+ if (device->chipset != 0xa0)
+ return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
}
N2 = (coef & 0xff000000) >> 24;
@@ -186,71 +188,73 @@ read_pll(struct nv50_clk_priv *priv, u32 base)
return freq;
}
-static int
-nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+int
+nv50_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
- struct nv50_clk_priv *priv = (void *)clk;
- u32 mast = nv_rd32(priv, 0x00c040);
+ struct nv50_clk *clk = nv50_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mast = nvkm_rd32(device, 0x00c040);
u32 P = 0;
switch (src) {
case nv_clk_src_crystal:
- return nv_device(priv)->crystal;
+ return device->crystal;
case nv_clk_src_href:
return 100000; /* PCIE reference clock */
case nv_clk_src_hclk:
- return div_u64((u64)clk->read(clk, nv_clk_src_href) * 27778, 10000);
+ return div_u64((u64)nvkm_clk_read(&clk->base, nv_clk_src_href) * 27778, 10000);
case nv_clk_src_hclkm3:
- return clk->read(clk, nv_clk_src_hclk) * 3;
+ return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
case nv_clk_src_hclkm3d2:
- return clk->read(clk, nv_clk_src_hclk) * 3 / 2;
+ return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3 / 2;
case nv_clk_src_host:
switch (mast & 0x30000000) {
- case 0x00000000: return clk->read(clk, nv_clk_src_href);
+ case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
case 0x10000000: break;
case 0x20000000: /* !0x50 */
- case 0x30000000: return clk->read(clk, nv_clk_src_hclk);
+ case 0x30000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
}
break;
case nv_clk_src_core:
if (!(mast & 0x00100000))
- P = (nv_rd32(priv, 0x004028) & 0x00070000) >> 16;
+ P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
switch (mast & 0x00000003) {
- case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
- case 0x00000001: return clk->read(clk, nv_clk_src_dom6);
- case 0x00000002: return read_pll(priv, 0x004020) >> P;
- case 0x00000003: return read_pll(priv, 0x004028) >> P;
+ case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
+ case 0x00000001: return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
+ case 0x00000002: return read_pll(clk, 0x004020) >> P;
+ case 0x00000003: return read_pll(clk, 0x004028) >> P;
}
break;
case nv_clk_src_shader:
- P = (nv_rd32(priv, 0x004020) & 0x00070000) >> 16;
+ P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
switch (mast & 0x00000030) {
case 0x00000000:
if (mast & 0x00000080)
- return clk->read(clk, nv_clk_src_host) >> P;
- return clk->read(clk, nv_clk_src_crystal) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_host) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
case 0x00000010: break;
- case 0x00000020: return read_pll(priv, 0x004028) >> P;
- case 0x00000030: return read_pll(priv, 0x004020) >> P;
+ case 0x00000020: return read_pll(clk, 0x004028) >> P;
+ case 0x00000030: return read_pll(clk, 0x004020) >> P;
}
break;
case nv_clk_src_mem:
- P = (nv_rd32(priv, 0x004008) & 0x00070000) >> 16;
- if (nv_rd32(priv, 0x004008) & 0x00000200) {
+ P = (nvkm_rd32(device, 0x004008) & 0x00070000) >> 16;
+ if (nvkm_rd32(device, 0x004008) & 0x00000200) {
switch (mast & 0x0000c000) {
case 0x00000000:
- return clk->read(clk, nv_clk_src_crystal) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
case 0x00008000:
case 0x0000c000:
- return clk->read(clk, nv_clk_src_href) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
}
} else {
- return read_pll(priv, 0x004008) >> P;
+ return read_pll(clk, 0x004008) >> P;
}
break;
case nv_clk_src_vdec:
- P = (read_div(priv) & 0x00000700) >> 8;
- switch (nv_device(priv)->chipset) {
+ P = (read_div(clk) & 0x00000700) >> 8;
+ switch (device->chipset) {
case 0x84:
case 0x86:
case 0x92:
@@ -259,51 +263,51 @@ nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
case 0xa0:
switch (mast & 0x00000c00) {
case 0x00000000:
- if (nv_device(priv)->chipset == 0xa0) /* wtf?? */
- return clk->read(clk, nv_clk_src_core) >> P;
- return clk->read(clk, nv_clk_src_crystal) >> P;
+ if (device->chipset == 0xa0) /* wtf?? */
+ return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
case 0x00000400:
return 0;
case 0x00000800:
if (mast & 0x01000000)
- return read_pll(priv, 0x004028) >> P;
- return read_pll(priv, 0x004030) >> P;
+ return read_pll(clk, 0x004028) >> P;
+ return read_pll(clk, 0x004030) >> P;
case 0x00000c00:
- return clk->read(clk, nv_clk_src_core) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
}
break;
case 0x98:
switch (mast & 0x00000c00) {
case 0x00000000:
- return clk->read(clk, nv_clk_src_core) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
case 0x00000400:
return 0;
case 0x00000800:
- return clk->read(clk, nv_clk_src_hclkm3d2) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2) >> P;
case 0x00000c00:
- return clk->read(clk, nv_clk_src_mem) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_mem) >> P;
}
break;
}
break;
case nv_clk_src_dom6:
- switch (nv_device(priv)->chipset) {
+ switch (device->chipset) {
case 0x50:
case 0xa0:
- return read_pll(priv, 0x00e810) >> 2;
+ return read_pll(clk, 0x00e810) >> 2;
case 0x84:
case 0x86:
case 0x92:
case 0x94:
case 0x96:
case 0x98:
- P = (read_div(priv) & 0x00000007) >> 0;
+ P = (read_div(clk) & 0x00000007) >> 0;
switch (mast & 0x0c000000) {
- case 0x00000000: return clk->read(clk, nv_clk_src_href);
+ case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
case 0x04000000: break;
- case 0x08000000: return clk->read(clk, nv_clk_src_hclk);
+ case 0x08000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
case 0x0c000000:
- return clk->read(clk, nv_clk_src_hclkm3) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3) >> P;
}
break;
default:
@@ -313,27 +317,27 @@ nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
break;
}
- nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+ nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
return -EINVAL;
}
static u32
-calc_pll(struct nv50_clk_priv *priv, u32 reg, u32 clk, int *N, int *M, int *P)
+calc_pll(struct nv50_clk *clk, u32 reg, u32 idx, int *N, int *M, int *P)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvbios_pll pll;
int ret;
- ret = nvbios_pll_parse(bios, reg, &pll);
+ ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
if (ret)
return 0;
pll.vco2.max_freq = 0;
- pll.refclk = read_pll_ref(priv, reg);
+ pll.refclk = read_pll_ref(clk, reg);
if (!pll.refclk)
return 0;
- return nv04_pll_calc(nv_subdev(priv), &pll, clk, N, M, NULL, NULL, P);
+ return nv04_pll_calc(subdev, &pll, idx, N, M, NULL, NULL, P);
}
static inline u32
@@ -360,11 +364,13 @@ clk_same(u32 a, u32 b)
return ((a / 1000) == (b / 1000));
}
-static int
-nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+int
+nv50_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
- struct nv50_clk_priv *priv = (void *)clk;
- struct nv50_clk_hwsq *hwsq = &priv->hwsq;
+ struct nv50_clk *clk = nv50_clk(base);
+ struct nv50_clk_hwsq *hwsq = &clk->hwsq;
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
const int shader = cstate->domain[nv_clk_src_shader];
const int core = cstate->domain[nv_clk_src_core];
const int vdec = cstate->domain[nv_clk_src_vdec];
@@ -375,7 +381,7 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
int freq, out;
/* prepare a hwsq script from which we'll perform the reclock */
- out = clk_init(hwsq, nv_subdev(clk));
+ out = clk_init(hwsq, subdev);
if (out)
return out;
@@ -393,15 +399,15 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
freq = calc_div(core, vdec, &P1);
/* see how close we can get using xpll/hclk as a source */
- if (nv_device(priv)->chipset != 0x98)
- out = read_pll(priv, 0x004030);
+ if (device->chipset != 0x98)
+ out = read_pll(clk, 0x004030);
else
- out = clk->read(clk, nv_clk_src_hclkm3d2);
+ out = nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2);
out = calc_div(out, vdec, &P2);
/* select whichever gets us closest */
if (abs(vdec - freq) <= abs(vdec - out)) {
- if (nv_device(priv)->chipset != 0x98)
+ if (device->chipset != 0x98)
mastv |= 0x00000c00;
divsv |= P1 << 8;
} else {
@@ -417,14 +423,14 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
* of the host clock frequency
*/
if (dom6) {
- if (clk_same(dom6, clk->read(clk, nv_clk_src_href))) {
+ if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_href))) {
mastv |= 0x00000000;
} else
- if (clk_same(dom6, clk->read(clk, nv_clk_src_hclk))) {
+ if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_hclk))) {
mastv |= 0x08000000;
} else {
- freq = clk->read(clk, nv_clk_src_hclk) * 3;
- freq = calc_div(freq, dom6, &P1);
+ freq = nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
+ calc_div(freq, dom6, &P1);
mastv |= 0x0c000000;
divsv |= P1;
@@ -444,13 +450,13 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
/* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6,
* sclk to hclk) before reprogramming
*/
- if (nv_device(priv)->chipset < 0x92)
+ if (device->chipset < 0x92)
clk_mask(hwsq, mast, 0x001000b0, 0x00100080);
else
clk_mask(hwsq, mast, 0x000000b3, 0x00000081);
/* core: for the moment at least, always use nvpll */
- freq = calc_pll(priv, 0x4028, core, &N, &M, &P1);
+ freq = calc_pll(clk, 0x4028, core, &N, &M, &P1);
if (freq == 0)
return -ERANGE;
@@ -468,7 +474,7 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16));
clk_mask(hwsq, mast, 0x00100033, 0x00000023);
} else {
- freq = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
+ freq = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
if (freq == 0)
return -ERANGE;
@@ -485,77 +491,71 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
return 0;
}
-static int
-nv50_clk_prog(struct nvkm_clk *clk)
+int
+nv50_clk_prog(struct nvkm_clk *base)
{
- struct nv50_clk_priv *priv = (void *)clk;
- return clk_exec(&priv->hwsq, true);
+ struct nv50_clk *clk = nv50_clk(base);
+ return clk_exec(&clk->hwsq, true);
}
-static void
-nv50_clk_tidy(struct nvkm_clk *clk)
+void
+nv50_clk_tidy(struct nvkm_clk *base)
{
- struct nv50_clk_priv *priv = (void *)clk;
- clk_exec(&priv->hwsq, false);
+ struct nv50_clk *clk = nv50_clk(base);
+ clk_exec(&clk->hwsq, false);
}
int
-nv50_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv50_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
+ int index, bool allow_reclock, struct nvkm_clk **pclk)
{
- struct nv50_clk_oclass *pclass = (void *)oclass;
- struct nv50_clk_priv *priv;
+ struct nv50_clk *clk;
int ret;
- ret = nvkm_clk_create(parent, engine, oclass, pclass->domains,
- NULL, 0, false, &priv);
- *pobject = nv_object(priv);
+ if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+ return -ENOMEM;
+ ret = nvkm_clk_ctor(func, device, index, allow_reclock, &clk->base);
+ *pclk = &clk->base;
if (ret)
return ret;
- priv->hwsq.r_fifo = hwsq_reg(0x002504);
- priv->hwsq.r_spll[0] = hwsq_reg(0x004020);
- priv->hwsq.r_spll[1] = hwsq_reg(0x004024);
- priv->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
- priv->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
- switch (nv_device(priv)->chipset) {
+ clk->hwsq.r_fifo = hwsq_reg(0x002504);
+ clk->hwsq.r_spll[0] = hwsq_reg(0x004020);
+ clk->hwsq.r_spll[1] = hwsq_reg(0x004024);
+ clk->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
+ clk->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
+ switch (device->chipset) {
case 0x92:
case 0x94:
case 0x96:
- priv->hwsq.r_divs = hwsq_reg(0x004800);
+ clk->hwsq.r_divs = hwsq_reg(0x004800);
break;
default:
- priv->hwsq.r_divs = hwsq_reg(0x004700);
+ clk->hwsq.r_divs = hwsq_reg(0x004700);
break;
}
- priv->hwsq.r_mast = hwsq_reg(0x00c040);
-
- priv->base.read = nv50_clk_read;
- priv->base.calc = nv50_clk_calc;
- priv->base.prog = nv50_clk_prog;
- priv->base.tidy = nv50_clk_tidy;
+ clk->hwsq.r_mast = hwsq_reg(0x00c040);
return 0;
}
-static struct nvkm_domain
-nv50_domains[] = {
- { nv_clk_src_crystal, 0xff },
- { nv_clk_src_href , 0xff },
- { nv_clk_src_core , 0xff, 0, "core", 1000 },
- { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
- { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
- { nv_clk_src_max }
+static const struct nvkm_clk_func
+nv50_clk = {
+ .read = nv50_clk_read,
+ .calc = nv50_clk_calc,
+ .prog = nv50_clk_prog,
+ .tidy = nv50_clk_tidy,
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_core , 0xff, 0, "core", 1000 },
+ { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+ { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
+ { nv_clk_src_max }
+ }
};
-struct nvkm_oclass *
-nv50_clk_oclass = &(struct nv50_clk_oclass) {
- .base.handle = NV_SUBDEV(CLK, 0x50),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
- .domains = nv50_domains,
-}.base;
+int
+nv50_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+ return nv50_clk_new_(&nv50_clk, device, index, false, pclk);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
index 0ead76a32f10..d3c7fb6efa16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
@@ -1,7 +1,9 @@
-#ifndef __NVKM_CLK_NV50_H__
-#define __NVKM_CLK_NV50_H__
+#ifndef __NV50_CLK_H__
+#define __NV50_CLK_H__
+#define nv50_clk(p) container_of((p), struct nv50_clk, base)
+#include "priv.h"
+
#include <subdev/bus/hwsq.h>
-#include <subdev/clk.h>
struct nv50_clk_hwsq {
struct hwsq base;
@@ -12,17 +14,15 @@ struct nv50_clk_hwsq {
struct hwsq_reg r_mast;
};
-struct nv50_clk_priv {
+struct nv50_clk {
struct nvkm_clk base;
struct nv50_clk_hwsq hwsq;
};
-int nv50_clk_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-
-struct nv50_clk_oclass {
- struct nvkm_oclass base;
- struct nvkm_domain *domains;
-};
+int nv50_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+ bool, struct nvkm_clk **);
+int nv50_clk_read(struct nvkm_clk *, enum nv_clk_src);
+int nv50_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
+int nv50_clk_prog(struct nvkm_clk *);
+void nv50_clk_tidy(struct nvkm_clk *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
index 783a3e78d632..c6fccd600db4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
@@ -79,7 +79,7 @@ gt215_pll_calc(struct nvkm_subdev *subdev, struct nvbios_pll *info,
}
if (unlikely(best_err == ~0)) {
- nv_error(subdev, "unable to find matching pll values\n");
+ nvkm_error(subdev, "unable to find matching pll values\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
index f2292895a1a8..5ad67879e703 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
@@ -37,7 +37,7 @@ getMNP_single(struct nvkm_subdev *subdev, struct nvbios_pll *info, int clk,
* "clk" parameter in kHz
* returns calculated clock
*/
- struct nvkm_bios *bios = nvkm_bios(subdev);
+ struct nvkm_bios *bios = subdev->device->bios;
int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
int minM = info->vco1.min_m, maxM = info->vco1.max_m;
int minN = info->vco1.min_n, maxN = info->vco1.max_n;
@@ -136,7 +136,7 @@ getMNP_double(struct nvkm_subdev *subdev, struct nvbios_pll *info, int clk,
* "clk" parameter in kHz
* returns calculated clock
*/
- int chip_version = nvkm_bios(subdev)->version.chip;
+ int chip_version = subdev->device->bios->version.chip;
int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq;
int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq;
int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq;
@@ -240,6 +240,6 @@ nv04_pll_calc(struct nvkm_subdev *subdev, struct nvbios_pll *info, u32 freq,
}
if (!ret)
- nv_error(subdev, "unable to compute acceptable pll values\n");
+ nvkm_error(subdev, "unable to compute acceptable pll values\n");
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
new file mode 100644
index 000000000000..51eafc00c8b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_CLK_PRIV_H__
+#define __NVKM_CLK_PRIV_H__
+#define nvkm_clk(p) container_of((p), struct nvkm_clk, subdev)
+#include <subdev/clk.h>
+
+struct nvkm_clk_func {
+ int (*init)(struct nvkm_clk *);
+ void (*fini)(struct nvkm_clk *);
+ int (*read)(struct nvkm_clk *, enum nv_clk_src);
+ int (*calc)(struct nvkm_clk *, struct nvkm_cstate *);
+ int (*prog)(struct nvkm_clk *);
+ void (*tidy)(struct nvkm_clk *);
+ struct nvkm_pstate *pstates;
+ int nr_pstates;
+ struct nvkm_domain domains[];
+};
+
+int nvkm_clk_ctor(const struct nvkm_clk_func *, struct nvkm_device *, int,
+ bool allow_reclock, struct nvkm_clk *);
+int nvkm_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+ bool allow_reclock, struct nvkm_clk **);
+
+int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk,
+ struct nvkm_pll_vals *);
+int nv04_clk_pll_prog(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *);
+#endif