Diffstat (limited to 'drivers')
-rw-r--r--   drivers/base/platform.c       |  20
-rw-r--r--   drivers/clocksource/sh_cmt.c  |  45
-rw-r--r--   drivers/clocksource/sh_mtu2.c |  37
-rw-r--r--   drivers/clocksource/sh_tmu.c  |  41
-rw-r--r--   drivers/dma/shdma.c           |  25
-rw-r--r--   drivers/dma/shdma.h           |   4
-rw-r--r--   drivers/serial/sh-sci.c       | 189
-rw-r--r--   drivers/sh/Kconfig            |  24
-rw-r--r--   drivers/sh/Makefile           |   2
-rw-r--r--   drivers/sh/clk-cpg.c          | 298
-rw-r--r--   drivers/sh/clk.c              | 545
-rw-r--r--   drivers/sh/intc.c             | 333
12 files changed, 1396 insertions(+), 167 deletions(-)
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index c5fbe198fbdb..765bcf0df3bb 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1254,6 +1254,26 @@ static int __init early_platform_driver_probe_id(char *class_str,
}
if (match) {
+ /*
+ * Set up a sensible init_name to enable
+ * dev_name() and others to be used before the
+ * rest of the driver core is initialized.
+ */
+ if (!match->dev.init_name && slab_is_available()) {
+ if (match->id != -1)
+ match->dev.init_name =
+ kasprintf(GFP_KERNEL, "%s.%d",
+ match->name,
+ match->id);
+ else
+ match->dev.init_name =
+ kasprintf(GFP_KERNEL, "%s",
+ match->name);
+
+ if (!match->dev.init_name)
+ return -ENOMEM;
+ }
+
if (epdrv->pdrv->probe(match))
pr_warning("%s: unable to probe %s early.\n",
class_str, match->name);
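
The hunk above gives early platform devices a usable name before the driver core is initialized, so dev_name() works from the first probe. A minimal sketch of the same naming rule, factored into a helper (the helper name and the "sh_tmu" example below are illustrative):

	#include <linux/kernel.h>
	#include <linux/device.h>
	#include <linux/slab.h>

	/* Mirror of the init_name rule above: "<name>" or "<name>.<id>". */
	static int early_set_init_name(struct device *dev, const char *base, int id)
	{
		if (dev->init_name || !slab_is_available())
			return 0;	/* already named, or too early to allocate */

		if (id != -1)
			dev->init_name = kasprintf(GFP_KERNEL, "%s.%d", base, id);
		else
			dev->init_name = kasprintf(GFP_KERNEL, "%s", base);

		return dev->init_name ? 0 : -ENOMEM;
	}

An early "sh_tmu" device with id 0 thus answers dev_name() with "sh_tmu.0", which is what the clock and IRQ name lookups later in this series key off.
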
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 744f748cc84b..f6677cb19789 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -150,13 +150,12 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
int ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
- pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk);
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
@@ -279,7 +278,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
delay = 1;
if (!delay)
- pr_warning("sh_cmt: too long delay\n");
+ dev_warn(&p->pdev->dev, "too long delay\n");
} while (delay);
}
@@ -289,7 +288,7 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
unsigned long flags;
if (delta > p->max_match_value)
- pr_warning("sh_cmt: delta out of range\n");
+ dev_warn(&p->pdev->dev, "delta out of range\n");
spin_lock_irqsave(&p->lock, flags);
p->next_match_value = delta;
@@ -451,7 +450,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
cs->resume = sh_cmt_clocksource_resume;
cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
- pr_info("sh_cmt: %s used as clock source\n", cs->name);
+ dev_info(&p->pdev->dev, "used as clock source\n");
clocksource_register(cs);
return 0;
}
@@ -497,13 +496,11 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- pr_info("sh_cmt: %s used for periodic clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for periodic clock events\n");
sh_cmt_clock_event_start(p, 1);
break;
case CLOCK_EVT_MODE_ONESHOT:
- pr_info("sh_cmt: %s used for oneshot clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for oneshot clock events\n");
sh_cmt_clock_event_start(p, 0);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
@@ -544,7 +541,7 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
ced->set_next_event = sh_cmt_clock_event_next;
ced->set_mode = sh_cmt_clock_event_mode;
- pr_info("sh_cmt: %s used for clock events\n", ced->name);
+ dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);
}
@@ -601,22 +598,27 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
- pr_err("sh_cmt: failed to remap I/O memory\n");
+ dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
goto err0;
}
/* request irq using setup_irq() (too early for request_irq()) */
- p->irqaction.name = cfg->name;
+ p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_cmt_interrupt;
p->irqaction.dev_id = p;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+ IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
- p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ p->clk = clk_get(&p->pdev->dev, "cmt_fck");
if (IS_ERR(p->clk)) {
- pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk);
- ret = PTR_ERR(p->clk);
- goto err1;
+ dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ dev_err(&p->pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
}
if (resource_size(res) == 6) {
@@ -629,17 +631,17 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
p->clear_bits = ~0xc000;
}
- ret = sh_cmt_register(p, cfg->name,
+ ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
cfg->clockevent_rating,
cfg->clocksource_rating);
if (ret) {
- pr_err("sh_cmt: registration failed\n");
+ dev_err(&p->pdev->dev, "registration failed\n");
goto err1;
}
ret = setup_irq(irq, &p->irqaction);
if (ret) {
- pr_err("sh_cmt: failed to request irq %d\n", irq);
+ dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
goto err1;
}
@@ -654,11 +656,10 @@ err0:
static int __devinit sh_cmt_probe(struct platform_device *pdev)
{
struct sh_cmt_priv *p = platform_get_drvdata(pdev);
- struct sh_timer_config *cfg = pdev->dev.platform_data;
int ret;
if (p) {
- pr_info("sh_cmt: %s kept as earlytimer\n", cfg->name);
+ dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
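
All three timer drivers in this series now try a fixed connection id first ("cmt_fck" / "mtu2_fck" / "tmu_fck") and only fall back to the clock name supplied in platform data. A sketch of that two-step lookup, factored into a helper for clarity (the helper name is invented):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static struct clk *timer_clk_get(struct platform_device *pdev,
					 const char *new_id, const char *legacy_id)
	{
		struct clk *clk = clk_get(&pdev->dev, new_id);

		if (IS_ERR(clk)) {
			/* older boards still name the clock in platform data */
			dev_warn(&pdev->dev, "using deprecated clock lookup\n");
			clk = clk_get(&pdev->dev, legacy_id);
		}

		return clk;	/* callers must still check IS_ERR() */
	}
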
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 5fb78bfd73bb..ef7a5be8a09f 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -119,13 +119,12 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
static int sh_mtu2_enable(struct sh_mtu2_priv *p)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
int ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
- pr_err("sh_mtu2: cannot enable clock \"%s\"\n", cfg->clk);
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
@@ -194,8 +193,7 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- pr_info("sh_mtu2: %s used for periodic clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for periodic clock events\n");
sh_mtu2_enable(p);
break;
case CLOCK_EVT_MODE_UNUSED:
@@ -222,13 +220,13 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
ced->cpumask = cpumask_of(0);
ced->set_mode = sh_mtu2_clock_event_mode;
- pr_info("sh_mtu2: %s used for clock events\n", ced->name);
+ dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);
ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {
- pr_err("sh_mtu2: failed to request irq %d\n",
- p->irqaction.irq);
+ dev_err(&p->pdev->dev, "failed to request irq %d\n",
+ p->irqaction.irq);
return;
}
}
@@ -274,26 +272,32 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
- pr_err("sh_mtu2: failed to remap I/O memory\n");
+ dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
goto err0;
}
/* setup data for setup_irq() (too early for request_irq()) */
- p->irqaction.name = cfg->name;
+ p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_mtu2_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+ IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
- p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
if (IS_ERR(p->clk)) {
- pr_err("sh_mtu2: cannot get clock \"%s\"\n", cfg->clk);
- ret = PTR_ERR(p->clk);
- goto err1;
+ dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ dev_err(&p->pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
}
- return sh_mtu2_register(p, cfg->name, cfg->clockevent_rating);
+ return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
+ cfg->clockevent_rating);
err1:
iounmap(p->mapbase);
err0:
@@ -303,11 +307,10 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
static int __devinit sh_mtu2_probe(struct platform_device *pdev)
{
struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
- struct sh_timer_config *cfg = pdev->dev.platform_data;
int ret;
if (p) {
- pr_info("sh_mtu2: %s kept as earlytimer\n", cfg->name);
+ dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index fc9ff1e5b770..8e44e14ec4c2 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -107,13 +107,12 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
static int sh_tmu_enable(struct sh_tmu_priv *p)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
int ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
- pr_err("sh_tmu: cannot enable clock \"%s\"\n", cfg->clk);
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
@@ -229,7 +228,7 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
cs->disable = sh_tmu_clocksource_disable;
cs->mask = CLOCKSOURCE_MASK(32);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
- pr_info("sh_tmu: %s used as clock source\n", cs->name);
+ dev_info(&p->pdev->dev, "used as clock source\n");
clocksource_register(cs);
return 0;
}
@@ -277,13 +276,11 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- pr_info("sh_tmu: %s used for periodic clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for periodic clock events\n");
sh_tmu_clock_event_start(p, 1);
break;
case CLOCK_EVT_MODE_ONESHOT:
- pr_info("sh_tmu: %s used for oneshot clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for oneshot clock events\n");
sh_tmu_clock_event_start(p, 0);
break;
case CLOCK_EVT_MODE_UNUSED:
@@ -324,13 +321,13 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
ced->set_next_event = sh_tmu_clock_event_next;
ced->set_mode = sh_tmu_clock_event_mode;
- pr_info("sh_tmu: %s used for clock events\n", ced->name);
+ dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);
ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {
- pr_err("sh_tmu: failed to request irq %d\n",
- p->irqaction.irq);
+ dev_err(&p->pdev->dev, "failed to request irq %d\n",
+ p->irqaction.irq);
return;
}
}
@@ -379,26 +376,31 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
- pr_err("sh_tmu: failed to remap I/O memory\n");
+ dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
goto err0;
}
/* setup data for setup_irq() (too early for request_irq()) */
- p->irqaction.name = cfg->name;
+ p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_tmu_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+ IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
- p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ p->clk = clk_get(&p->pdev->dev, "tmu_fck");
if (IS_ERR(p->clk)) {
- pr_err("sh_tmu: cannot get clock \"%s\"\n", cfg->clk);
- ret = PTR_ERR(p->clk);
- goto err1;
+ dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ dev_err(&p->pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
}
- return sh_tmu_register(p, cfg->name,
+ return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
cfg->clockevent_rating,
cfg->clocksource_rating);
err1:
@@ -410,11 +412,10 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
static int __devinit sh_tmu_probe(struct platform_device *pdev)
{
struct sh_tmu_priv *p = platform_get_drvdata(pdev);
- struct sh_timer_config *cfg = pdev->dev.platform_data;
int ret;
if (p) {
- pr_info("sh_tmu: %s kept as earlytimer\n", cfg->name);
+ dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 6f25a20de99f..323afef77802 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -26,8 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-
-#include <asm/dmaengine.h>
+#include <linux/sh_dma.h>
#include "shdma.h"
@@ -45,7 +44,7 @@ enum sh_dmae_desc_status {
#define LOG2_DEFAULT_XFER_SIZE 2
/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
-static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
+static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
@@ -190,7 +189,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
struct sh_dmae_device, common);
struct sh_dmae_pdata *pdata = shdev->pdata;
- struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
+ const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
int shift = chan_pdata->dmars_bit;
@@ -266,8 +265,8 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
return NULL;
}
-static struct sh_dmae_slave_config *sh_dmae_find_slave(
- struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
+static const struct sh_dmae_slave_config *sh_dmae_find_slave(
+ struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
struct dma_device *dma_dev = sh_chan->common.device;
struct sh_dmae_device *shdev = container_of(dma_dev,
@@ -275,11 +274,11 @@ static struct sh_dmae_slave_config *sh_dmae_find_slave(
struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
- if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
+ if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
return NULL;
for (i = 0; i < pdata->slave_num; i++)
- if (pdata->slave[i].slave_id == slave_id)
+ if (pdata->slave[i].slave_id == param->slave_id)
return pdata->slave + i;
return NULL;
@@ -299,9 +298,9 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
* never runs concurrently with itself or free_chan_resources.
*/
if (param) {
- struct sh_dmae_slave_config *cfg;
+ const struct sh_dmae_slave_config *cfg;
- cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
+ cfg = sh_dmae_find_slave(sh_chan, param);
if (!cfg) {
ret = -EINVAL;
goto efindslave;
@@ -574,12 +573,14 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
{
struct sh_dmae_slave *param;
struct sh_dmae_chan *sh_chan;
+ dma_addr_t slave_addr;
if (!chan)
return NULL;
sh_chan = to_sh_chan(chan);
param = chan->private;
+ slave_addr = param->config->addr;
/* Someone calling slave DMA on a public channel? */
if (!param || !sg_len) {
@@ -592,7 +593,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
* if (param != NULL), this is a successfully requested slave channel,
* therefore param->config != NULL too.
*/
- return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
+ return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
direction, flags);
}
@@ -873,7 +874,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
int irq, unsigned long flags)
{
int err;
- struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
+ const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
struct platform_device *pdev = to_platform_device(shdev->common.dev);
struct sh_dmae_chan *new_sh_chan;
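
On the client side, with the whole struct sh_dmae_slave now passed through chan->private, requesting a slave channel follows the usual dmaengine pattern of this era. A sketch with an illustrative slave id and a placeholder filter callback (neither comes from this diff):

	#include <linux/dmaengine.h>
	#include <linux/sh_dma.h>

	static struct sh_dmae_slave sh_param = {
		.slave_id = 3,			/* illustrative slave id */
	};

	static struct dma_chan *request_tx_chan(dma_filter_fn filter_fn)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* the filter stashes &sh_param in chan->private on a match */
		return dma_request_channel(mask, filter_fn, &sh_param);
	}
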
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 153609a1e96c..4021275a0a43 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -17,8 +17,8 @@
#include <linux/interrupt.h>
#include <linux/list.h>
-#include <asm/dmaengine.h>
-
+#define SH_DMAC_MAX_CHANNELS 6
+#define SH_DMA_SLAVE_NUMBER 256
#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
struct device;
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 8eb094c1f61b..8d993c4cceac 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -83,16 +83,16 @@ struct sci_port {
/* Interface clock */
struct clk *iclk;
- /* Data clock */
- struct clk *dclk;
+ /* Function clock */
+ struct clk *fclk;
struct list_head node;
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
#ifdef CONFIG_SERIAL_SH_SCI_DMA
struct device *dma_dev;
- enum sh_dmae_slave_chan_id slave_tx;
- enum sh_dmae_slave_chan_id slave_rx;
+ unsigned int slave_tx;
+ unsigned int slave_rx;
struct dma_async_tx_descriptor *desc_tx;
struct dma_async_tx_descriptor *desc_rx[2];
dma_cookie_t cookie_tx;
@@ -107,6 +107,7 @@ struct sci_port {
struct work_struct work_tx;
struct work_struct work_rx;
struct timer_list rx_timer;
+ unsigned int rx_timeout;
#endif
};
@@ -674,22 +675,22 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
struct sci_port *s = to_sci_port(port);
if (s->chan_rx) {
- unsigned long tout;
u16 scr = sci_in(port, SCSCR);
u16 ssr = sci_in(port, SCxSR);
/* Disable future Rx interrupts */
- sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE);
+ if (port->type == PORT_SCIFA) {
+ disable_irq_nosync(irq);
+ scr |= 0x4000;
+ } else {
+ scr &= ~SCI_CTRL_FLAGS_RIE;
+ }
+ sci_out(port, SCSCR, scr);
/* Clear current interrupt */
sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
- /* Calculate delay for 1.5 DMA buffers */
- tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
- port->fifosize / 2;
- dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n",
- tout * 1000 / HZ);
- if (tout < 2)
- tout = 2;
- mod_timer(&s->rx_timer, jiffies + tout);
+ dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
+ jiffies, s->rx_timeout);
+ mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
return IRQ_HANDLED;
}
@@ -799,7 +800,7 @@ static int sci_notifier(struct notifier_block *self,
(phase == CPUFREQ_RESUMECHANGE)) {
spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry(sci_port, &priv->ports, node)
- sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
+ sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -810,21 +811,17 @@ static void sci_clk_enable(struct uart_port *port)
{
struct sci_port *sci_port = to_sci_port(port);
- clk_enable(sci_port->dclk);
- sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
-
- if (sci_port->iclk)
- clk_enable(sci_port->iclk);
+ clk_enable(sci_port->iclk);
+ sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
+ clk_enable(sci_port->fclk);
}
static void sci_clk_disable(struct uart_port *port)
{
struct sci_port *sci_port = to_sci_port(port);
- if (sci_port->iclk)
- clk_disable(sci_port->iclk);
-
- clk_disable(sci_port->dclk);
+ clk_disable(sci_port->fclk);
+ clk_disable(sci_port->iclk);
}
static int sci_request_irq(struct sci_port *port)
@@ -913,22 +910,26 @@ static void sci_dma_tx_complete(void *arg)
spin_lock_irqsave(&port->lock, flags);
- xmit->tail += s->sg_tx.length;
+ xmit->tail += sg_dma_len(&s->sg_tx);
xmit->tail &= UART_XMIT_SIZE - 1;
- port->icount.tx += s->sg_tx.length;
+ port->icount.tx += sg_dma_len(&s->sg_tx);
async_tx_ack(s->desc_tx);
s->cookie_tx = -EINVAL;
s->desc_tx = NULL;
- spin_unlock_irqrestore(&port->lock, flags);
-
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (uart_circ_chars_pending(xmit))
+ if (!uart_circ_empty(xmit)) {
schedule_work(&s->work_tx);
+ } else if (port->type == PORT_SCIFA) {
+ u16 ctrl = sci_in(port, SCSCR);
+ sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
}
/* Locking: called with port lock held */
@@ -972,13 +973,13 @@ static void sci_dma_rx_complete(void *arg)
unsigned long flags;
int count;
- dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+ dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
spin_lock_irqsave(&port->lock, flags);
count = sci_dma_rx_push(s, tty, s->buf_len_rx);
- mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5));
+ mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
spin_unlock_irqrestore(&port->lock, flags);
@@ -1050,6 +1051,8 @@ static void sci_submit_rx(struct sci_port *s)
sci_rx_dma_release(s, true);
return;
}
+ dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
+ s->cookie_rx[i], i);
}
s->active_rx = s->cookie_rx[0];
@@ -1107,10 +1110,10 @@ static void work_fn_rx(struct work_struct *work)
return;
}
- dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__,
- s->cookie_rx[new], new);
-
s->active_rx = s->cookie_rx[!new];
+
+ dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
+ s->cookie_rx[new], new, s->active_rx);
}
static void work_fn_tx(struct work_struct *work)
@@ -1131,14 +1134,13 @@ static void work_fn_tx(struct work_struct *work)
*/
spin_lock_irq(&port->lock);
sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
- sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
+ sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
sg->offset;
- sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
+ sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
- sg->dma_length = sg->length;
spin_unlock_irq(&port->lock);
- BUG_ON(!sg->length);
+ BUG_ON(!sg_dma_len(sg));
desc = chan->device->device_prep_slave_sg(chan,
sg, s->sg_len_tx, DMA_TO_DEVICE,
@@ -1173,23 +1175,28 @@ static void work_fn_tx(struct work_struct *work)
static void sci_start_tx(struct uart_port *port)
{
+ struct sci_port *s = to_sci_port(port);
unsigned short ctrl;
#ifdef CONFIG_SERIAL_SH_SCI_DMA
- struct sci_port *s = to_sci_port(port);
-
- if (s->chan_tx) {
- if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0)
- schedule_work(&s->work_tx);
-
- return;
+ if (port->type == PORT_SCIFA) {
+ u16 new, scr = sci_in(port, SCSCR);
+ if (s->chan_tx)
+ new = scr | 0x8000;
+ else
+ new = scr & ~0x8000;
+ if (new != scr)
+ sci_out(port, SCSCR, new);
}
+ if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
+ s->cookie_tx < 0)
+ schedule_work(&s->work_tx);
#endif
-
- /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
- ctrl = sci_in(port, SCSCR);
- ctrl |= SCI_CTRL_FLAGS_TIE;
- sci_out(port, SCSCR, ctrl);
+ if (!s->chan_tx || port->type == PORT_SCIFA) {
+ /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
+ ctrl = sci_in(port, SCSCR);
+ sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE);
+ }
}
static void sci_stop_tx(struct uart_port *port)
@@ -1198,6 +1205,8 @@ static void sci_stop_tx(struct uart_port *port)
/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
ctrl = sci_in(port, SCSCR);
+ if (port->type == PORT_SCIFA)
+ ctrl &= ~0x8000;
ctrl &= ~SCI_CTRL_FLAGS_TIE;
sci_out(port, SCSCR, ctrl);
}
@@ -1208,6 +1217,8 @@ static void sci_start_rx(struct uart_port *port)
/* Set RIE (Receive Interrupt Enable) bit in SCSCR */
ctrl |= sci_in(port, SCSCR);
+ if (port->type == PORT_SCIFA)
+ ctrl &= ~0x4000;
sci_out(port, SCSCR, ctrl);
}
@@ -1217,6 +1228,8 @@ static void sci_stop_rx(struct uart_port *port)
/* Clear RIE (Receive Interrupt Enable) bit in SCSCR */
ctrl = sci_in(port, SCSCR);
+ if (port->type == PORT_SCIFA)
+ ctrl &= ~0x4000;
ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE);
sci_out(port, SCSCR, ctrl);
}
@@ -1251,8 +1264,12 @@ static void rx_timer_fn(unsigned long arg)
{
struct sci_port *s = (struct sci_port *)arg;
struct uart_port *port = &s->port;
-
u16 scr = sci_in(port, SCSCR);
+
+ if (port->type == PORT_SCIFA) {
+ scr &= ~0x4000;
+ enable_irq(s->irqs[1]);
+ }
sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE);
dev_dbg(port->dev, "DMA Rx timed out\n");
schedule_work(&s->work_rx);
@@ -1339,8 +1356,7 @@ static void sci_request_dma(struct uart_port *port)
sg_init_table(sg, 1);
sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
(int)buf[i] & ~PAGE_MASK);
- sg->dma_address = dma[i];
- sg->dma_length = sg->length;
+ sg_dma_address(sg) = dma[i];
}
INIT_WORK(&s->work_rx, work_fn_rx);
@@ -1403,8 +1419,12 @@ static void sci_shutdown(struct uart_port *port)
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ struct sci_port *s = to_sci_port(port);
+#endif
unsigned int status, baud, smr_val, max_baud;
int t = -1;
+ u16 scfcr = 0;
/*
* earlyprintk comes here early on with port->uartclk set to zero.
@@ -1427,7 +1447,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
if (port->type != PORT_SCI)
- sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+ sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
smr_val = sci_in(port, SCSMR) & 3;
if ((termios->c_cflag & CSIZE) == CS7)
@@ -1458,10 +1478,32 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
}
sci_init_pins(port, termios->c_cflag);
- sci_out(port, SCFCR, (termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0);
+ sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
sci_out(port, SCSCR, SCSCR_INIT(port));
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ /*
+ * Calculate delay for 1.5 DMA buffers: see
+ * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
+ * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
+ * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
+ * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
+ * sizes), but it has been found out experimentally, that this is not
+ * enough: the driver too often needlessly runs on a DMA timeout. 20ms
+ * as a minimum seem to work perfectly.
+ */
+ if (s->chan_rx) {
+ s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
+ port->fifosize / 2;
+ dev_dbg(port->dev,
+ "DMA Rx t-out %ums, tty t-out %u jiffies\n",
+ s->rx_timeout * 1000 / HZ, port->timeout);
+ if (s->rx_timeout < msecs_to_jiffies(20))
+ s->rx_timeout = msecs_to_jiffies(20);
+ }
+#endif
+
if ((termios->c_cflag & CREAD) != 0)
sci_start_rx(port);
}
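
Plugging the comment's own example numbers into this formula: with HZ = 250, port->timeout = 6 jiffies (1 for the data plus 5 of slop), fifosize = 64 and buf_len_rx of two FIFO sizes (128 bytes, consistent with "3 FIFO sizes" for 1.5 buffers), rx_timeout = (6 - 250/50) * 128 * 3 / 64 / 2 = 3 jiffies, i.e. 12 ms at HZ = 250; the msecs_to_jiffies(20) floor then raises that to 5 jiffies.
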
@@ -1553,10 +1595,10 @@ static struct uart_ops sci_uart_ops = {
#endif
};
-static void __devinit sci_init_single(struct platform_device *dev,
- struct sci_port *sci_port,
- unsigned int index,
- struct plat_sci_port *p)
+static int __devinit sci_init_single(struct platform_device *dev,
+ struct sci_port *sci_port,
+ unsigned int index,
+ struct plat_sci_port *p)
{
struct uart_port *port = &sci_port->port;
@@ -1577,8 +1619,23 @@ static void __devinit sci_init_single(struct platform_device *dev,
}
if (dev) {
- sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
- sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
+ sci_port->iclk = clk_get(&dev->dev, "sci_ick");
+ if (IS_ERR(sci_port->iclk)) {
+ sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
+ if (IS_ERR(sci_port->iclk)) {
+ dev_err(&dev->dev, "can't get iclk\n");
+ return PTR_ERR(sci_port->iclk);
+ }
+ }
+
+ /*
+ * The function clock is optional, ignore it if we can't
+ * find it.
+ */
+ sci_port->fclk = clk_get(&dev->dev, "sci_fck");
+ if (IS_ERR(sci_port->fclk))
+ sci_port->fclk = NULL;
+
sci_port->enable = sci_clk_enable;
sci_port->disable = sci_clk_disable;
port->dev = &dev->dev;
@@ -1605,6 +1662,7 @@ static void __devinit sci_init_single(struct platform_device *dev,
#endif
memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
+ return 0;
}
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
@@ -1754,8 +1812,11 @@ static int sci_remove(struct platform_device *dev)
cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(p, &priv->ports, node)
+ list_for_each_entry(p, &priv->ports, node) {
uart_remove_one_port(&sci_uart_driver, &p->port);
+ clk_put(p->iclk);
+ clk_put(p->fclk);
+ }
spin_unlock_irqrestore(&priv->lock, flags);
kfree(priv);
@@ -1781,7 +1842,9 @@ static int __devinit sci_probe_single(struct platform_device *dev,
return 0;
}
- sci_init_single(dev, sciport, index, p);
+ ret = sci_init_single(dev, sciport, index, p);
+ if (ret)
+ return ret;
ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
if (ret)
diff --git a/drivers/sh/Kconfig b/drivers/sh/Kconfig
new file mode 100644
index 000000000000..a54de0b9b3df
--- /dev/null
+++ b/drivers/sh/Kconfig
@@ -0,0 +1,24 @@
+config INTC_USERIMASK
+ bool "Userspace interrupt masking support"
+ depends on ARCH_SHMOBILE || (SUPERH && CPU_SH4A)
+ help
+ This enables support for hardware-assisted userspace hardirq
+ masking.
+
+ SH-4A and newer interrupt blocks all support a special shadowed
+ page with all non-masking registers obscured when mapped in to
+ userspace. This is primarily for use by userspace device
+ drivers that are using special priority levels.
+
+ If in doubt, say N.
+
+config INTC_BALANCING
+ bool "Hardware IRQ balancing support"
+ depends on SMP && SUPERH && CPU_SUBTYPE_SH7786
+ help
+ This enables support for IRQ auto-distribution mode on SH-X3
+ SMP parts. All of the balancing and CPU wakeup decisions are
+ taken care of automatically by hardware for distributed
+ vectors.
+
+ If in doubt, say N.
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 4956bf1f2134..78bb5127abd0 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -4,4 +4,6 @@
obj-$(CONFIG_SUPERHYWAY) += superhyway/
obj-$(CONFIG_MAPLE) += maple/
obj-$(CONFIG_GENERIC_GPIO) += pfc.o
+obj-$(CONFIG_SUPERH) += clk.o
+obj-$(CONFIG_SH_CLK_CPG) += clk-cpg.o
obj-y += intc.o
diff --git a/drivers/sh/clk-cpg.c b/drivers/sh/clk-cpg.c
new file mode 100644
index 000000000000..f5c80ba9ab1c
--- /dev/null
+++ b/drivers/sh/clk-cpg.c
@@ -0,0 +1,298 @@
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/sh_clk.h>
+
+static int sh_clk_mstp32_enable(struct clk *clk)
+{
+ __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
+ clk->enable_reg);
+ return 0;
+}
+
+static void sh_clk_mstp32_disable(struct clk *clk)
+{
+ __raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
+ clk->enable_reg);
+}
+
+static struct clk_ops sh_clk_mstp32_clk_ops = {
+ .enable = sh_clk_mstp32_enable,
+ .disable = sh_clk_mstp32_disable,
+ .recalc = followparent_recalc,
+};
+
+int __init sh_clk_mstp32_register(struct clk *clks, int nr)
+{
+ struct clk *clkp;
+ int ret = 0;
+ int k;
+
+ for (k = 0; !ret && (k < nr); k++) {
+ clkp = clks + k;
+ clkp->ops = &sh_clk_mstp32_clk_ops;
+ ret |= clk_register(clkp);
+ }
+
+ return ret;
+}
+
+static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
+{
+ return clk_rate_table_round(clk, clk->freq_table, rate);
+}
+
+static int sh_clk_div6_divisors[64] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
+};
+
+static struct clk_div_mult_table sh_clk_div6_table = {
+ .divisors = sh_clk_div6_divisors,
+ .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
+};
+
+static unsigned long sh_clk_div6_recalc(struct clk *clk)
+{
+ struct clk_div_mult_table *table = &sh_clk_div6_table;
+ unsigned int idx;
+
+ clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+ table, NULL);
+
+ idx = __raw_readl(clk->enable_reg) & 0x003f;
+
+ return clk->freq_table[idx].frequency;
+}
+
+static int sh_clk_div6_set_rate(struct clk *clk,
+ unsigned long rate, int algo_id)
+{
+ unsigned long value;
+ int idx;
+
+ idx = clk_rate_table_find(clk, clk->freq_table, rate);
+ if (idx < 0)
+ return idx;
+
+ value = __raw_readl(clk->enable_reg);
+ value &= ~0x3f;
+ value |= idx;
+ __raw_writel(value, clk->enable_reg);
+ return 0;
+}
+
+static int sh_clk_div6_enable(struct clk *clk)
+{
+ unsigned long value;
+ int ret;
+
+ ret = sh_clk_div6_set_rate(clk, clk->rate, 0);
+ if (ret == 0) {
+ value = __raw_readl(clk->enable_reg);
+ value &= ~0x100; /* clear stop bit to enable clock */
+ __raw_writel(value, clk->enable_reg);
+ }
+ return ret;
+}
+
+static void sh_clk_div6_disable(struct clk *clk)
+{
+ unsigned long value;
+
+ value = __raw_readl(clk->enable_reg);
+ value |= 0x100; /* stop clock */
+ value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
+ __raw_writel(value, clk->enable_reg);
+}
+
+static struct clk_ops sh_clk_div6_clk_ops = {
+ .recalc = sh_clk_div6_recalc,
+ .round_rate = sh_clk_div_round_rate,
+ .set_rate = sh_clk_div6_set_rate,
+ .enable = sh_clk_div6_enable,
+ .disable = sh_clk_div6_disable,
+};
+
+int __init sh_clk_div6_register(struct clk *clks, int nr)
+{
+ struct clk *clkp;
+ void *freq_table;
+ int nr_divs = sh_clk_div6_table.nr_divisors;
+ int freq_table_size = sizeof(struct cpufreq_frequency_table);
+ int ret = 0;
+ int k;
+
+ freq_table_size *= (nr_divs + 1);
+ freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
+ if (!freq_table) {
+ pr_err("sh_clk_div6_register: unable to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ for (k = 0; !ret && (k < nr); k++) {
+ clkp = clks + k;
+
+ clkp->ops = &sh_clk_div6_clk_ops;
+ clkp->id = -1;
+ clkp->freq_table = freq_table + (k * freq_table_size);
+ clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
+
+ ret = clk_register(clkp);
+ }
+
+ return ret;
+}
+
+static unsigned long sh_clk_div4_recalc(struct clk *clk)
+{
+ struct clk_div4_table *d4t = clk->priv;
+ struct clk_div_mult_table *table = d4t->div_mult_table;
+ unsigned int idx;
+
+ clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+ table, &clk->arch_flags);
+
+ idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;
+
+ return clk->freq_table[idx].frequency;
+}
+
+static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
+{
+ struct clk_div4_table *d4t = clk->priv;
+ struct clk_div_mult_table *table = d4t->div_mult_table;
+ u32 value;
+ int ret;
+
+ /* we really need a better way to determine parent index, but for
+ * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
+ * no CLK_ENABLE_ON_INIT means external clock...
+ */
+
+ if (parent->flags & CLK_ENABLE_ON_INIT)
+ value = __raw_readl(clk->enable_reg) & ~(1 << 7);
+ else
+ value = __raw_readl(clk->enable_reg) | (1 << 7);
+
+ ret = clk_reparent(clk, parent);
+ if (ret < 0)
+ return ret;
+
+ __raw_writel(value, clk->enable_reg);
+
+	/* Rebuild the frequency table */
+ clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+ table, &clk->arch_flags);
+
+ return 0;
+}
+
+static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id)
+{
+ struct clk_div4_table *d4t = clk->priv;
+ unsigned long value;
+ int idx = clk_rate_table_find(clk, clk->freq_table, rate);
+ if (idx < 0)
+ return idx;
+
+ value = __raw_readl(clk->enable_reg);
+ value &= ~(0xf << clk->enable_bit);
+ value |= (idx << clk->enable_bit);
+ __raw_writel(value, clk->enable_reg);
+
+ if (d4t->kick)
+ d4t->kick(clk);
+
+ return 0;
+}
+
+static int sh_clk_div4_enable(struct clk *clk)
+{
+ __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
+ return 0;
+}
+
+static void sh_clk_div4_disable(struct clk *clk)
+{
+ __raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
+}
+
+static struct clk_ops sh_clk_div4_clk_ops = {
+ .recalc = sh_clk_div4_recalc,
+ .set_rate = sh_clk_div4_set_rate,
+ .round_rate = sh_clk_div_round_rate,
+};
+
+static struct clk_ops sh_clk_div4_enable_clk_ops = {
+ .recalc = sh_clk_div4_recalc,
+ .set_rate = sh_clk_div4_set_rate,
+ .round_rate = sh_clk_div_round_rate,
+ .enable = sh_clk_div4_enable,
+ .disable = sh_clk_div4_disable,
+};
+
+static struct clk_ops sh_clk_div4_reparent_clk_ops = {
+ .recalc = sh_clk_div4_recalc,
+ .set_rate = sh_clk_div4_set_rate,
+ .round_rate = sh_clk_div_round_rate,
+ .enable = sh_clk_div4_enable,
+ .disable = sh_clk_div4_disable,
+ .set_parent = sh_clk_div4_set_parent,
+};
+
+static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
+ struct clk_div4_table *table, struct clk_ops *ops)
+{
+ struct clk *clkp;
+ void *freq_table;
+ int nr_divs = table->div_mult_table->nr_divisors;
+ int freq_table_size = sizeof(struct cpufreq_frequency_table);
+ int ret = 0;
+ int k;
+
+ freq_table_size *= (nr_divs + 1);
+ freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
+ if (!freq_table) {
+ pr_err("sh_clk_div4_register: unable to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ for (k = 0; !ret && (k < nr); k++) {
+ clkp = clks + k;
+
+ clkp->ops = ops;
+ clkp->id = -1;
+ clkp->priv = table;
+
+ clkp->freq_table = freq_table + (k * freq_table_size);
+ clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
+
+ ret = clk_register(clkp);
+ }
+
+ return ret;
+}
+
+int __init sh_clk_div4_register(struct clk *clks, int nr,
+ struct clk_div4_table *table)
+{
+ return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
+}
+
+int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
+ struct clk_div4_table *table)
+{
+ return sh_clk_div4_register_ops(clks, nr, table,
+ &sh_clk_div4_enable_clk_ops);
+}
+
+int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
+ struct clk_div4_table *table)
+{
+ return sh_clk_div4_register_ops(clks, nr, table,
+ &sh_clk_div4_reparent_clk_ops);
+}
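
Board or CPU support code consumes these helpers by filling static clk tables and registering them at init time. A hypothetical sketch for the MSTP gate case (register address, bit and parent are invented, not taken from a datasheet):

	#include <linux/init.h>
	#include <linux/sh_clk.h>

	extern struct clk peripheral_clk;	/* assumed parent clock */

	static struct clk mstp_clks[] = {
		{
			.name		= "tmu0",	/* illustrative */
			.parent		= &peripheral_clk,
			.enable_reg	= (void __iomem *)0xa4150030, /* invented MSTPCR */
			.enable_bit	= 5,	/* bit cleared = clock supplied */
		},
	};

	static int __init board_clk_setup(void)
	{
		/* wires up sh_clk_mstp32_clk_ops: enable/disable/followparent_recalc */
		return sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
	}
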
diff --git a/drivers/sh/clk.c b/drivers/sh/clk.c
new file mode 100644
index 000000000000..5d84adac9ec4
--- /dev/null
+++ b/drivers/sh/clk.c
@@ -0,0 +1,545 @@
+/*
+ * drivers/sh/clk.c - SuperH clock framework
+ *
+ * Copyright (C) 2005 - 2009 Paul Mundt
+ *
+ * This clock framework is derived from the OMAP version by:
+ *
+ * Copyright (C) 2004 - 2008 Nokia Corporation
+ * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ *
+ * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/sysdev.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/cpufreq.h>
+#include <linux/clk.h>
+#include <linux/sh_clk.h>
+
+static LIST_HEAD(clock_list);
+static DEFINE_SPINLOCK(clock_lock);
+static DEFINE_MUTEX(clock_list_sem);
+
+void clk_rate_table_build(struct clk *clk,
+ struct cpufreq_frequency_table *freq_table,
+ int nr_freqs,
+ struct clk_div_mult_table *src_table,
+ unsigned long *bitmap)
+{
+ unsigned long mult, div;
+ unsigned long freq;
+ int i;
+
+ for (i = 0; i < nr_freqs; i++) {
+ div = 1;
+ mult = 1;
+
+ if (src_table->divisors && i < src_table->nr_divisors)
+ div = src_table->divisors[i];
+
+ if (src_table->multipliers && i < src_table->nr_multipliers)
+ mult = src_table->multipliers[i];
+
+ if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
+ freq = CPUFREQ_ENTRY_INVALID;
+ else
+ freq = clk->parent->rate * mult / div;
+
+ freq_table[i].index = i;
+ freq_table[i].frequency = freq;
+ }
+
+ /* Termination entry */
+ freq_table[i].index = i;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+}
+
+long clk_rate_table_round(struct clk *clk,
+ struct cpufreq_frequency_table *freq_table,
+ unsigned long rate)
+{
+ unsigned long rate_error, rate_error_prev = ~0UL;
+ unsigned long rate_best_fit = rate;
+ unsigned long highest, lowest;
+ int i;
+
+ highest = lowest = 0;
+
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ unsigned long freq = freq_table[i].frequency;
+
+ if (freq == CPUFREQ_ENTRY_INVALID)
+ continue;
+
+ if (freq > highest)
+ highest = freq;
+ if (freq < lowest)
+ lowest = freq;
+
+ rate_error = abs(freq - rate);
+ if (rate_error < rate_error_prev) {
+ rate_best_fit = freq;
+ rate_error_prev = rate_error;
+ }
+
+ if (rate_error == 0)
+ break;
+ }
+
+ if (rate >= highest)
+ rate_best_fit = highest;
+ if (rate <= lowest)
+ rate_best_fit = lowest;
+
+ return rate_best_fit;
+}
+
+int clk_rate_table_find(struct clk *clk,
+ struct cpufreq_frequency_table *freq_table,
+ unsigned long rate)
+{
+ int i;
+
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ unsigned long freq = freq_table[i].frequency;
+
+ if (freq == CPUFREQ_ENTRY_INVALID)
+ continue;
+
+ if (freq == rate)
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+/* Used for clocks that always have same value as the parent clock */
+unsigned long followparent_recalc(struct clk *clk)
+{
+ return clk->parent ? clk->parent->rate : 0;
+}
+
+int clk_reparent(struct clk *child, struct clk *parent)
+{
+ list_del_init(&child->sibling);
+ if (parent)
+ list_add(&child->sibling, &parent->children);
+ child->parent = parent;
+
+ /* now do the debugfs renaming to reattach the child
+ to the proper parent */
+
+ return 0;
+}
+
+/* Propagate rate to children */
+void propagate_rate(struct clk *tclk)
+{
+ struct clk *clkp;
+
+ list_for_each_entry(clkp, &tclk->children, sibling) {
+ if (clkp->ops && clkp->ops->recalc)
+ clkp->rate = clkp->ops->recalc(clkp);
+
+ propagate_rate(clkp);
+ }
+}
+
+static void __clk_disable(struct clk *clk)
+{
+ if (WARN(!clk->usecount, "Trying to disable clock %s with 0 usecount\n",
+ clk->name))
+ return;
+
+ if (!(--clk->usecount)) {
+ if (likely(clk->ops && clk->ops->disable))
+ clk->ops->disable(clk);
+ if (likely(clk->parent))
+ __clk_disable(clk->parent);
+ }
+}
+
+void clk_disable(struct clk *clk)
+{
+ unsigned long flags;
+
+ if (!clk)
+ return;
+
+ spin_lock_irqsave(&clock_lock, flags);
+ __clk_disable(clk);
+ spin_unlock_irqrestore(&clock_lock, flags);
+}
+EXPORT_SYMBOL_GPL(clk_disable);
+
+static int __clk_enable(struct clk *clk)
+{
+ int ret = 0;
+
+ if (clk->usecount++ == 0) {
+ if (clk->parent) {
+ ret = __clk_enable(clk->parent);
+ if (unlikely(ret))
+ goto err;
+ }
+
+ if (clk->ops && clk->ops->enable) {
+ ret = clk->ops->enable(clk);
+ if (ret) {
+ if (clk->parent)
+ __clk_disable(clk->parent);
+ goto err;
+ }
+ }
+ }
+
+ return ret;
+err:
+ clk->usecount--;
+ return ret;
+}
+
+int clk_enable(struct clk *clk)
+{
+ unsigned long flags;
+ int ret;
+
+ if (!clk)
+ return -EINVAL;
+
+ spin_lock_irqsave(&clock_lock, flags);
+ ret = __clk_enable(clk);
+ spin_unlock_irqrestore(&clock_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_enable);
+
+static LIST_HEAD(root_clks);
+
+/**
+ * recalculate_root_clocks - recalculate and propagate all root clocks
+ *
+ * Recalculates all root clocks (clocks with no parent), which if the
+ * clock's .recalc is set correctly, should also propagate their rates.
+ * Called at init.
+ */
+void recalculate_root_clocks(void)
+{
+ struct clk *clkp;
+
+ list_for_each_entry(clkp, &root_clks, sibling) {
+ if (clkp->ops && clkp->ops->recalc)
+ clkp->rate = clkp->ops->recalc(clkp);
+ propagate_rate(clkp);
+ }
+}
+
+int clk_register(struct clk *clk)
+{
+ if (clk == NULL || IS_ERR(clk))
+ return -EINVAL;
+
+ /*
+ * trap out already registered clocks
+ */
+ if (clk->node.next || clk->node.prev)
+ return 0;
+
+ mutex_lock(&clock_list_sem);
+
+ INIT_LIST_HEAD(&clk->children);
+ clk->usecount = 0;
+
+ if (clk->parent)
+ list_add(&clk->sibling, &clk->parent->children);
+ else
+ list_add(&clk->sibling, &root_clks);
+
+ list_add(&clk->node, &clock_list);
+ if (clk->ops && clk->ops->init)
+ clk->ops->init(clk);
+ mutex_unlock(&clock_list_sem);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(clk_register);
+
+void clk_unregister(struct clk *clk)
+{
+ mutex_lock(&clock_list_sem);
+ list_del(&clk->sibling);
+ list_del(&clk->node);
+ mutex_unlock(&clock_list_sem);
+}
+EXPORT_SYMBOL_GPL(clk_unregister);
+
+void clk_enable_init_clocks(void)
+{
+ struct clk *clkp;
+
+ list_for_each_entry(clkp, &clock_list, node)
+ if (clkp->flags & CLK_ENABLE_ON_INIT)
+ clk_enable(clkp);
+}
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ return clk->rate;
+}
+EXPORT_SYMBOL_GPL(clk_get_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ return clk_set_rate_ex(clk, rate, 0);
+}
+EXPORT_SYMBOL_GPL(clk_set_rate);
+
+int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
+{
+ int ret = -EOPNOTSUPP;
+ unsigned long flags;
+
+ spin_lock_irqsave(&clock_lock, flags);
+
+ if (likely(clk->ops && clk->ops->set_rate)) {
+ ret = clk->ops->set_rate(clk, rate, algo_id);
+ if (ret != 0)
+ goto out_unlock;
+ } else {
+ clk->rate = rate;
+ ret = 0;
+ }
+
+ if (clk->ops && clk->ops->recalc)
+ clk->rate = clk->ops->recalc(clk);
+
+ propagate_rate(clk);
+
+out_unlock:
+ spin_unlock_irqrestore(&clock_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_rate_ex);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ if (!parent || !clk)
+ return ret;
+ if (clk->parent == parent)
+ return 0;
+
+ spin_lock_irqsave(&clock_lock, flags);
+ if (clk->usecount == 0) {
+ if (clk->ops->set_parent)
+ ret = clk->ops->set_parent(clk, parent);
+ else
+ ret = clk_reparent(clk, parent);
+
+ if (ret == 0) {
+ pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
+ clk->name, clk->parent->name, clk->rate);
+ if (clk->ops->recalc)
+ clk->rate = clk->ops->recalc(clk);
+ propagate_rate(clk);
+ }
+ } else
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&clock_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+ return clk->parent;
+}
+EXPORT_SYMBOL_GPL(clk_get_parent);
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ if (likely(clk->ops && clk->ops->round_rate)) {
+ unsigned long flags, rounded;
+
+ spin_lock_irqsave(&clock_lock, flags);
+ rounded = clk->ops->round_rate(clk, rate);
+ spin_unlock_irqrestore(&clock_lock, flags);
+
+ return rounded;
+ }
+
+ return clk_get_rate(clk);
+}
+EXPORT_SYMBOL_GPL(clk_round_rate);
+
+#ifdef CONFIG_PM
+static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
+{
+ static pm_message_t prev_state;
+ struct clk *clkp;
+
+ switch (state.event) {
+ case PM_EVENT_ON:
+		/* Resuming from hibernation */
+ if (prev_state.event != PM_EVENT_FREEZE)
+ break;
+
+ list_for_each_entry(clkp, &clock_list, node) {
+ if (likely(clkp->ops)) {
+ unsigned long rate = clkp->rate;
+
+ if (likely(clkp->ops->set_parent))
+ clkp->ops->set_parent(clkp,
+ clkp->parent);
+ if (likely(clkp->ops->set_rate))
+ clkp->ops->set_rate(clkp,
+ rate, NO_CHANGE);
+ else if (likely(clkp->ops->recalc))
+ clkp->rate = clkp->ops->recalc(clkp);
+ }
+ }
+ break;
+ case PM_EVENT_FREEZE:
+ break;
+ case PM_EVENT_SUSPEND:
+ break;
+ }
+
+ prev_state = state;
+ return 0;
+}
+
+static int clks_sysdev_resume(struct sys_device *dev)
+{
+ return clks_sysdev_suspend(dev, PMSG_ON);
+}
+
+static struct sysdev_class clks_sysdev_class = {
+ .name = "clks",
+};
+
+static struct sysdev_driver clks_sysdev_driver = {
+ .suspend = clks_sysdev_suspend,
+ .resume = clks_sysdev_resume,
+};
+
+static struct sys_device clks_sysdev_dev = {
+ .cls = &clks_sysdev_class,
+};
+
+static int __init clk_sysdev_init(void)
+{
+ sysdev_class_register(&clks_sysdev_class);
+ sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
+ sysdev_register(&clks_sysdev_dev);
+
+ return 0;
+}
+subsys_initcall(clk_sysdev_init);
+#endif
+
+/*
+ * debugfs support to trace clock tree hierarchy and attributes
+ */
+static struct dentry *clk_debugfs_root;
+
+static int clk_debugfs_register_one(struct clk *c)
+{
+ int err;
+ struct dentry *d, *child, *child_tmp;
+ struct clk *pa = c->parent;
+ char s[255];
+ char *p = s;
+
+ p += sprintf(p, "%s", c->name);
+ if (c->id >= 0)
+ sprintf(p, ":%d", c->id);
+ d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
+ if (!d)
+ return -ENOMEM;
+ c->dentry = d;
+
+ d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
+ if (!d) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
+ if (!d) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
+ if (!d) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ return 0;
+
+err_out:
+ d = c->dentry;
+ list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
+ debugfs_remove(child);
+ debugfs_remove(c->dentry);
+ return err;
+}
+
+static int clk_debugfs_register(struct clk *c)
+{
+ int err;
+ struct clk *pa = c->parent;
+
+ if (pa && !pa->dentry) {
+ err = clk_debugfs_register(pa);
+ if (err)
+ return err;
+ }
+
+ if (!c->dentry && c->name) {
+ err = clk_debugfs_register_one(c);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int __init clk_debugfs_init(void)
+{
+ struct clk *c;
+ struct dentry *d;
+ int err;
+
+ d = debugfs_create_dir("clock", NULL);
+ if (!d)
+ return -ENOMEM;
+ clk_debugfs_root = d;
+
+ list_for_each_entry(c, &clock_list, node) {
+ err = clk_debugfs_register(c);
+ if (err)
+ goto err_out;
+ }
+ return 0;
+err_out:
+ debugfs_remove_recursive(clk_debugfs_root);
+ return err;
+}
+late_initcall(clk_debugfs_init);
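
Seen from a consumer, the framework behaves like the usual clk API: enable/disable are refcounted up the parent chain under clock_lock, and rate changes propagate down the tree via propagate_rate(). A minimal usage sketch ("sci_fck" matches a lookup used elsewhere in this series; the target rate is illustrative):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int example_clk_user(struct device *dev)
	{
		struct clk *clk = clk_get(dev, "sci_fck");
		int ret;

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_enable(clk);	/* enables the parent chain too */
		if (!ret) {
			dev_info(dev, "rate: %lu Hz\n", clk_get_rate(clk));
			/* round against the divisor table before setting */
			clk_set_rate(clk, clk_round_rate(clk, 33333333));
			clk_disable(clk);	/* refcounted: parents drop too */
		}

		clk_put(clk);
		return ret;
	}
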
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 94ad6bd86a00..c585574b9aed 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -28,6 +28,7 @@
#include <linux/topology.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
+#include <asm/sizes.h>
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -45,6 +46,12 @@ struct intc_handle_int {
unsigned long handle;
};
+struct intc_window {
+ phys_addr_t phys;
+ void __iomem *virt;
+ unsigned long size;
+};
+
struct intc_desc_int {
struct list_head list;
struct sys_device sysdev;
@@ -58,6 +65,8 @@ struct intc_desc_int {
unsigned int nr_prio;
struct intc_handle_int *sense;
unsigned int nr_sense;
+ struct intc_window *window;
+ unsigned int nr_windows;
struct irq_chip chip;
};
@@ -87,8 +96,12 @@ static DEFINE_SPINLOCK(vector_lock);
#define SMP_NR(d, x) 1
#endif
-static unsigned int intc_prio_level[NR_IRQS]; /* for now */
+static unsigned int intc_prio_level[NR_IRQS]; /* for now */
+static unsigned int default_prio_level = 2; /* 2 - 16 */
static unsigned long ack_handle[NR_IRQS];
+#ifdef CONFIG_INTC_BALANCING
+static unsigned long dist_handle[NR_IRQS];
+#endif
static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
@@ -96,6 +109,47 @@ static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
return container_of(chip, struct intc_desc_int, chip);
}
+static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
+ unsigned long address)
+{
+ struct intc_window *window;
+ int k;
+
+ /* scan through physical windows and convert address */
+ for (k = 0; k < d->nr_windows; k++) {
+ window = d->window + k;
+
+ if (address < window->phys)
+ continue;
+
+ if (address >= (window->phys + window->size))
+ continue;
+
+ address -= window->phys;
+ address += (unsigned long)window->virt;
+
+ return address;
+ }
+
+ /* no windows defined, register must be 1:1 mapped virt:phys */
+ return address;
+}
+
+static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
+{
+ unsigned int k;
+
+ address = intc_phys_to_virt(d, address);
+
+ for (k = 0; k < d->nr_reg; k++) {
+ if (d->reg[k] == address)
+ return k;
+ }
+
+ BUG();
+ return 0;
+}
+
static inline unsigned int set_field(unsigned int value,
unsigned int field_value,
unsigned int handle)
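
The window remap above lets controller tables keep using physical register addresses: register_intc_controller() (further down) ioremap()s each MEM resource attached to the descriptor, and save_reg()/intc_get_reg() translate through those windows. A hypothetical descriptor fragment exercising it (base address and size are invented):

	static struct resource intc_resources[] = {
		[0] = {
			.start	= 0xfe410000,	/* invented INTC base */
			.end	= 0xfe410000 + 0xfff,
			.flags	= IORESOURCE_MEM,
		},
	};

	static struct intc_desc demo_intc_desc __initdata = {
		.name		= "demo-intc",
		.resource	= intc_resources,
		.num_resources	= ARRAY_SIZE(intc_resources),
		/* .hw = vectors, mask/prio registers, etc. as usual */
	};
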
@@ -229,6 +283,85 @@ static void (*intc_disable_fns[])(unsigned long addr,
[MODE_PCLR_REG] = intc_mode_field,
};
+#ifdef CONFIG_INTC_BALANCING
+static inline void intc_balancing_enable(unsigned int irq)
+{
+ struct intc_desc_int *d = get_intc_desc(irq);
+ unsigned long handle = dist_handle[irq];
+ unsigned long addr;
+
+ if (irq_balancing_disabled(irq) || !handle)
+ return;
+
+ addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
+ intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
+}
+
+static inline void intc_balancing_disable(unsigned int irq)
+{
+ struct intc_desc_int *d = get_intc_desc(irq);
+ unsigned long handle = dist_handle[irq];
+ unsigned long addr;
+
+ if (irq_balancing_disabled(irq) || !handle)
+ return;
+
+ addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
+ intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
+}
+
+static unsigned int intc_dist_data(struct intc_desc *desc,
+ struct intc_desc_int *d,
+ intc_enum enum_id)
+{
+ struct intc_mask_reg *mr = desc->hw.mask_regs;
+ unsigned int i, j, fn, mode;
+ unsigned long reg_e, reg_d;
+
+ for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
+ mr = desc->hw.mask_regs + i;
+
+ /*
+ * Skip this entry if there's no auto-distribution
+ * register associated with it.
+ */
+ if (!mr->dist_reg)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
+ if (mr->enum_ids[j] != enum_id)
+ continue;
+
+ fn = REG_FN_MODIFY_BASE;
+ mode = MODE_ENABLE_REG;
+ reg_e = mr->dist_reg;
+ reg_d = mr->dist_reg;
+
+ fn += (mr->reg_width >> 3) - 1;
+ return _INTC_MK(fn, mode,
+ intc_get_reg(d, reg_e),
+ intc_get_reg(d, reg_d),
+ 1,
+ (mr->reg_width - 1) - j);
+ }
+ }
+
+ /*
+ * It's possible we've gotten here with no distribution options
+ * available for the IRQ in question, so we just skip over those.
+ */
+ return 0;
+}
+#else
+static inline void intc_balancing_enable(unsigned int irq)
+{
+}
+
+static inline void intc_balancing_disable(unsigned int irq)
+{
+}
+#endif
+
static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
struct intc_desc_int *d = get_intc_desc(irq);
@@ -244,6 +377,8 @@ static inline void _intc_enable(unsigned int irq, unsigned long handle)
intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
[_INTC_FN(handle)], irq);
}
+
+ intc_balancing_enable(irq);
}
static void intc_enable(unsigned int irq)
@@ -254,10 +389,12 @@ static void intc_enable(unsigned int irq)
static void intc_disable(unsigned int irq)
{
struct intc_desc_int *d = get_intc_desc(irq);
- unsigned long handle = (unsigned long) get_irq_chip_data(irq);
+ unsigned long handle = (unsigned long)get_irq_chip_data(irq);
unsigned long addr;
unsigned int cpu;
+ intc_balancing_disable(irq);
+
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
@@ -336,8 +473,7 @@ static void intc_mask_ack(unsigned int irq)
intc_disable(irq);
- /* read register and write zero only to the assocaited bit */
-
+ /* read register and write zero only to the associated bit */
if (handle) {
addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
switch (_INTC_FN(handle)) {
@@ -366,7 +502,8 @@ static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
{
int i;
- /* this doesn't scale well, but...
+ /*
+ * this doesn't scale well, but...
*
	 * this function should only be used for certain uncommon
* operations such as intc_set_priority() and intc_set_sense()
@@ -377,7 +514,6 @@ static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
* memory footprint down is to make sure the array is sorted
* and then perform a bisect to lookup the irq.
*/
-
for (i = 0; i < nr_hp; i++) {
if ((hp + i)->irq != irq)
continue;
@@ -408,7 +544,6 @@ int intc_set_priority(unsigned int irq, unsigned int prio)
* primary masking method is using intc_prio_level[irq]
* priority level will be set during next enable()
*/
-
if (_INTC_FN(ihp->handle) != REG_FN_ERR)
_intc_enable(irq, ihp->handle);
}
@@ -447,20 +582,6 @@ static int intc_set_sense(unsigned int irq, unsigned int type)
return 0;
}
-static unsigned int __init intc_get_reg(struct intc_desc_int *d,
- unsigned long address)
-{
- unsigned int k;
-
- for (k = 0; k < d->nr_reg; k++) {
- if (d->reg[k] == address)
- return k;
- }
-
- BUG();
- return 0;
-}
-
static intc_enum __init intc_grp_id(struct intc_desc *desc,
intc_enum enum_id)
{
@@ -718,13 +839,14 @@ static void __init intc_register_irq(struct intc_desc *desc,
*/
set_bit(irq, intc_irq_map);
- /* Prefer single interrupt source bitmap over other combinations:
+ /*
+ * Prefer single interrupt source bitmap over other combinations:
+ *
* 1. bitmap, single interrupt source
* 2. priority, single interrupt source
* 3. bitmap, multiple interrupt sources (groups)
* 4. priority, multiple interrupt sources (groups)
*/
-
data[0] = intc_mask_data(desc, d, enum_id, 0);
data[1] = intc_prio_data(desc, d, enum_id, 0);
@@ -749,10 +871,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
handle_level_irq, "level");
set_irq_chip_data(irq, (void *)data[primary]);
- /* set priority level
+ /*
+ * set priority level
* - this needs to be at least 2 for 5-bit priorities on 7780
*/
- intc_prio_level[irq] = 2;
+ intc_prio_level[irq] = default_prio_level;
/* enable secondary masking method if present */
if (data[!primary])
@@ -769,7 +892,6 @@ static void __init intc_register_irq(struct intc_desc *desc,
* only secondary priority should access registers, so
* set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
*/
-
hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
}
@@ -790,6 +912,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
if (desc->hw.ack_regs)
ack_handle[irq] = intc_ack_data(desc, d, enum_id);
+#ifdef CONFIG_INTC_BALANCING
+ if (desc->hw.mask_regs)
+ dist_handle[irq] = intc_dist_data(desc, d, enum_id);
+#endif
+
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
#endif
@@ -801,6 +928,8 @@ static unsigned int __init save_reg(struct intc_desc_int *d,
unsigned int smp)
{
if (value) {
+ value = intc_phys_to_virt(d, value);
+
d->reg[cnt] = value;
#ifdef CONFIG_SMP
d->smp[cnt] = smp;
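save_reg() above now funnels every register address through a physical-to-virtual translation before caching it. A sketch of what such a helper might look like, assuming the window array that register_intc_controller() fills in further down (the phys/size/virt field names come from this patch; the struct name and the body are illustrative):

static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
				       unsigned long address)
{
	unsigned int k;

	/* scan the ioremapped windows for one covering this address */
	for (k = 0; k < d->nr_windows; k++) {
		struct intc_window *window = d->window + k;

		if (address < window->phys ||
		    address >= window->phys + window->size)
			continue;

		/* rebase the address into the remapped window */
		return address - window->phys +
		       (unsigned long)window->virt;
	}

	/* no window matched; assume registers are mapped 1:1 */
	return address;
}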
@@ -816,25 +945,59 @@ static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
generic_handle_irq((unsigned int)get_irq_data(irq));
}
-void __init register_intc_controller(struct intc_desc *desc)
+int __init register_intc_controller(struct intc_desc *desc)
{
unsigned int i, k, smp;
struct intc_hw_desc *hw = &desc->hw;
struct intc_desc_int *d;
+ struct resource *res;
+
+ pr_info("intc: Registered controller '%s' with %u IRQs\n",
+ desc->name, hw->nr_vectors);
d = kzalloc(sizeof(*d), GFP_NOWAIT);
+ if (!d)
+ goto err0;
INIT_LIST_HEAD(&d->list);
list_add(&d->list, &intc_list);
+ if (desc->num_resources) {
+ d->nr_windows = desc->num_resources;
+ d->window = kzalloc(d->nr_windows * sizeof(*d->window),
+ GFP_NOWAIT);
+ if (!d->window)
+ goto err1;
+
+ for (k = 0; k < d->nr_windows; k++) {
+ res = desc->resource + k;
+ WARN_ON(resource_type(res) != IORESOURCE_MEM);
+ d->window[k].phys = res->start;
+ d->window[k].size = resource_size(res);
+ d->window[k].virt = ioremap_nocache(res->start,
+ resource_size(res));
+ if (!d->window[k].virt)
+ goto err2;
+ }
+ }
+
d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
+#ifdef CONFIG_INTC_BALANCING
+ if (d->nr_reg)
+ d->nr_reg += hw->nr_mask_regs;
+#endif
d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
+ if (!d->reg)
+ goto err2;
+
#ifdef CONFIG_SMP
d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
+ if (!d->smp)
+ goto err3;
#endif
k = 0;
@@ -843,12 +1006,17 @@ void __init register_intc_controller(struct intc_desc *desc)
smp = IS_SMP(hw->mask_regs[i]);
k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
+#ifdef CONFIG_INTC_BALANCING
+ k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
+#endif
}
}
if (hw->prio_regs) {
d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
GFP_NOWAIT);
+ if (!d->prio)
+ goto err4;
for (i = 0; i < hw->nr_prio_regs; i++) {
smp = IS_SMP(hw->prio_regs[i]);
@@ -860,6 +1028,8 @@ void __init register_intc_controller(struct intc_desc *desc)
if (hw->sense_regs) {
d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
GFP_NOWAIT);
+ if (!d->sense)
+ goto err5;
for (i = 0; i < hw->nr_sense_regs; i++)
k += save_reg(d, k, hw->sense_regs[i].reg, 0);
@@ -906,7 +1076,7 @@ void __init register_intc_controller(struct intc_desc *desc)
irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
if (unlikely(!irq_desc)) {
- pr_info("can't get irq_desc for %d\n", irq);
+ pr_err("can't get irq_desc for %d\n", irq);
continue;
}
@@ -926,7 +1096,7 @@ void __init register_intc_controller(struct intc_desc *desc)
*/
irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
if (unlikely(!irq_desc)) {
- pr_info("can't get irq_desc for %d\n", irq2);
+ pr_err("can't get irq_desc for %d\n", irq2);
continue;
}
@@ -942,8 +1112,100 @@ void __init register_intc_controller(struct intc_desc *desc)
/* enable bits matching force_enable after registering irqs */
if (desc->force_enable)
intc_enable_disable_enum(desc, d, desc->force_enable, 1);
+
+ return 0;
+err5:
+ kfree(d->prio);
+err4:
+#ifdef CONFIG_SMP
+ kfree(d->smp);
+err3:
+#endif
+ kfree(d->reg);
+err2:
+ for (k = 0; k < d->nr_windows; k++)
+ if (d->window[k].virt)
+ iounmap(d->window[k].virt);
+
+ kfree(d->window);
+err1:
+ kfree(d);
+err0:
+ pr_err("unable to allocate INTC memory\n");
+
+ return -ENOMEM;
+}
+
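The err0..err5 ladder above follows the usual kernel goto-unwind idiom: labels are ordered so that each one releases exactly what had been successfully allocated before the corresponding failure point. A minimal self-contained sketch of the pattern:

#include <linux/slab.h>

struct two_bufs {
	void *a;
	void *b;
};

static int two_bufs_setup(struct two_bufs *t)
{
	t->a = kzalloc(64, GFP_KERNEL);
	if (!t->a)
		goto err0;

	t->b = kzalloc(64, GFP_KERNEL);
	if (!t->b)
		goto err1;	/* must undo the first allocation */

	return 0;

err1:
	kfree(t->a);
err0:
	return -ENOMEM;
}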
+#ifdef CONFIG_INTC_USERIMASK
+static void __iomem *uimask;
+
+int register_intc_userimask(unsigned long addr)
+{
+ if (unlikely(uimask))
+ return -EBUSY;
+
+ uimask = ioremap_nocache(addr, SZ_4K);
+ if (unlikely(!uimask))
+ return -ENOMEM;
+
+ pr_info("intc: userimask support registered for levels 0 -> %d\n",
+ default_prio_level - 1);
+
+ return 0;
+}
+
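register_intc_userimask() is intended to be called once from SoC setup code with the physical address of the USERIMASK control register. A hypothetical caller, where both the function name and the address are illustrative placeholders rather than values taken from a real SoC:

/* hypothetical setup hook; 0xfe410000 is a made-up address */
static int __init example_soc_userimask_init(void)
{
	return register_intc_userimask(0xfe410000);
}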
+static ssize_t
+show_intc_userimask(struct sysdev_class *cls,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
+}
+
+static ssize_t
+store_intc_userimask(struct sysdev_class *cls,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long level;
+
+ level = simple_strtoul(buf, NULL, 10);
+
+ /*
+ * Minimal acceptable IRQ levels are in the 2 - 16 range, but
+ * these are chomped so as to not interfere with normal IRQs.
+ *
+ * Level 1 is a special case on some CPUs in that it's not
+ * directly settable, but given that USERIMASK cuts off below a
+ * certain level, we don't care about this limitation here.
+ * Level 0 on the other hand equates to user masking disabled.
+ *
+ * We use default_prio_level as a cut off so that only special
+ * case opt-in IRQs can be mangled.
+ */
+ if (level >= default_prio_level)
+ return -EINVAL;
+
+ __raw_writel(0xa5 << 24 | level << 4, uimask);
+
+ return count;
+}
+static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
+ show_intc_userimask, store_intc_userimask);
+#endif
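Once registered, the attribute shows up as a sysdev class file (on sysdev-based kernels of this era, under /sys/devices/system/intc/), and writing a decimal level below default_prio_level raises the hardware mask. Restating the __raw_writel() encoding above as a small helper: the 0xa5 key in bits 31:24 and the level in bits 7:4 are read directly off the code; anything beyond that is an assumption.

/* illustrative restatement of the store_intc_userimask() encoding */
static inline unsigned long uimask_encode(unsigned int level)
{
	return (0xa5UL << 24) | ((level & 0xfUL) << 4);
}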
+
+static ssize_t
+show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
+{
+ struct intc_desc_int *d;
+
+ d = container_of(dev, struct intc_desc_int, sysdev);
+
+ return sprintf(buf, "%s\n", d->chip.name);
+}
+
+static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
+
static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
struct intc_desc_int *d;
@@ -1003,19 +1265,28 @@ static int __init register_intc_sysdevs(void)
int id = 0;
error = sysdev_class_register(&intc_sysdev_class);
+#ifdef CONFIG_INTC_USERIMASK
+ if (!error && uimask)
+ error = sysdev_class_create_file(&intc_sysdev_class,
+ &attr_userimask);
+#endif
if (!error) {
list_for_each_entry(d, &intc_list, list) {
d->sysdev.id = id;
d->sysdev.cls = &intc_sysdev_class;
error = sysdev_register(&d->sysdev);
+ if (error == 0)
+ error = sysdev_create_file(&d->sysdev,
+ &attr_name);
if (error)
break;
+
id++;
}
}
if (error)
- pr_warning("intc: sysdev registration error\n");
+ pr_err("intc: sysdev registration error\n");
return error;
}
@@ -1048,7 +1319,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
desc = irq_to_desc_alloc_node(new, node);
if (unlikely(!desc)) {
- pr_info("can't get irq_desc for %d\n", new);
+ pr_err("can't get irq_desc for %d\n", new);
goto out_unlock;
}