Diffstat (limited to 'drivers/soc/fsl')
27 files changed, 837 insertions, 659 deletions
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index f9ad8ad54a7d..4df32bc4c7a6 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -40,4 +40,14 @@ config DPAA2_CONSOLE
 	  /dev/dpaa2_mc_console and /dev/dpaa2_aiop_console,
 	  which can be used to dump the Management Complex and
 	  AIOP firmware logs.
+
+config FSL_RCPM
+	bool "Freescale RCPM support"
+	depends on PM_SLEEP && (ARM || ARM64)
+	help
+	  The NXP QorIQ processors based on ARM cores have an RCPM module
+	  (Run Control and Power Management), which performs all device-level
+	  tasks associated with power management, such as wakeup source control.
+	  Note that this driver currently does not support PowerPC-based
+	  QorIQ processors.
 endmenu
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 71dee8d0d1f0..906f1cd8af01 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_FSL_DPAA)			+= qbman/
 obj-$(CONFIG_QUICC_ENGINE)		+= qe/
 obj-$(CONFIG_CPM)			+= qe/
+obj-$(CONFIG_FSL_RCPM)			+= rcpm.o
 obj-$(CONFIG_FSL_GUTS)			+= guts.o
 obj-$(CONFIG_FSL_MC_DPIO)		+= dpio/
 obj-$(CONFIG_DPAA2_CONSOLE)		+= dpaa2-console.o
diff --git a/drivers/soc/fsl/dpaa2-console.c b/drivers/soc/fsl/dpaa2-console.c
index 9168d8ddc932..27243f706f37 100644
--- a/drivers/soc/fsl/dpaa2-console.c
+++ b/drivers/soc/fsl/dpaa2-console.c
@@ -73,7 +73,7 @@ static u64 get_mc_fw_base_address(void)
 	mcfbaregs = ioremap(mc_base_addr.start, resource_size(&mc_base_addr));
 	if (!mcfbaregs) {
-		pr_err("could not map MC Firmaware Base registers\n");
+		pr_err("could not map MC Firmware Base registers\n");
 		return 0;
 	}
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index b9539ef2c3cd..518a8e081b49 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -305,8 +305,6 @@ void dpaa2_io_service_deregister(struct dpaa2_io *service,
 	list_del(&ctx->node);
 	spin_unlock_irqrestore(&d->lock_notifications, irqflags);
 
-	if (dev)
-		device_link_remove(dev, d->dev);
 }
 EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 1ef8068c8dd3..34810f9bb2ee 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -102,6 +102,11 @@ static const struct fsl_soc_die_attr fsl_soc_die[] = {
 	  .svr		= 0x87360000,
 	  .mask		= 0xff3f0000,
 	},
+	/* Die: LS1028A, SoC: LS1028A */
+	{ .die		= "LS1028A",
+	  .svr		= 0x870b0000,
+	  .mask		= 0xff3f0000,
+	},
 	{ },
 };
@@ -224,6 +229,7 @@ static const struct of_device_id fsl_guts_of_match[] = {
 	{ .compatible = "fsl,ls1012a-dcfg", },
 	{ .compatible = "fsl,ls1046a-dcfg", },
 	{ .compatible = "fsl,lx2160a-dcfg", },
+	{ .compatible = "fsl,ls1028a-dcfg", },
 	{}
 };
 MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
index f84ab596bde8..f4fb527d8301 100644
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -635,30 +635,31 @@ int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
 	return 0;
 }
 
-static int bm_shutdown_pool(u32 bpid)
+int bm_shutdown_pool(u32 bpid)
 {
+	int err = 0;
 	struct bm_mc_command *bm_cmd;
 	union bm_mc_result *bm_res;
+
+	struct bman_portal *p = get_affine_portal();
 
 	while (1) {
-		struct bman_portal *p = get_affine_portal();
 		/* Acquire buffers until empty */
 		bm_cmd = bm_mc_start(&p->p);
 		bm_cmd->bpid = bpid;
 		bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
 		if (!bm_mc_result_timeout(&p->p, &bm_res)) {
-			put_affine_portal();
 			pr_crit("BMan Acquire Command timed out\n");
-			return -ETIMEDOUT;
+			err = -ETIMEDOUT;
+			goto done;
 		}
 		if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
-			put_affine_portal();
 			/* Pool is empty */
-			return 0;
+			goto done;
 		}
-		put_affine_portal();
 	}
-
+done:
+	put_affine_portal();
-	return 0;
+	return err;
 }
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index 7c3cc968053c..cb24a08be084 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -97,17 +97,40 @@ static void bm_get_version(u16 *id, u8 *major, u8 *minor)
 /* signal transactions for FBPRs with higher priority */
 #define FBPR_AR_RPRIO_HI BIT(30)
 
-static void bm_set_memory(u64 ba, u32 size)
+/* Track if probe has occurred and if cleanup is required */
+static int __bman_probed;
+static int __bman_requires_cleanup;
+
+
+static int bm_set_memory(u64 ba, u32 size)
 {
+	u32 bar, bare;
 	u32 exp = ilog2(size);
 	/* choke if size isn't within range */
 	DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
 		    is_power_of_2(size));
 	/* choke if '[e]ba' has lower-alignment than 'size' */
 	DPAA_ASSERT(!(ba & (size - 1)));
+
+	/* Check to see if BMan has already been initialized */
+	bar = bm_ccsr_in(REG_FBPR_BAR);
+	if (bar) {
+		/* Make sure ba == what was programmed */
+		bare = bm_ccsr_in(REG_FBPR_BARE);
+		if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
+			pr_err("Attempted to reinitialize BMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
+			       ba, bare, bar);
+			return -ENOMEM;
+		}
+		pr_info("BMan BAR already configured\n");
+		__bman_requires_cleanup = 1;
+		return 1;
+	}
+
 	bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
 	bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
 	bm_ccsr_out(REG_FBPR_AR, exp - 1);
+	return 0;
 }
 
 /*
@@ -120,7 +143,6 @@ static void bm_set_memory(u64 ba, u32 size)
  */
 static dma_addr_t fbpr_a;
 static size_t fbpr_sz;
-static int __bman_probed;
 
 static int bman_fbpr(struct reserved_mem *rmem)
 {
@@ -173,6 +195,16 @@ int bman_is_probed(void)
 }
 EXPORT_SYMBOL_GPL(bman_is_probed);
 
+int bman_requires_cleanup(void)
+{
+	return __bman_requires_cleanup;
+}
+
+void bman_done_cleanup(void)
+{
+	__bman_requires_cleanup = 0;
+}
+
 static int fsl_bman_probe(struct platform_device *pdev)
 {
 	int ret, err_irq;
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index cf4f10d6f590..923c44063a9a 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -100,7 +100,7 @@ static int bman_portal_probe(struct platform_device *pdev)
 	struct device_node *node = dev->of_node;
 	struct bm_portal_config *pcfg;
 	struct resource *addr_phys[2];
-	int irq, cpu, err;
+	int irq, cpu, err, i;
 
 	err = bman_is_probed();
 	if (!err)
@@ -135,10 +135,8 @@ static int bman_portal_probe(struct platform_device *pdev)
 	pcfg->cpu = -1;
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0) {
-		dev_err(dev, "Can't get %pOF IRQ'\n", node);
+	if (irq <= 0)
 		goto err_ioremap1;
-	}
 	pcfg->irq = irq;
 
 	pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
@@ -178,6 +176,22 @@ static int bman_portal_probe(struct platform_device *pdev)
 	if (!cpu_online(cpu))
 		bman_offline_cpu(cpu);
 
+	if (__bman_portals_probed == 1 && bman_requires_cleanup()) {
+		/*
+		 * BMan wasn't reset prior to boot (Kexec for example)
+		 * Empty all the buffer pools so they are in reset state
+		 */
+		for (i = 0; i < BM_POOL_MAX; i++) {
+			err = bm_shutdown_pool(i);
+			if (err) {
+				dev_err(dev, "Failed to shutdown bpool %d\n",
+					i);
+				goto err_portal_init;
+			}
+		}
+		bman_done_cleanup();
+	}
+
 	return 0;
 
 err_portal_init:
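The two qbman patches above, together with the matching QMan change further down, hinge on one pattern: after a kexec the FBPR/FQD/PFDR base address registers are still programmed by the previous kernel, and the hardware does not allow them to be rewritten, so the driver verifies that the reserved region matches what is already programmed and flags that pools/frame queues must be drained from the portal probe path. A minimal standalone sketch of the detection half, with hypothetical reg_read()/reg_write() accessors standing in for bm_ccsr_in()/bm_ccsr_out():

/*
 * Sketch only: returns 0 if we programmed the base address now, 1 if a
 * previous kernel already programmed the same address (the caller must
 * then drain the pools back to reset state), or -ENOMEM if the hardware
 * is pinned to a different address.
 */
static int set_private_mem_base(u64 ba, int *requires_cleanup)
{
	u32 bar = reg_read(REG_BAR);	/* low 32 bits of the base */
	u32 bare;			/* high 32 bits of the base */

	if (bar) {
		bare = reg_read(REG_BARE);
		if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba))
			return -ENOMEM;	/* locked to another region */
		*requires_cleanup = 1;	/* drain at portal probe time */
		return 1;
	}
	reg_write(REG_BARE, upper_32_bits(ba));
	reg_write(REG_BAR, lower_32_bits(ba));
	return 0;
}

The tri-state return is what lets qman_init_ccsr() below skip qm_init_pfdr() in the "already programmed" case: the PFDR free list is still owned by the hardware and must not be seeded twice.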
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
index 751ce90383b7..aa3981e04965 100644
--- a/drivers/soc/fsl/qbman/bman_priv.h
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -76,3 +76,8 @@ int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
 
 const struct bm_portal_config *
 bman_get_bm_portal_config(const struct bman_portal *portal);
+
+int bman_requires_cleanup(void);
+void bman_done_cleanup(void);
+
+int bm_shutdown_pool(u32 bpid);
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
index e6d48dccb8d5..9dd8bb571dbc 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.c
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -37,42 +37,53 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
 		size_t *size)
 {
-	int ret;
 	struct device_node *mem_node;
-	u64 size64;
+	struct reserved_mem *rmem;
+	struct property *prop;
+	int len, err;
+	__be32 *res_array;
 
-	ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, idx);
-	if (ret) {
-		dev_err(dev,
-			"of_reserved_mem_device_init_by_idx(%d) failed 0x%x\n",
-			idx, ret);
-		return -ENODEV;
-	}
-	mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
-	if (mem_node) {
-		ret = of_property_read_u64(mem_node, "size", &size64);
-		if (ret) {
-			dev_err(dev, "of_address_to_resource fails 0x%x\n",
-				ret);
-			return -ENODEV;
-		}
-		*size = size64;
-	} else {
+	mem_node = of_parse_phandle(dev->of_node, "memory-region", idx);
+	if (!mem_node) {
 		dev_err(dev, "No memory-region found for index %d\n", idx);
 		return -ENODEV;
 	}
 
-	if (!dma_alloc_coherent(dev, *size, addr, 0)) {
-		dev_err(dev, "DMA Alloc memory failed\n");
+	rmem = of_reserved_mem_lookup(mem_node);
+	if (!rmem) {
+		dev_err(dev, "of_reserved_mem_lookup() returned NULL\n");
 		return -ENODEV;
 	}
+	*addr = rmem->base;
+	*size = rmem->size;
 
 	/*
-	 * Disassociate the reserved memory area from the device
-	 * because a device can only have one DMA memory area. This
-	 * should be fine since the memory is allocated and initialized
-	 * and only ever accessed by the QBMan device from now on
+	 * Check if the reg property exists - if not insert the node
+	 * so upon kexec() the same memory region address will be preserved.
+	 * This is needed because QBMan HW does not allow the base address/
+	 * size to be modified once set.
*/ - of_reserved_mem_device_release(dev); + prop = of_find_property(mem_node, "reg", &len); + if (!prop) { + prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL); + if (!prop) + return -ENOMEM; + prop->value = res_array = devm_kzalloc(dev, sizeof(__be32) * 4, + GFP_KERNEL); + if (!prop->value) + return -ENOMEM; + res_array[0] = cpu_to_be32(upper_32_bits(*addr)); + res_array[1] = cpu_to_be32(lower_32_bits(*addr)); + res_array[2] = cpu_to_be32(upper_32_bits(*size)); + res_array[3] = cpu_to_be32(lower_32_bits(*size)); + prop->length = sizeof(__be32) * 4; + prop->name = devm_kstrdup(dev, "reg", GFP_KERNEL); + if (!prop->name) + return -ENOMEM; + err = of_add_property(mem_node, prop); + if (err) + return err; + } + return 0; } diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 636f83f781f5..1e164e03410a 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -1018,6 +1018,20 @@ static inline void put_affine_portal(void) put_cpu_var(qman_affine_portal); } + +static inline struct qman_portal *get_portal_for_channel(u16 channel) +{ + int i; + + for (i = 0; i < num_possible_cpus(); i++) { + if (affine_portals[i] && + affine_portals[i]->config->channel == channel) + return affine_portals[i]; + } + + return NULL; +} + static struct workqueue_struct *qm_portal_wq; int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh) @@ -1070,6 +1084,20 @@ int qman_wq_alloc(void) return 0; } + +void qman_enable_irqs(void) +{ + int i; + + for (i = 0; i < num_possible_cpus(); i++) { + if (affine_portals[i]) { + qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff); + qm_out(&affine_portals[i]->p, QM_REG_IIR, 0); + } + + } +} + /* * This is what everything can wait on, even if it migrates to a different cpu * to the one whose affine portal it is waiting on. @@ -1164,6 +1192,7 @@ static int drain_mr_fqrni(struct qm_portal *p) { const union qm_mr_entry *msg; loop: + qm_mr_pvb_update(p); msg = qm_mr_current(p); if (!msg) { /* @@ -1180,7 +1209,8 @@ loop: * entries well before the ring has been fully consumed, so * we're being *really* paranoid here. 
*/ - msleep(1); + mdelay(1); + qm_mr_pvb_update(p); msg = qm_mr_current(p); if (!msg) return 0; @@ -1267,8 +1297,8 @@ static int qman_create_portal(struct qman_portal *portal, qm_out(p, QM_REG_ISDR, isdr); portal->irq_sources = 0; qm_out(p, QM_REG_IER, 0); - qm_out(p, QM_REG_ISR, 0xffffffff); snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu); + qm_out(p, QM_REG_IIR, 1); if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) { dev_err(c->dev, "request_irq() failed\n"); goto fail_irq; @@ -1288,7 +1318,7 @@ static int qman_create_portal(struct qman_portal *portal, isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI); qm_out(p, QM_REG_ISDR, isdr); if (qm_dqrr_current(p)) { - dev_err(c->dev, "DQRR unclean\n"); + dev_dbg(c->dev, "DQRR unclean\n"); qm_dqrr_cdc_consume_n(p, 0xffff); } if (qm_mr_current(p) && drain_mr_fqrni(p)) { @@ -1301,8 +1331,10 @@ static int qman_create_portal(struct qman_portal *portal, } /* Success */ portal->config = c; + qm_out(p, QM_REG_ISR, 0xffffffff); qm_out(p, QM_REG_ISDR, 0); - qm_out(p, QM_REG_IIR, 0); + if (!qman_requires_cleanup()) + qm_out(p, QM_REG_IIR, 0); /* Write a sane SDQCR */ qm_dqrr_sdqcr_set(p, portal->sdqcr); return 0; @@ -1717,6 +1749,13 @@ struct qman_portal *qman_get_affine_portal(int cpu) } EXPORT_SYMBOL(qman_get_affine_portal); +int qman_start_using_portal(struct qman_portal *p, struct device *dev) +{ + return (!device_link_add(dev, p->config->dev, + DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0; +} +EXPORT_SYMBOL(qman_start_using_portal); + int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit) { return __poll_portal_fast(p, limit); @@ -2581,9 +2620,9 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s, #define qm_dqrr_drain_nomatch(p) \ _qm_dqrr_consume_and_match(p, 0, 0, false) -static int qman_shutdown_fq(u32 fqid) +int qman_shutdown_fq(u32 fqid) { - struct qman_portal *p; + struct qman_portal *p, *channel_portal; struct device *dev; union qm_mc_command *mcc; union qm_mc_result *mcr; @@ -2623,17 +2662,28 @@ static int qman_shutdown_fq(u32 fqid) channel = qm_fqd_get_chan(&mcr->queryfq.fqd); wq = qm_fqd_get_wq(&mcr->queryfq.fqd); + if (channel < qm_channel_pool1) { + channel_portal = get_portal_for_channel(channel); + if (channel_portal == NULL) { + dev_err(dev, "Can't find portal for dedicated channel 0x%x\n", + channel); + ret = -EIO; + goto out; + } + } else + channel_portal = p; + switch (state) { case QM_MCR_NP_STATE_TEN_SCHED: case QM_MCR_NP_STATE_TRU_SCHED: case QM_MCR_NP_STATE_ACTIVE: case QM_MCR_NP_STATE_PARKED: orl_empty = 0; - mcc = qm_mc_start(&p->p); + mcc = qm_mc_start(&channel_portal->p); qm_fqid_set(&mcc->fq, fqid); - qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); - if (!qm_mc_result_timeout(&p->p, &mcr)) { - dev_err(dev, "QUERYFQ_NP timeout\n"); + qm_mc_commit(&channel_portal->p, QM_MCC_VERB_ALTER_RETIRE); + if (!qm_mc_result_timeout(&channel_portal->p, &mcr)) { + dev_err(dev, "ALTER_RETIRE timeout\n"); ret = -ETIMEDOUT; goto out; } @@ -2641,6 +2691,9 @@ static int qman_shutdown_fq(u32 fqid) QM_MCR_VERB_ALTER_RETIRE); res = mcr->result; /* Make a copy as we reuse MCR below */ + if (res == QM_MCR_RESULT_OK) + drain_mr_fqrni(&channel_portal->p); + if (res == QM_MCR_RESULT_PENDING) { /* * Need to wait for the FQRN in the message ring, which @@ -2670,21 +2723,25 @@ static int qman_shutdown_fq(u32 fqid) } /* Set the sdqcr to drain this channel */ if (channel < qm_channel_pool1) - qm_dqrr_sdqcr_set(&p->p, + qm_dqrr_sdqcr_set(&channel_portal->p, QM_SDQCR_TYPE_ACTIVE | QM_SDQCR_CHANNELS_DEDICATED); else 
-		qm_dqrr_sdqcr_set(&p->p,
+		qm_dqrr_sdqcr_set(&channel_portal->p,
 				  QM_SDQCR_TYPE_ACTIVE |
 				  QM_SDQCR_CHANNELS_POOL_CONV(channel));
 		do {
 			/* Keep draining DQRR while checking the MR */
-			qm_dqrr_drain_nomatch(&p->p);
+			qm_dqrr_drain_nomatch(&channel_portal->p);
 			/* Process message ring too */
-			found_fqrn = qm_mr_drain(&p->p, FQRN);
+			found_fqrn = qm_mr_drain(&channel_portal->p,
+						 FQRN);
 			cpu_relax();
 		} while (!found_fqrn);
+		/* Restore SDQCR */
+		qm_dqrr_sdqcr_set(&channel_portal->p,
+				  channel_portal->sdqcr);
 	}
 	if (res != QM_MCR_RESULT_OK &&
@@ -2715,9 +2772,8 @@ static int qman_shutdown_fq(u32 fqid)
 			 * Wait for a dequeue and process the dequeues,
 			 * making sure to empty the ring completely
 			 */
-		} while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+		} while (!qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
 	}
-	qm_dqrr_sdqcr_set(&p->p, 0);
 
 	while (!orl_empty) {
 		/* Wait for the ORL to have been completely drained */
@@ -2754,7 +2810,7 @@ static int qman_shutdown_fq(u32 fqid)
 		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
 			    QM_MCR_VERB_ALTER_OOS);
-		if (mcr->result) {
+		if (mcr->result != QM_MCR_RESULT_OK) {
 			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
 				fqid, mcr->result);
 			ret = -EIO;
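One subtlety in the qman_shutdown_fq() rework above: when ALTER_RETIRE comes back PENDING, the FQRN message is delivered on the channel the queue was scheduled to, so the drain has to run on the portal that owns that channel (located with the new get_portal_for_channel() helper; for pool channels any portal will do, so the local one is used), and that portal's SDQCR is now restored afterwards instead of being left pointed at the drain setup. A condensed sketch of the inner loop for the pool-channel case, using only helpers visible in the hunks above:

/*
 * Sketch: drain a channel through its owning portal until the FQRN
 * (retirement notification) appears in the message ring, then restore
 * the static dequeue command the portal was created with. Condensed
 * from qman_shutdown_fq(); the dedicated-channel case differs only in
 * the SDQCR channel-selection bits.
 */
static void drain_until_fqrn(struct qman_portal *portal, u16 channel)
{
	int found_fqrn;

	qm_dqrr_sdqcr_set(&portal->p, QM_SDQCR_TYPE_ACTIVE |
			  QM_SDQCR_CHANNELS_POOL_CONV(channel));
	do {
		/* Throw away dequeued frames while watching the MR */
		qm_dqrr_drain_nomatch(&portal->p);
		found_fqrn = qm_mr_drain(&portal->p, FQRN);
		cpu_relax();
	} while (!found_fqrn);

	qm_dqrr_sdqcr_set(&portal->p, portal->sdqcr);
}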
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index a6bb43007d03..157659fd033a 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -274,6 +274,7 @@ static u32 __iomem *qm_ccsr_start;
 /* A SDQCR mask comprising all the available/visible pool channels */
 static u32 qm_pools_sdqcr;
 static int __qman_probed;
+static int __qman_requires_cleanup;
 
 static inline u32 qm_ccsr_in(u32 offset)
 {
@@ -340,19 +341,55 @@ static void qm_get_version(u16 *id, u8 *major, u8 *minor)
 }
 
 #define PFDR_AR_EN BIT(31)
-static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+static int qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
 {
+	void *ptr;
 	u32 offset = (memory == qm_memory_fqd) ?
		REG_FQD_BARE : REG_PFDR_BARE;
 	u32 exp = ilog2(size);
+	u32 bar, bare;
 
 	/* choke if size isn't within range */
 	DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
 		    is_power_of_2(size));
 	/* choke if 'ba' has lower-alignment than 'size' */
 	DPAA_ASSERT(!(ba & (size - 1)));
+
+	/* Check to see if QMan has already been initialized */
+	bar = qm_ccsr_in(offset + REG_offset_BAR);
+	if (bar) {
+		/* Make sure ba == what was programmed */
+		bare = qm_ccsr_in(offset);
+		if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
+			pr_err("Attempted to reinitialize QMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
+			       ba, bare, bar);
+			return -ENOMEM;
+		}
+		__qman_requires_cleanup = 1;
+		/* Return 1 to indicate memory was previously programmed */
+		return 1;
+	}
+	/* Need to temporarily map the area to make sure it is zeroed */
+	ptr = memremap(ba, size, MEMREMAP_WB);
+	if (!ptr) {
+		pr_crit("memremap() of QMan private memory failed\n");
+		return -ENOMEM;
+	}
+	memset(ptr, 0, size);
+
+#ifdef CONFIG_PPC
+	/*
+	 * PPC doesn't appear to flush the cache on memunmap() but the
+	 * cache must be flushed since QMan does non coherent accesses
+	 * to this memory
+	 */
+	flush_dcache_range((unsigned long) ptr, (unsigned long) ptr+size);
+#endif
+	memunmap(ptr);
+
 	qm_ccsr_out(offset, upper_32_bits(ba));
 	qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
 	qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+	return 0;
 }
 
 static void qm_set_pfdr_threshold(u32 th, u8 k)
@@ -455,7 +492,7 @@ RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
 
 #endif
 
-static unsigned int qm_get_fqid_maxcnt(void)
+unsigned int qm_get_fqid_maxcnt(void)
 {
 	return fqd_sz / 64;
 }
@@ -571,12 +608,19 @@ static int qman_init_ccsr(struct device *dev)
 	int i, err;
 
 	/* FQD memory */
-	qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+	err = qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+	if (err < 0)
+		return err;
 	/* PFDR memory */
-	qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
-	err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
-	if (err)
+	err = qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+	if (err < 0)
 		return err;
+	/* Only initialize PFDRs if the QMan was not initialized before */
+	if (err == 0) {
+		err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+		if (err)
+			return err;
+	}
 	/* thresholds */
 	qm_set_pfdr_threshold(512, 64);
 	qm_set_sfdr_threshold(128);
@@ -693,6 +737,18 @@ int qman_is_probed(void)
 }
 EXPORT_SYMBOL_GPL(qman_is_probed);
 
+int qman_requires_cleanup(void)
+{
+	return __qman_requires_cleanup;
+}
+
+void qman_done_cleanup(void)
+{
+	qman_enable_irqs();
+	__qman_requires_cleanup = 0;
+}
+
+
 static int fsl_qman_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index e2186b681d87..5685b6706893 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -233,7 +233,7 @@ static int qman_portal_probe(struct platform_device *pdev)
 	struct device_node *node = dev->of_node;
 	struct qm_portal_config *pcfg;
 	struct resource *addr_phys[2];
-	int irq, cpu, err;
+	int irq, cpu, err, i;
 	u32 val;
 
 	err = qman_is_probed();
@@ -275,10 +275,8 @@ static int qman_portal_probe(struct platform_device *pdev)
 	pcfg->channel = val;
 	pcfg->cpu = -1;
 	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0) {
-		dev_err(dev, "Can't get %pOF IRQ\n", node);
+	if (irq <= 0)
 		goto err_ioremap1;
-	}
 	pcfg->irq = irq;
 
 	pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
@@ -325,6 +323,22 @@ static int
qman_portal_probe(struct platform_device *pdev) if (!cpu_online(cpu)) qman_offline_cpu(cpu); + if (__qman_portals_probed == 1 && qman_requires_cleanup()) { + /* + * QMan wasn't reset prior to boot (Kexec for example) + * Empty all the frame queues so they are in reset state + */ + for (i = 0; i < qm_get_fqid_maxcnt(); i++) { + err = qman_shutdown_fq(i); + if (err) { + dev_err(dev, "Failed to shutdown frame queue %d\n", + i); + goto err_portal_init; + } + } + qman_done_cleanup(); + } + return 0; err_portal_init: diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h index 04515718cfd9..fd1cf543fb81 100644 --- a/drivers/soc/fsl/qbman/qman_priv.h +++ b/drivers/soc/fsl/qbman/qman_priv.h @@ -272,3 +272,11 @@ extern struct qman_portal *affine_portals[NR_CPUS]; extern struct qman_portal *qman_dma_portal; const struct qm_portal_config *qman_get_qm_portal_config( struct qman_portal *portal); + +unsigned int qm_get_fqid_maxcnt(void); + +int qman_shutdown_fq(u32 fqid); + +int qman_requires_cleanup(void); +void qman_done_cleanup(void); +void qman_enable_irqs(void); diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig index cfa4b2939992..357c5800b112 100644 --- a/drivers/soc/fsl/qe/Kconfig +++ b/drivers/soc/fsl/qe/Kconfig @@ -5,7 +5,8 @@ config QUICC_ENGINE bool "QUICC Engine (QE) framework support" - depends on FSL_SOC && PPC32 + depends on OF && HAS_IOMEM + depends on PPC || ARM || ARM64 || COMPILE_TEST select GENERIC_ALLOCATOR select CRC32 help diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c index f0c29ed8f0ff..ed75198ed254 100644 --- a/drivers/soc/fsl/qe/gpio.c +++ b/drivers/soc/fsl/qe/gpio.c @@ -41,13 +41,13 @@ static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) container_of(mm_gc, struct qe_gpio_chip, mm_gc); struct qe_pio_regs __iomem *regs = mm_gc->regs; - qe_gc->cpdata = in_be32(®s->cpdata); + qe_gc->cpdata = qe_ioread32be(®s->cpdata); qe_gc->saved_regs.cpdata = qe_gc->cpdata; - qe_gc->saved_regs.cpdir1 = in_be32(®s->cpdir1); - qe_gc->saved_regs.cpdir2 = in_be32(®s->cpdir2); - qe_gc->saved_regs.cppar1 = in_be32(®s->cppar1); - qe_gc->saved_regs.cppar2 = in_be32(®s->cppar2); - qe_gc->saved_regs.cpodr = in_be32(®s->cpodr); + qe_gc->saved_regs.cpdir1 = qe_ioread32be(®s->cpdir1); + qe_gc->saved_regs.cpdir2 = qe_ioread32be(®s->cpdir2); + qe_gc->saved_regs.cppar1 = qe_ioread32be(®s->cppar1); + qe_gc->saved_regs.cppar2 = qe_ioread32be(®s->cppar2); + qe_gc->saved_regs.cpodr = qe_ioread32be(®s->cpodr); } static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio) @@ -56,7 +56,7 @@ static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio) struct qe_pio_regs __iomem *regs = mm_gc->regs; u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio); - return !!(in_be32(®s->cpdata) & pin_mask); + return !!(qe_ioread32be(®s->cpdata) & pin_mask); } static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) @@ -74,7 +74,7 @@ static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) else qe_gc->cpdata &= ~pin_mask; - out_be32(®s->cpdata, qe_gc->cpdata); + qe_iowrite32be(qe_gc->cpdata, ®s->cpdata); spin_unlock_irqrestore(&qe_gc->lock, flags); } @@ -101,7 +101,7 @@ static void qe_gpio_set_multiple(struct gpio_chip *gc, } } - out_be32(®s->cpdata, qe_gc->cpdata); + qe_iowrite32be(qe_gc->cpdata, ®s->cpdata); spin_unlock_irqrestore(&qe_gc->lock, flags); } @@ -160,7 +160,6 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index) { struct qe_pin *qe_pin; struct gpio_chip *gc; - struct of_mm_gpio_chip *mm_gc; 
struct qe_gpio_chip *qe_gc; int err; unsigned long flags; @@ -186,7 +185,6 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index) goto err0; } - mm_gc = to_of_mm_gpio_chip(gc); qe_gc = gpiochip_get_data(gc); spin_lock_irqsave(&qe_gc->lock, flags); @@ -255,11 +253,15 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin) spin_lock_irqsave(&qe_gc->lock, flags); if (second_reg) { - clrsetbits_be32(®s->cpdir2, mask2, sregs->cpdir2 & mask2); - clrsetbits_be32(®s->cppar2, mask2, sregs->cppar2 & mask2); + qe_clrsetbits_be32(®s->cpdir2, mask2, + sregs->cpdir2 & mask2); + qe_clrsetbits_be32(®s->cppar2, mask2, + sregs->cppar2 & mask2); } else { - clrsetbits_be32(®s->cpdir1, mask2, sregs->cpdir1 & mask2); - clrsetbits_be32(®s->cppar1, mask2, sregs->cppar1 & mask2); + qe_clrsetbits_be32(®s->cpdir1, mask2, + sregs->cpdir1 & mask2); + qe_clrsetbits_be32(®s->cppar1, mask2, + sregs->cppar1 & mask2); } if (sregs->cpdata & mask1) @@ -267,8 +269,8 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin) else qe_gc->cpdata &= ~mask1; - out_be32(®s->cpdata, qe_gc->cpdata); - clrsetbits_be32(®s->cpodr, mask1, sregs->cpodr & mask1); + qe_iowrite32be(qe_gc->cpdata, ®s->cpdata); + qe_clrsetbits_be32(®s->cpodr, mask1, sregs->cpodr & mask1); spin_unlock_irqrestore(&qe_gc->lock, flags); } diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index c9519e62308c..96c2057d8d8e 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c @@ -10,6 +10,7 @@ * General Purpose functions for the global management of the * QUICC Engine (QE). */ +#include <linux/bitmap.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> @@ -21,16 +22,12 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/ioport.h> +#include <linux/iopoll.h> #include <linux/crc32.h> #include <linux/mod_devicetable.h> #include <linux/of_platform.h> -#include <asm/irq.h> -#include <asm/page.h> -#include <asm/pgtable.h> #include <soc/fsl/qe/immap_qe.h> #include <soc/fsl/qe/qe.h> -#include <asm/prom.h> -#include <asm/rheap.h> static void qe_snums_init(void); static int qe_sdma_init(void); @@ -39,29 +36,32 @@ static DEFINE_SPINLOCK(qe_lock); DEFINE_SPINLOCK(cmxgcr_lock); EXPORT_SYMBOL(cmxgcr_lock); -/* QE snum state */ -enum qe_snum_state { - QE_SNUM_STATE_USED, - QE_SNUM_STATE_FREE -}; - -/* QE snum */ -struct qe_snum { - u8 num; - enum qe_snum_state state; -}; - /* We allocate this here because it is used almost exclusively for * the communication processor devices. */ struct qe_immap __iomem *qe_immr; EXPORT_SYMBOL(qe_immr); -static struct qe_snum snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */ +static u8 snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */ +static DECLARE_BITMAP(snum_state, QE_NUM_OF_SNUM); static unsigned int qe_num_of_snum; static phys_addr_t qebase = -1; +static struct device_node *qe_get_device_node(void) +{ + struct device_node *qe; + + /* + * Newer device trees have an "fsl,qe" compatible property for the QE + * node, but we still need to support older device trees. 
+ */ + qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); + if (qe) + return qe; + return of_find_node_by_type(NULL, "qe"); +} + static phys_addr_t get_qe_base(void) { struct device_node *qe; @@ -71,12 +71,9 @@ static phys_addr_t get_qe_base(void) if (qebase != -1) return qebase; - qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); - if (!qe) { - qe = of_find_node_by_type(NULL, "qe"); - if (!qe) - return qebase; - } + qe = qe_get_device_node(); + if (!qe) + return qebase; ret = of_address_to_resource(qe, 0, &res); if (!ret) @@ -107,11 +104,12 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input) { unsigned long flags; u8 mcn_shift = 0, dev_shift = 0; - u32 ret; + u32 val; + int ret; spin_lock_irqsave(&qe_lock, flags); if (cmd == QE_RESET) { - out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG)); + qe_iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr); } else { if (cmd == QE_ASSIGN_PAGE) { /* Here device is the SNUM, not sub-block */ @@ -128,20 +126,18 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input) mcn_shift = QE_CR_MCN_NORMAL_SHIFT; } - out_be32(&qe_immr->cp.cecdr, cmd_input); - out_be32(&qe_immr->cp.cecr, - (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32) - mcn_protocol << mcn_shift)); + qe_iowrite32be(cmd_input, &qe_immr->cp.cecdr); + qe_iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift), + &qe_immr->cp.cecr); } /* wait for the QE_CR_FLG to clear */ - ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0, - 100, 0); - /* On timeout (e.g. failure), the expression will be false (ret == 0), - otherwise it will be true (ret == 1). */ + ret = readx_poll_timeout_atomic(qe_ioread32be, &qe_immr->cp.cecr, val, + (val & QE_CR_FLG) == 0, 0, 100); + /* On timeout, ret is -ETIMEDOUT, otherwise it will be 0. */ spin_unlock_irqrestore(&qe_lock, flags); - return ret == 1; + return ret == 0; } EXPORT_SYMBOL(qe_issue_cmd); @@ -163,23 +159,18 @@ static unsigned int brg_clk = 0; unsigned int qe_get_brg_clk(void) { struct device_node *qe; - int size; - const u32 *prop; + u32 brg; unsigned int mod; if (brg_clk) return brg_clk; - qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); - if (!qe) { - qe = of_find_node_by_type(NULL, "qe"); - if (!qe) - return brg_clk; - } + qe = qe_get_device_node(); + if (!qe) + return brg_clk; - prop = of_get_property(qe, "brg-frequency", &size); - if (prop && size == sizeof(*prop)) - brg_clk = *prop; + if (!of_property_read_u32(qe, "brg-frequency", &brg)) + brg_clk = brg; of_node_put(qe); @@ -199,6 +190,14 @@ EXPORT_SYMBOL(qe_get_brg_clk); #define PVR_VER_836x 0x8083 #define PVR_VER_832x 0x8084 +static bool qe_general4_errata(void) +{ +#ifdef CONFIG_PPC32 + return pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x); +#endif + return false; +} + /* Program the BRG to the given sampling rate and multiplier * * @brg: the BRG, QE_BRG1 - QE_BRG16 @@ -225,14 +224,14 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier) /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says that the BRG divisor must be even if you're not using divide-by-16 mode. 
*/ - if (pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x)) + if (qe_general4_errata()) if (!div16 && (divisor & 1) && (divisor > 3)) divisor++; tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | QE_BRGC_ENABLE | div16; - out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval); + qe_iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]); return 0; } @@ -281,7 +280,6 @@ EXPORT_SYMBOL(qe_clock_source); */ static void qe_snums_init(void) { - int i; static const u8 snum_init_76[] = { 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D, 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89, @@ -302,19 +300,39 @@ static void qe_snums_init(void) 0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59, 0x68, 0x69, 0x78, 0x79, 0x80, 0x81, }; - static const u8 *snum_init; + struct device_node *qe; + const u8 *snum_init; + int i; - qe_num_of_snum = qe_get_num_of_snums(); + bitmap_zero(snum_state, QE_NUM_OF_SNUM); + qe_num_of_snum = 28; /* The default number of snum for threads is 28 */ + qe = qe_get_device_node(); + if (qe) { + i = of_property_read_variable_u8_array(qe, "fsl,qe-snums", + snums, 1, QE_NUM_OF_SNUM); + if (i > 0) { + of_node_put(qe); + qe_num_of_snum = i; + return; + } + /* + * Fall back to legacy binding of using the value of + * fsl,qe-num-snums to choose one of the static arrays + * above. + */ + of_property_read_u32(qe, "fsl,qe-num-snums", &qe_num_of_snum); + of_node_put(qe); + } - if (qe_num_of_snum == 76) + if (qe_num_of_snum == 76) { snum_init = snum_init_76; - else + } else if (qe_num_of_snum == 28 || qe_num_of_snum == 46) { snum_init = snum_init_46; - - for (i = 0; i < qe_num_of_snum; i++) { - snums[i].num = snum_init[i]; - snums[i].state = QE_SNUM_STATE_FREE; + } else { + pr_err("QE: unsupported value of fsl,qe-num-snums: %u\n", qe_num_of_snum); + return; } + memcpy(snums, snum_init, qe_num_of_snum); } int qe_get_snum(void) @@ -324,12 +342,10 @@ int qe_get_snum(void) int i; spin_lock_irqsave(&qe_lock, flags); - for (i = 0; i < qe_num_of_snum; i++) { - if (snums[i].state == QE_SNUM_STATE_FREE) { - snums[i].state = QE_SNUM_STATE_USED; - snum = snums[i].num; - break; - } + i = find_first_zero_bit(snum_state, qe_num_of_snum); + if (i < qe_num_of_snum) { + set_bit(i, snum_state); + snum = snums[i]; } spin_unlock_irqrestore(&qe_lock, flags); @@ -339,36 +355,30 @@ EXPORT_SYMBOL(qe_get_snum); void qe_put_snum(u8 snum) { - int i; + const u8 *p = memchr(snums, snum, qe_num_of_snum); - for (i = 0; i < qe_num_of_snum; i++) { - if (snums[i].num == snum) { - snums[i].state = QE_SNUM_STATE_FREE; - break; - } - } + if (p) + clear_bit(p - snums, snum_state); } EXPORT_SYMBOL(qe_put_snum); static int qe_sdma_init(void) { struct sdma __iomem *sdma = &qe_immr->sdma; - static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM; - - if (!sdma) - return -ENODEV; + static s32 sdma_buf_offset = -ENOMEM; /* allocate 2 internal temporary buffers (512 bytes size each) for * the SDMA */ - if (IS_ERR_VALUE(sdma_buf_offset)) { + if (sdma_buf_offset < 0) { sdma_buf_offset = qe_muram_alloc(512 * 2, 4096); - if (IS_ERR_VALUE(sdma_buf_offset)) + if (sdma_buf_offset < 0) return -ENOMEM; } - out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK); - out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK | - (0x1 << QE_SDMR_CEN_SHIFT))); + qe_iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK, + &sdma->sdebcr); + qe_iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)), + &sdma->sdmr); return 0; } @@ -406,14 +416,14 @@ static void qe_upload_microcode(const void *base, "uploading microcode '%s'\n", ucode->id); /* Use 
auto-increment */ - out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) | - QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR); + qe_iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR, + &qe_immr->iram.iadd); for (i = 0; i < be32_to_cpu(ucode->count); i++) - out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i])); + qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata); /* Set I-RAM Ready Register */ - out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY)); + qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready); } /* @@ -498,7 +508,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware) * If the microcode calls for it, split the I-RAM. */ if (!firmware->split) - setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR); + qe_setbits_be16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR); if (firmware->soc.model) printk(KERN_INFO @@ -532,11 +542,13 @@ int qe_upload_firmware(const struct qe_firmware *firmware) u32 trap = be32_to_cpu(ucode->traps[j]); if (trap) - out_be32(&qe_immr->rsp[i].tibcr[j], trap); + qe_iowrite32be(trap, + &qe_immr->rsp[i].tibcr[j]); } /* Enable traps */ - out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr)); + qe_iowrite32be(be32_to_cpu(ucode->eccr), + &qe_immr->rsp[i].eccr); } qe_firmware_uploaded = 1; @@ -554,11 +566,9 @@ EXPORT_SYMBOL(qe_upload_firmware); struct qe_firmware_info *qe_get_firmware_info(void) { static int initialized; - struct property *prop; struct device_node *qe; struct device_node *fw = NULL; const char *sprop; - unsigned int i; /* * If we haven't checked yet, and a driver hasn't uploaded a firmware @@ -572,16 +582,9 @@ struct qe_firmware_info *qe_get_firmware_info(void) initialized = 1; - /* - * Newer device trees have an "fsl,qe" compatible property for the QE - * node, but we still need to support older device trees. - */ - qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); - if (!qe) { - qe = of_find_node_by_type(NULL, "qe"); - if (!qe) - return NULL; - } + qe = qe_get_device_node(); + if (!qe) + return NULL; /* Find the 'firmware' child node */ fw = of_get_child_by_name(qe, "firmware"); @@ -599,20 +602,11 @@ struct qe_firmware_info *qe_get_firmware_info(void) strlcpy(qe_firmware_info.id, sprop, sizeof(qe_firmware_info.id)); - prop = of_find_property(fw, "extended-modes", NULL); - if (prop && (prop->length == sizeof(u64))) { - const u64 *iprop = prop->value; - - qe_firmware_info.extended_modes = *iprop; - } + of_property_read_u64(fw, "extended-modes", + &qe_firmware_info.extended_modes); - prop = of_find_property(fw, "virtual-traps", NULL); - if (prop && (prop->length == 32)) { - const u32 *iprop = prop->value; - - for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++) - qe_firmware_info.vtraps[i] = iprop[i]; - } + of_property_read_u32_array(fw, "virtual-traps", qe_firmware_info.vtraps, + ARRAY_SIZE(qe_firmware_info.vtraps)); of_node_put(fw); @@ -623,24 +617,13 @@ EXPORT_SYMBOL(qe_get_firmware_info); unsigned int qe_get_num_of_risc(void) { struct device_node *qe; - int size; unsigned int num_of_risc = 0; - const u32 *prop; - qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); - if (!qe) { - /* Older devices trees did not have an "fsl,qe" - * compatible property, so we need to look for - * the QE node by name. 
- */ - qe = of_find_node_by_type(NULL, "qe"); - if (!qe) - return num_of_risc; - } + qe = qe_get_device_node(); + if (!qe) + return num_of_risc; - prop = of_get_property(qe, "fsl,qe-num-riscs", &size); - if (prop && size == sizeof(*prop)) - num_of_risc = *prop; + of_property_read_u32(qe, "fsl,qe-num-riscs", &num_of_risc); of_node_put(qe); @@ -650,37 +633,7 @@ EXPORT_SYMBOL(qe_get_num_of_risc); unsigned int qe_get_num_of_snums(void) { - struct device_node *qe; - int size; - unsigned int num_of_snums; - const u32 *prop; - - num_of_snums = 28; /* The default number of snum for threads is 28 */ - qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); - if (!qe) { - /* Older devices trees did not have an "fsl,qe" - * compatible property, so we need to look for - * the QE node by name. - */ - qe = of_find_node_by_type(NULL, "qe"); - if (!qe) - return num_of_snums; - } - - prop = of_get_property(qe, "fsl,qe-num-snums", &size); - if (prop && size == sizeof(*prop)) { - num_of_snums = *prop; - if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) { - /* No QE ever has fewer than 28 SNUMs */ - pr_err("QE: number of snum is invalid\n"); - of_node_put(qe); - return -EINVAL; - } - } - - of_node_put(qe); - - return num_of_snums; + return qe_num_of_snum; } EXPORT_SYMBOL(qe_get_num_of_snums); diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c index 83e85e61669f..a81a1a79f1ca 100644 --- a/drivers/soc/fsl/qe/qe_common.c +++ b/drivers/soc/fsl/qe/qe_common.c @@ -32,7 +32,7 @@ static phys_addr_t muram_pbase; struct muram_block { struct list_head head; - unsigned long start; + s32 start; int size; }; @@ -110,34 +110,30 @@ out_muram: * @algo: algorithm for alloc. * @data: data for genalloc's algorithm. * - * This function returns an offset into the muram area. + * This function returns a non-negative offset into the muram area, or + * a negative errno on failure. */ -static unsigned long cpm_muram_alloc_common(unsigned long size, - genpool_algo_t algo, void *data) +static s32 cpm_muram_alloc_common(unsigned long size, + genpool_algo_t algo, void *data) { struct muram_block *entry; - unsigned long start; - - if (!muram_pool && cpm_muram_init()) - goto out2; + s32 start; + entry = kmalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return -ENOMEM; start = gen_pool_alloc_algo(muram_pool, size, algo, data); - if (!start) - goto out2; + if (!start) { + kfree(entry); + return -ENOMEM; + } start = start - GENPOOL_OFFSET; memset_io(cpm_muram_addr(start), 0, size); - entry = kmalloc(sizeof(*entry), GFP_ATOMIC); - if (!entry) - goto out1; entry->start = start; entry->size = size; list_add(&entry->head, &muram_block_list); return start; -out1: - gen_pool_free(muram_pool, start, size); -out2: - return (unsigned long)-ENOMEM; } /* @@ -145,13 +141,14 @@ out2: * @size: number of bytes to allocate * @align: requested alignment, in bytes * - * This function returns an offset into the muram area. + * This function returns a non-negative offset into the muram area, or + * a negative errno on failure. * Use cpm_dpram_addr() to get the virtual address of the area. * Use cpm_muram_free() to free the allocation. 
*/ -unsigned long cpm_muram_alloc(unsigned long size, unsigned long align) +s32 cpm_muram_alloc(unsigned long size, unsigned long align) { - unsigned long start; + s32 start; unsigned long flags; struct genpool_data_align muram_pool_data; @@ -168,12 +165,15 @@ EXPORT_SYMBOL(cpm_muram_alloc); * cpm_muram_free - free a chunk of multi-user ram * @offset: The beginning of the chunk as returned by cpm_muram_alloc(). */ -int cpm_muram_free(unsigned long offset) +void cpm_muram_free(s32 offset) { unsigned long flags; int size; struct muram_block *tmp; + if (offset < 0) + return; + size = 0; spin_lock_irqsave(&cpm_muram_lock, flags); list_for_each_entry(tmp, &muram_block_list, head) { @@ -186,7 +186,6 @@ int cpm_muram_free(unsigned long offset) } gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size); spin_unlock_irqrestore(&cpm_muram_lock, flags); - return size; } EXPORT_SYMBOL(cpm_muram_free); @@ -194,13 +193,14 @@ EXPORT_SYMBOL(cpm_muram_free); * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram * @offset: offset of allocation start address * @size: number of bytes to allocate - * This function returns an offset into the muram area + * This function returns @offset if the area was available, a negative + * errno otherwise. * Use cpm_dpram_addr() to get the virtual address of the area. * Use cpm_muram_free() to free the allocation. */ -unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size) +s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size) { - unsigned long start; + s32 start; unsigned long flags; struct genpool_data_fixed muram_pool_data_fixed; diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c index 9bac546998d3..0dd5bdb04a14 100644 --- a/drivers/soc/fsl/qe/qe_ic.c +++ b/drivers/soc/fsl/qe/qe_ic.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> +#include <linux/irq.h> #include <linux/reboot.h> #include <linux/slab.h> #include <linux/stddef.h> @@ -24,9 +25,57 @@ #include <linux/spinlock.h> #include <asm/irq.h> #include <asm/io.h> -#include <soc/fsl/qe/qe_ic.h> +#include <soc/fsl/qe/qe.h> + +#define NR_QE_IC_INTS 64 + +/* QE IC registers offset */ +#define QEIC_CICR 0x00 +#define QEIC_CIVEC 0x04 +#define QEIC_CIPXCC 0x10 +#define QEIC_CIPYCC 0x14 +#define QEIC_CIPWCC 0x18 +#define QEIC_CIPZCC 0x1c +#define QEIC_CIMR 0x20 +#define QEIC_CRIMR 0x24 +#define QEIC_CIPRTA 0x30 +#define QEIC_CIPRTB 0x34 +#define QEIC_CHIVEC 0x60 + +struct qe_ic { + /* Control registers offset */ + u32 __iomem *regs; + + /* The remapper for this QEIC */ + struct irq_domain *irqhost; + + /* The "linux" controller struct */ + struct irq_chip hc_irq; + + /* VIRQ numbers of QE high/low irqs */ + unsigned int virq_high; + unsigned int virq_low; +}; -#include "qe_ic.h" +/* + * QE interrupt controller internal structure + */ +struct qe_ic_info { + /* Location of this source at the QIMR register */ + u32 mask; + + /* Mask register offset */ + u32 mask_reg; + + /* + * For grouped interrupts sources - the interrupt code as + * appears at the group priority register + */ + u8 pri_code; + + /* Group priority register offset */ + u32 pri_reg; +}; static DEFINE_RAW_SPINLOCK(qe_ic_lock); @@ -171,15 +220,15 @@ static struct qe_ic_info qe_ic_info[] = { }, }; -static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg) +static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg) { - return in_be32(base + (reg >> 2)); + return qe_ioread32be(base + (reg >> 2)); } -static inline void 
qe_ic_write(volatile __be32 __iomem * base, unsigned int reg, +static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg, u32 value) { - out_be32(base + (reg >> 2), value); + qe_iowrite32be(value, base + (reg >> 2)); } static inline struct qe_ic *qe_ic_from_irq(unsigned int virq) @@ -281,8 +330,8 @@ static const struct irq_domain_ops qe_ic_host_ops = { .xlate = irq_domain_xlate_onetwocell, }; -/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ -unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) +/* Return an interrupt vector or 0 if no interrupt is pending. */ +static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) { int irq; @@ -292,13 +341,13 @@ unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26; if (irq == 0) - return NO_IRQ; + return 0; return irq_linear_revmap(qe_ic->irqhost, irq); } -/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ -unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) +/* Return an interrupt vector or 0 if no interrupt is pending. */ +static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) { int irq; @@ -308,18 +357,60 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26; if (irq == 0) - return NO_IRQ; + return 0; return irq_linear_revmap(qe_ic->irqhost, irq); } -void __init qe_ic_init(struct device_node *node, unsigned int flags, - void (*low_handler)(struct irq_desc *desc), - void (*high_handler)(struct irq_desc *desc)) +static void qe_ic_cascade_low(struct irq_desc *desc) +{ + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); + unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); + struct irq_chip *chip = irq_desc_get_chip(desc); + + if (cascade_irq != 0) + generic_handle_irq(cascade_irq); + + if (chip->irq_eoi) + chip->irq_eoi(&desc->irq_data); +} + +static void qe_ic_cascade_high(struct irq_desc *desc) +{ + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); + unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); + struct irq_chip *chip = irq_desc_get_chip(desc); + + if (cascade_irq != 0) + generic_handle_irq(cascade_irq); + + if (chip->irq_eoi) + chip->irq_eoi(&desc->irq_data); +} + +static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc) { + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); + unsigned int cascade_irq; + struct irq_chip *chip = irq_desc_get_chip(desc); + + cascade_irq = qe_ic_get_high_irq(qe_ic); + if (cascade_irq == 0) + cascade_irq = qe_ic_get_low_irq(qe_ic); + + if (cascade_irq != 0) + generic_handle_irq(cascade_irq); + + chip->irq_eoi(&desc->irq_data); +} + +static void __init qe_ic_init(struct device_node *node) +{ + void (*low_handler)(struct irq_desc *desc); + void (*high_handler)(struct irq_desc *desc); struct qe_ic *qe_ic; struct resource res; - u32 temp = 0, ret, high_active = 0; + u32 ret; ret = of_address_to_resource(node, 0, &res); if (ret) @@ -343,166 +434,42 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags, qe_ic->virq_high = irq_of_parse_and_map(node, 0); qe_ic->virq_low = irq_of_parse_and_map(node, 1); - if (qe_ic->virq_low == NO_IRQ) { + if (!qe_ic->virq_low) { printk(KERN_ERR "Failed to map QE_IC low IRQ\n"); kfree(qe_ic); return; } - - /* default priority scheme is grouped. If spread mode is */ - /* required, configure cicr accordingly. 
*/ - if (flags & QE_IC_SPREADMODE_GRP_W) - temp |= CICR_GWCC; - if (flags & QE_IC_SPREADMODE_GRP_X) - temp |= CICR_GXCC; - if (flags & QE_IC_SPREADMODE_GRP_Y) - temp |= CICR_GYCC; - if (flags & QE_IC_SPREADMODE_GRP_Z) - temp |= CICR_GZCC; - if (flags & QE_IC_SPREADMODE_GRP_RISCA) - temp |= CICR_GRTA; - if (flags & QE_IC_SPREADMODE_GRP_RISCB) - temp |= CICR_GRTB; - - /* choose destination signal for highest priority interrupt */ - if (flags & QE_IC_HIGH_SIGNAL) { - temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT); - high_active = 1; + if (qe_ic->virq_high != qe_ic->virq_low) { + low_handler = qe_ic_cascade_low; + high_handler = qe_ic_cascade_high; + } else { + low_handler = qe_ic_cascade_muxed_mpic; + high_handler = NULL; } - qe_ic_write(qe_ic->regs, QEIC_CICR, temp); + qe_ic_write(qe_ic->regs, QEIC_CICR, 0); irq_set_handler_data(qe_ic->virq_low, qe_ic); irq_set_chained_handler(qe_ic->virq_low, low_handler); - if (qe_ic->virq_high != NO_IRQ && - qe_ic->virq_high != qe_ic->virq_low) { + if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) { irq_set_handler_data(qe_ic->virq_high, qe_ic); irq_set_chained_handler(qe_ic->virq_high, high_handler); } } -void qe_ic_set_highest_priority(unsigned int virq, int high) +static int __init qe_ic_of_init(void) { - struct qe_ic *qe_ic = qe_ic_from_irq(virq); - unsigned int src = virq_to_hw(virq); - u32 temp = 0; - - temp = qe_ic_read(qe_ic->regs, QEIC_CICR); - - temp &= ~CICR_HP_MASK; - temp |= src << CICR_HP_SHIFT; - - temp &= ~CICR_HPIT_MASK; - temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT; - - qe_ic_write(qe_ic->regs, QEIC_CICR, temp); -} - -/* Set Priority level within its group, from 1 to 8 */ -int qe_ic_set_priority(unsigned int virq, unsigned int priority) -{ - struct qe_ic *qe_ic = qe_ic_from_irq(virq); - unsigned int src = virq_to_hw(virq); - u32 temp; + struct device_node *np; - if (priority > 8 || priority == 0) - return -EINVAL; - if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info), - "%s: Invalid hw irq number for QEIC\n", __func__)) - return -EINVAL; - if (qe_ic_info[src].pri_reg == 0) - return -EINVAL; - - temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg); - - if (priority < 4) { - temp &= ~(0x7 << (32 - priority * 3)); - temp |= qe_ic_info[src].pri_code << (32 - priority * 3); - } else { - temp &= ~(0x7 << (24 - priority * 3)); - temp |= qe_ic_info[src].pri_code << (24 - priority * 3); + np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); + if (!np) { + np = of_find_node_by_type(NULL, "qeic"); + if (!np) + return -ENODEV; } - - qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp); - + qe_ic_init(np); + of_node_put(np); return 0; } - -/* Set a QE priority to use high irq, only priority 1~2 can use high irq */ -int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high) -{ - struct qe_ic *qe_ic = qe_ic_from_irq(virq); - unsigned int src = virq_to_hw(virq); - u32 temp, control_reg = QEIC_CICNR, shift = 0; - - if (priority > 2 || priority == 0) - return -EINVAL; - if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info), - "%s: Invalid hw irq number for QEIC\n", __func__)) - return -EINVAL; - - switch (qe_ic_info[src].pri_reg) { - case QEIC_CIPZCC: - shift = CICNR_ZCC1T_SHIFT; - break; - case QEIC_CIPWCC: - shift = CICNR_WCC1T_SHIFT; - break; - case QEIC_CIPYCC: - shift = CICNR_YCC1T_SHIFT; - break; - case QEIC_CIPXCC: - shift = CICNR_XCC1T_SHIFT; - break; - case QEIC_CIPRTA: - shift = CRICR_RTA1T_SHIFT; - control_reg = QEIC_CRICR; - break; - case QEIC_CIPRTB: - shift = CRICR_RTB1T_SHIFT; - control_reg = QEIC_CRICR; - 
break; - default: - return -EINVAL; - } - - shift += (2 - priority) * 2; - temp = qe_ic_read(qe_ic->regs, control_reg); - temp &= ~(SIGNAL_MASK << shift); - temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift; - qe_ic_write(qe_ic->regs, control_reg, temp); - - return 0; -} - -static struct bus_type qe_ic_subsys = { - .name = "qe_ic", - .dev_name = "qe_ic", -}; - -static struct device device_qe_ic = { - .id = 0, - .bus = &qe_ic_subsys, -}; - -static int __init init_qe_ic_sysfs(void) -{ - int rc; - - printk(KERN_DEBUG "Registering qe_ic with sysfs...\n"); - - rc = subsys_system_register(&qe_ic_subsys, NULL); - if (rc) { - printk(KERN_ERR "Failed registering qe_ic sys class\n"); - return -ENODEV; - } - rc = device_register(&device_qe_ic); - if (rc) { - printk(KERN_ERR "Failed registering qe_ic sys device\n"); - return -ENODEV; - } - return 0; -} - -subsys_initcall(init_qe_ic_sysfs); +subsys_initcall(qe_ic_of_init); diff --git a/drivers/soc/fsl/qe/qe_ic.h b/drivers/soc/fsl/qe/qe_ic.h deleted file mode 100644 index 08c695672a03..000000000000 --- a/drivers/soc/fsl/qe/qe_ic.h +++ /dev/null @@ -1,99 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * drivers/soc/fsl/qe/qe_ic.h - * - * QUICC ENGINE Interrupt Controller Header - * - * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. - * - * Author: Li Yang <leoli@freescale.com> - * Based on code from Shlomi Gridish <gridish@freescale.com> - */ -#ifndef _POWERPC_SYSDEV_QE_IC_H -#define _POWERPC_SYSDEV_QE_IC_H - -#include <soc/fsl/qe/qe_ic.h> - -#define NR_QE_IC_INTS 64 - -/* QE IC registers offset */ -#define QEIC_CICR 0x00 -#define QEIC_CIVEC 0x04 -#define QEIC_CRIPNR 0x08 -#define QEIC_CIPNR 0x0c -#define QEIC_CIPXCC 0x10 -#define QEIC_CIPYCC 0x14 -#define QEIC_CIPWCC 0x18 -#define QEIC_CIPZCC 0x1c -#define QEIC_CIMR 0x20 -#define QEIC_CRIMR 0x24 -#define QEIC_CICNR 0x28 -#define QEIC_CIPRTA 0x30 -#define QEIC_CIPRTB 0x34 -#define QEIC_CRICR 0x3c -#define QEIC_CHIVEC 0x60 - -/* Interrupt priority registers */ -#define CIPCC_SHIFT_PRI0 29 -#define CIPCC_SHIFT_PRI1 26 -#define CIPCC_SHIFT_PRI2 23 -#define CIPCC_SHIFT_PRI3 20 -#define CIPCC_SHIFT_PRI4 13 -#define CIPCC_SHIFT_PRI5 10 -#define CIPCC_SHIFT_PRI6 7 -#define CIPCC_SHIFT_PRI7 4 - -/* CICR priority modes */ -#define CICR_GWCC 0x00040000 -#define CICR_GXCC 0x00020000 -#define CICR_GYCC 0x00010000 -#define CICR_GZCC 0x00080000 -#define CICR_GRTA 0x00200000 -#define CICR_GRTB 0x00400000 -#define CICR_HPIT_SHIFT 8 -#define CICR_HPIT_MASK 0x00000300 -#define CICR_HP_SHIFT 24 -#define CICR_HP_MASK 0x3f000000 - -/* CICNR */ -#define CICNR_WCC1T_SHIFT 20 -#define CICNR_ZCC1T_SHIFT 28 -#define CICNR_YCC1T_SHIFT 12 -#define CICNR_XCC1T_SHIFT 4 - -/* CRICR */ -#define CRICR_RTA1T_SHIFT 20 -#define CRICR_RTB1T_SHIFT 28 - -/* Signal indicator */ -#define SIGNAL_MASK 3 -#define SIGNAL_HIGH 2 -#define SIGNAL_LOW 0 - -struct qe_ic { - /* Control registers offset */ - volatile u32 __iomem *regs; - - /* The remapper for this QEIC */ - struct irq_domain *irqhost; - - /* The "linux" controller struct */ - struct irq_chip hc_irq; - - /* VIRQ numbers of QE high/low irqs */ - unsigned int virq_high; - unsigned int virq_low; -}; - -/* - * QE interrupt controller internal structure - */ -struct qe_ic_info { - u32 mask; /* location of this source at the QIMR register. 
*/
-	u32 mask_reg;	/* Mask register offset */
-	u8 pri_code;	/* for grouped interrupts sources - the interrupt
-			   code as appears at the group priority register */
-	u32 pri_reg;	/* Group priority register offset */
-};
-
-#endif /* _POWERPC_SYSDEV_QE_IC_H */
diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
index 3657e296a8a2..11ea08e97db7 100644
--- a/drivers/soc/fsl/qe/qe_io.c
+++ b/drivers/soc/fsl/qe/qe_io.c
@@ -18,8 +18,6 @@
 #include <asm/io.h>
 #include <soc/fsl/qe/qe.h>
-#include <asm/prom.h>
-#include <sysdev/fsl_soc.h>
 
 #undef DEBUG
 
@@ -30,7 +28,7 @@ int par_io_init(struct device_node *np)
 {
 	struct resource res;
 	int ret;
-	const u32 *num_ports;
+	u32 num_ports;
 
 	/* Map Parallel I/O ports registers */
 	ret = of_address_to_resource(np, 0, &res);
@@ -38,9 +36,8 @@ int par_io_init(struct device_node *np)
 		return ret;
 	par_io = ioremap(res.start, resource_size(&res));
 
-	num_ports = of_get_property(np, "num-ports", NULL);
-	if (num_ports)
-		num_par_io_ports = *num_ports;
+	if (!of_property_read_u32(np, "num-ports", &num_ports))
+		num_par_io_ports = num_ports;
 
 	return 0;
 }
@@ -57,16 +54,16 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
 	pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
 
 	/* Set open drain, if required */
-	tmp_val = in_be32(&par_io->cpodr);
+	tmp_val = qe_ioread32be(&par_io->cpodr);
 	if (open_drain)
-		out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
+		qe_iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
 	else
-		out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
+		qe_iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
 
 	/* define direction */
 	tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
-		in_be32(&par_io->cpdir2) :
-		in_be32(&par_io->cpdir1);
+		qe_ioread32be(&par_io->cpdir2) :
+		qe_ioread32be(&par_io->cpdir1);
 
 	/* get all bits mask for 2 bit per port */
 	pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
@@ -78,34 +75,30 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
 
 	/* clear and set 2 bits mask */
 	if (pin > (QE_PIO_PINS / 2) - 1) {
-		out_be32(&par_io->cpdir2,
-			 ~pin_mask2bits & tmp_val);
+		qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
 		tmp_val &= ~pin_mask2bits;
-		out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
+		qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
 	} else {
-		out_be32(&par_io->cpdir1,
-			 ~pin_mask2bits & tmp_val);
+		qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
 		tmp_val &= ~pin_mask2bits;
-		out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
+		qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
 	}
 	/* define pin assignment */
 	tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
-		in_be32(&par_io->cppar2) :
-		in_be32(&par_io->cppar1);
+		qe_ioread32be(&par_io->cppar2) :
+		qe_ioread32be(&par_io->cppar1);
 
 	new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
 			(pin % (QE_PIO_PINS / 2) + 1) * 2));
 	/* clear and set 2 bits mask */
 	if (pin > (QE_PIO_PINS / 2) - 1) {
-		out_be32(&par_io->cppar2,
-			 ~pin_mask2bits & tmp_val);
+		qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
 		tmp_val &= ~pin_mask2bits;
-		out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
+		qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
 	} else {
-		out_be32(&par_io->cppar1,
-			 ~pin_mask2bits & tmp_val);
+		qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
 		tmp_val &= ~pin_mask2bits;
-		out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
+		qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
 	}
 }
 EXPORT_SYMBOL(__par_io_config_pin);
@@ -133,12 +126,12 @@ int par_io_data_set(u8 port, u8 pin, u8 val)
 	/* calculate pin location */
 	pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
 
-	tmp_val = in_be32(&par_io[port].cpdata);
+	tmp_val = qe_ioread32be(&par_io[port].cpdata);
 
 	if (val == 0)		/* clear */
-		out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
+		qe_iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
 	else			/* set */
-		out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
+		qe_iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
 
 	return 0;
 }
@@ -147,23 +140,20 @@ EXPORT_SYMBOL(par_io_data_set);
 int par_io_of_config(struct device_node *np)
 {
 	struct device_node *pio;
-	const phandle *ph;
 	int pio_map_len;
-	const unsigned int *pio_map;
+	const __be32 *pio_map;
 
 	if (par_io == NULL) {
 		printk(KERN_ERR "par_io not initialized\n");
 		return -1;
 	}
 
-	ph = of_get_property(np, "pio-handle", NULL);
-	if (ph == NULL) {
+	pio = of_parse_phandle(np, "pio-handle", 0);
+	if (pio == NULL) {
 		printk(KERN_ERR "pio-handle not available\n");
 		return -1;
 	}
 
-	pio = of_find_node_by_phandle(*ph);
-
 	pio_map = of_get_property(pio, "pio-map", &pio_map_len);
 	if (pio_map == NULL) {
 		printk(KERN_ERR "pio-map is not set!\n");
@@ -176,9 +166,15 @@ int par_io_of_config(struct device_node *np)
 	}
 
 	while (pio_map_len > 0) {
-		par_io_config_pin((u8) pio_map[0], (u8) pio_map[1],
-				(int) pio_map[2], (int) pio_map[3],
-				(int) pio_map[4], (int) pio_map[5]);
+		u8 port        = be32_to_cpu(pio_map[0]);
+		u8 pin         = be32_to_cpu(pio_map[1]);
+		int dir        = be32_to_cpu(pio_map[2]);
+		int open_drain = be32_to_cpu(pio_map[3]);
+		int assignment = be32_to_cpu(pio_map[4]);
+		int has_irq    = be32_to_cpu(pio_map[5]);
+
+		par_io_config_pin(port, pin, dir, open_drain,
+				  assignment, has_irq);
 		pio_map += 6;
 		pio_map_len -= 6;
 	}
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index e37ebc3be661..7d7d78d3ee50 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -169,10 +169,10 @@ void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
 			   &siram[siram_entry_id * 32 + 0x200 + i]);
 	}
 
-	setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
-		  SIR_LAST);
-	setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
-		  SIR_LAST);
+	qe_setbits_be16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
+			SIR_LAST);
+	qe_setbits_be16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
+			SIR_LAST);
 
 	/* Set SIxMR register */
 	sixmr = SIMR_SAD(siram_entry_id);
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index 024d239ac1e1..90157acc5ba6 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -15,7 +15,6 @@
 #include <linux/spinlock.h>
 #include <linux/export.h>
-#include <asm/irq.h>
 #include <asm/io.h>
 #include <soc/fsl/qe/immap_qe.h>
 #include <soc/fsl/qe/qe.h>
@@ -35,8 +34,8 @@ int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
 		return -EINVAL;
 
 	spin_lock_irqsave(&cmxgcr_lock, flags);
-	clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
-			ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
+	qe_clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+			   ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
 	spin_unlock_irqrestore(&cmxgcr_lock, flags);
 
 	return 0;
@@ -80,8 +79,8 @@ int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
 		return -EINVAL;
 	}
 
-	clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
-		     UCC_GUEMR_SET_RESERVED3 | speed);
+	qe_clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
+			UCC_GUEMR_SET_RESERVED3 | speed);
 
 	return 0;
 }
@@ -109,9 +108,9 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
 	get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
 
 	if (set)
-		setbits32(cmxucr, mask << shift);
+		qe_setbits_be32(cmxucr, mask << shift);
 	else
-		clrbits32(cmxucr, mask << shift);
+		qe_clrbits_be32(cmxucr, mask << shift);
 
 	return 0;
 }
@@ -207,8 +206,8 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
 	if (mode == COMM_DIR_RX)
 		shift += 4;
 
-	clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
-			clock_bits << shift);
+	qe_clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+			   clock_bits << shift);
 
 	return 0;
 }
@@ -540,8 +539,8 @@ int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock,
 	cmxs1cr = (tdm_num < 4) ? &qe_mux_reg->cmxsi1cr_l :
 				  &qe_mux_reg->cmxsi1cr_h;
 
-	qe_clrsetbits32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
-			clock_bits << shift);
+	qe_clrsetbits_be32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+			   clock_bits << shift);
 
 	return 0;
 }
@@ -650,9 +649,9 @@ int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
 
 	shift = ucc_get_tdm_sync_shift(mode, tdm_num);
 
-	qe_clrsetbits32(&qe_mux_reg->cmxsi1syr,
-			QE_CMXUCR_TX_CLK_SRC_MASK << shift,
-			source << shift);
+	qe_clrsetbits_be32(&qe_mux_reg->cmxsi1syr,
+			   QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+			   source << shift);
 
 	return 0;
 }
diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c
index af4d80e38521..ad6193ea4597 100644
--- a/drivers/soc/fsl/qe/ucc_fast.c
+++ b/drivers/soc/fsl/qe/ucc_fast.c
@@ -29,41 +29,42 @@ void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
 	printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
 
 	printk(KERN_INFO "gumr  : addr=0x%p, val=0x%08x\n",
-	       &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
+	       &uccf->uf_regs->gumr, qe_ioread32be(&uccf->uf_regs->gumr));
 	printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
-	       &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
+	       &uccf->uf_regs->upsmr, qe_ioread32be(&uccf->uf_regs->upsmr));
 	printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
-	       &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
+	       &uccf->uf_regs->utodr, qe_ioread16be(&uccf->uf_regs->utodr));
 	printk(KERN_INFO "udsr  : addr=0x%p, val=0x%04x\n",
-	       &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
+	       &uccf->uf_regs->udsr, qe_ioread16be(&uccf->uf_regs->udsr));
 	printk(KERN_INFO "ucce  : addr=0x%p, val=0x%08x\n",
-	       &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
+	       &uccf->uf_regs->ucce, qe_ioread32be(&uccf->uf_regs->ucce));
 	printk(KERN_INFO "uccm  : addr=0x%p, val=0x%08x\n",
-	       &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
+	       &uccf->uf_regs->uccm, qe_ioread32be(&uccf->uf_regs->uccm));
 	printk(KERN_INFO "uccs  : addr=0x%p, val=0x%02x\n",
-	       &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
+	       &uccf->uf_regs->uccs, qe_ioread8(&uccf->uf_regs->uccs));
 	printk(KERN_INFO "urfb  : addr=0x%p, val=0x%08x\n",
-	       &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
+	       &uccf->uf_regs->urfb, qe_ioread32be(&uccf->uf_regs->urfb));
 	printk(KERN_INFO "urfs  : addr=0x%p, val=0x%04x\n",
-	       &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
+	       &uccf->uf_regs->urfs, qe_ioread16be(&uccf->uf_regs->urfs));
 	printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
-	       &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
+	       &uccf->uf_regs->urfet, qe_ioread16be(&uccf->uf_regs->urfet));
 	printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
-	       &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
+	       &uccf->uf_regs->urfset,
+	       qe_ioread16be(&uccf->uf_regs->urfset));
 	printk(KERN_INFO "utfb  : addr=0x%p, val=0x%08x\n",
-	       &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
+	       &uccf->uf_regs->utfb, qe_ioread32be(&uccf->uf_regs->utfb));
 	printk(KERN_INFO "utfs  : addr=0x%p, val=0x%04x\n",
-	       &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
+	       &uccf->uf_regs->utfs, qe_ioread16be(&uccf->uf_regs->utfs));
 	printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
-	       &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
+	       &uccf->uf_regs->utfet, qe_ioread16be(&uccf->uf_regs->utfet));
 	printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
-	       &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
+	       &uccf->uf_regs->utftt, qe_ioread16be(&uccf->uf_regs->utftt));
 	printk(KERN_INFO "utpt  : addr=0x%p, val=0x%04x\n",
-	       &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
+	       &uccf->uf_regs->utpt, qe_ioread16be(&uccf->uf_regs->utpt));
 	printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
-	       &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
+	       &uccf->uf_regs->urtry, qe_ioread32be(&uccf->uf_regs->urtry));
 	printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
-	       &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
+	       &uccf->uf_regs->guemr, qe_ioread8(&uccf->uf_regs->guemr));
 }
 EXPORT_SYMBOL(ucc_fast_dump_regs);
 
@@ -85,7 +86,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
 
 void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
 {
-	out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
+	qe_iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
 }
 EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
 
@@ -97,7 +98,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
 	uf_regs = uccf->uf_regs;
 
 	/* Enable reception and/or transmission on this UCC. */
-	gumr = in_be32(&uf_regs->gumr);
+	gumr = qe_ioread32be(&uf_regs->gumr);
 	if (mode & COMM_DIR_TX) {
 		gumr |= UCC_FAST_GUMR_ENT;
 		uccf->enabled_tx = 1;
@@ -106,7 +107,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
 		gumr |= UCC_FAST_GUMR_ENR;
 		uccf->enabled_rx = 1;
 	}
-	out_be32(&uf_regs->gumr, gumr);
+	qe_iowrite32be(gumr, &uf_regs->gumr);
 }
 EXPORT_SYMBOL(ucc_fast_enable);
 
@@ -118,7 +119,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
 	uf_regs = uccf->uf_regs;
 
 	/* Disable reception and/or transmission on this UCC. */
-	gumr = in_be32(&uf_regs->gumr);
+	gumr = qe_ioread32be(&uf_regs->gumr);
 	if (mode & COMM_DIR_TX) {
 		gumr &= ~UCC_FAST_GUMR_ENT;
 		uccf->enabled_tx = 0;
@@ -127,7 +128,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
 		gumr &= ~UCC_FAST_GUMR_ENR;
 		uccf->enabled_rx = 0;
 	}
-	out_be32(&uf_regs->gumr, gumr);
+	qe_iowrite32be(gumr, &uf_regs->gumr);
 }
 EXPORT_SYMBOL(ucc_fast_disable);
 
@@ -196,6 +197,8 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
 		       __func__);
 		return -ENOMEM;
 	}
+	uccf->ucc_fast_tx_virtual_fifo_base_offset = -1;
+	uccf->ucc_fast_rx_virtual_fifo_base_offset = -1;
 
 	/* Fill fast UCC structure */
 	uccf->uf_info = uf_info;
@@ -259,15 +262,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
 	gumr |= uf_info->tenc;
 	gumr |= uf_info->tcrc;
 	gumr |= uf_info->mode;
-	out_be32(&uf_regs->gumr, gumr);
+	qe_iowrite32be(gumr, &uf_regs->gumr);
 
 	/* Allocate memory for Tx Virtual Fifo */
 	uccf->ucc_fast_tx_virtual_fifo_base_offset =
 	    qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
-	if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
+	if (uccf->ucc_fast_tx_virtual_fifo_base_offset < 0) {
 		printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
 		       __func__);
-		uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
 		ucc_fast_free(uccf);
 		return -ENOMEM;
 	}
@@ -277,24 +279,25 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
 	    qe_muram_alloc(uf_info->urfs +
 			   UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
 			   UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
-	if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
+	if (uccf->ucc_fast_rx_virtual_fifo_base_offset < 0) {
 		printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
 		       __func__);
-		uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
 		ucc_fast_free(uccf);
 		return -ENOMEM;
 	}
 
 	/* Set Virtual Fifo registers */
-	out_be16(&uf_regs->urfs, uf_info->urfs);
-	out_be16(&uf_regs->urfet, uf_info->urfet);
-	out_be16(&uf_regs->urfset, uf_info->urfset);
-	out_be16(&uf_regs->utfs, uf_info->utfs);
-	out_be16(&uf_regs->utfet, uf_info->utfet);
-	out_be16(&uf_regs->utftt, uf_info->utftt);
+	qe_iowrite16be(uf_info->urfs, &uf_regs->urfs);
+	qe_iowrite16be(uf_info->urfet, &uf_regs->urfet);
+	qe_iowrite16be(uf_info->urfset, &uf_regs->urfset);
+	qe_iowrite16be(uf_info->utfs, &uf_regs->utfs);
+	qe_iowrite16be(uf_info->utfet, &uf_regs->utfet);
+	qe_iowrite16be(uf_info->utftt, &uf_regs->utftt);
 	/* utfb, urfb are offsets from MURAM base */
-	out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
-	out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
+	qe_iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
+		       &uf_regs->utfb);
+	qe_iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
+		       &uf_regs->urfb);
 
 	/* Mux clocking */
 	/* Grant Support */
@@ -362,14 +365,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
 	}
 
 	/* Set interrupt mask register at UCC level. */
-	out_be32(&uf_regs->uccm, uf_info->uccm_mask);
+	qe_iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
 
 	/* First, clear anything pending at UCC level,
 	 * otherwise, old garbage may come through
 	 * as soon as the dam is opened. */
 
 	/* Writing '1' clears */
-	out_be32(&uf_regs->ucce, 0xffffffff);
+	qe_iowrite32be(0xffffffff, &uf_regs->ucce);
 
 	*uccf_ret = uccf;
 	return 0;
@@ -381,11 +384,8 @@ void ucc_fast_free(struct ucc_fast_private * uccf)
 	if (!uccf)
 		return;
 
-	if (uccf->ucc_fast_tx_virtual_fifo_base_offset)
-		qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
-
-	if (uccf->ucc_fast_rx_virtual_fifo_base_offset)
-		qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
+	qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
+	qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
 
 	if (uccf->uf_regs)
 		iounmap(uccf->uf_regs);
diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c
index 34f0ec3a63b5..274d34449846 100644
--- a/drivers/soc/fsl/qe/ucc_slow.c
+++ b/drivers/soc/fsl/qe/ucc_slow.c
@@ -78,7 +78,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
 	us_regs = uccs->us_regs;
 
 	/* Enable reception and/or transmission on this UCC. */
-	gumr_l = in_be32(&us_regs->gumr_l);
+	gumr_l = qe_ioread32be(&us_regs->gumr_l);
 	if (mode & COMM_DIR_TX) {
 		gumr_l |= UCC_SLOW_GUMR_L_ENT;
 		uccs->enabled_tx = 1;
@@ -87,7 +87,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
 		gumr_l |= UCC_SLOW_GUMR_L_ENR;
 		uccs->enabled_rx = 1;
 	}
-	out_be32(&us_regs->gumr_l, gumr_l);
+	qe_iowrite32be(gumr_l, &us_regs->gumr_l);
 }
 EXPORT_SYMBOL(ucc_slow_enable);
 
@@ -99,7 +99,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
 	us_regs = uccs->us_regs;
 
 	/* Disable reception and/or transmission on this UCC. */
-	gumr_l = in_be32(&us_regs->gumr_l);
+	gumr_l = qe_ioread32be(&us_regs->gumr_l);
 	if (mode & COMM_DIR_TX) {
 		gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
 		uccs->enabled_tx = 0;
@@ -108,7 +108,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
 		gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
 		uccs->enabled_rx = 0;
 	}
-	out_be32(&us_regs->gumr_l, gumr_l);
+	qe_iowrite32be(gumr_l, &us_regs->gumr_l);
 }
 EXPORT_SYMBOL(ucc_slow_disable);
 
@@ -154,6 +154,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 		       __func__);
 		return -ENOMEM;
 	}
+	uccs->rx_base_offset = -1;
+	uccs->tx_base_offset = -1;
+	uccs->us_pram_offset = -1;
 
 	/* Fill slow UCC structure */
 	uccs->us_info = us_info;
@@ -179,7 +182,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	/* Get PRAM base */
 	uccs->us_pram_offset =
 		qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
-	if (IS_ERR_VALUE(uccs->us_pram_offset)) {
+	if (uccs->us_pram_offset < 0) {
 		printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
 		ucc_slow_free(uccs);
 		return -ENOMEM;
@@ -198,7 +201,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 		return ret;
 	}
 
-	out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);
+	qe_iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr);
 
 	INIT_LIST_HEAD(&uccs->confQ);
 
@@ -206,10 +209,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	uccs->rx_base_offset =
 		qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
 			       QE_ALIGNMENT_OF_BD);
-	if (IS_ERR_VALUE(uccs->rx_base_offset)) {
+	if (uccs->rx_base_offset < 0) {
 		printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
 		       us_info->rx_bd_ring_len);
-		uccs->rx_base_offset = 0;
 		ucc_slow_free(uccs);
 		return -ENOMEM;
 	}
@@ -217,9 +219,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 
 	uccs->tx_base_offset =
 		qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
 			       QE_ALIGNMENT_OF_BD);
-	if (IS_ERR_VALUE(uccs->tx_base_offset)) {
+	if (uccs->tx_base_offset < 0) {
 		printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
-		uccs->tx_base_offset = 0;
 		ucc_slow_free(uccs);
 		return -ENOMEM;
 	}
@@ -228,27 +229,27 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
 	for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
 		/* clear bd buffer */
-		out_be32(&bd->buf, 0);
+		qe_iowrite32be(0, &bd->buf);
 		/* set bd status and length */
-		out_be32((u32 *) bd, 0);
+		qe_iowrite32be(0, (u32 *)bd);
 		bd++;
 	}
 	/* for last BD set Wrap bit */
-	out_be32(&bd->buf, 0);
-	out_be32((u32 *) bd, cpu_to_be32(T_W));
+	qe_iowrite32be(0, &bd->buf);
+	qe_iowrite32be(cpu_to_be32(T_W), (u32 *)bd);
 
 	/* Init Rx bds */
 	bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
 	for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
 		/* set bd status and length */
-		out_be32((u32*)bd, 0);
+		qe_iowrite32be(0, (u32 *)bd);
 		/* clear bd buffer */
-		out_be32(&bd->buf, 0);
+		qe_iowrite32be(0, &bd->buf);
 		bd++;
 	}
 	/* for last BD set Wrap bit */
-	out_be32((u32*)bd, cpu_to_be32(R_W));
-	out_be32(&bd->buf, 0);
+	qe_iowrite32be(cpu_to_be32(R_W), (u32 *)bd);
+	qe_iowrite32be(0, &bd->buf);
 
 	/* Set GUMR (For more details see the hardware spec.). */
 	/* gumr_h */
@@ -269,7 +270,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 		gumr |= UCC_SLOW_GUMR_H_TXSY;
 	if (us_info->rtsm)
 		gumr |= UCC_SLOW_GUMR_H_RTSM;
-	out_be32(&us_regs->gumr_h, gumr);
+	qe_iowrite32be(gumr, &us_regs->gumr_h);
 
 	/* gumr_l */
 	gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
@@ -282,7 +283,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 		gumr |= UCC_SLOW_GUMR_L_TINV;
 	if (us_info->tend)
 		gumr |= UCC_SLOW_GUMR_L_TEND;
-	out_be32(&us_regs->gumr_l, gumr);
+	qe_iowrite32be(gumr, &us_regs->gumr_l);
 
 	/* Function code registers */
 
@@ -292,8 +293,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	uccs->us_pram->rbmr = UCC_BMR_BO_BE;
 
 	/* rbase, tbase are offsets from MURAM base */
-	out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
-	out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
+	qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
+	qe_iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase);
 
 	/* Mux clocking */
 	/* Grant Support */
@@ -323,14 +324,14 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	}
 
 	/* Set interrupt mask register at UCC level. */
-	out_be16(&us_regs->uccm, us_info->uccm_mask);
+	qe_iowrite16be(us_info->uccm_mask, &us_regs->uccm);
 
 	/* First, clear anything pending at UCC level,
 	 * otherwise, old garbage may come through
 	 * as soon as the dam is opened. */
 
 	/* Writing '1' clears */
-	out_be16(&us_regs->ucce, 0xffff);
+	qe_iowrite16be(0xffff, &us_regs->ucce);
 
 	/* Issue QE Init command */
 	if (us_info->init_tx && us_info->init_rx)
@@ -352,14 +353,9 @@ void ucc_slow_free(struct ucc_slow_private * uccs)
 	if (!uccs)
 		return;
 
-	if (uccs->rx_base_offset)
-		qe_muram_free(uccs->rx_base_offset);
-
-	if (uccs->tx_base_offset)
-		qe_muram_free(uccs->tx_base_offset);
-
-	if (uccs->us_pram)
-		qe_muram_free(uccs->us_pram_offset);
+	qe_muram_free(uccs->rx_base_offset);
+	qe_muram_free(uccs->tx_base_offset);
+	qe_muram_free(uccs->us_pram_offset);
 
 	if (uccs->us_regs)
 		iounmap(uccs->us_regs);
diff --git a/drivers/soc/fsl/qe/usb.c b/drivers/soc/fsl/qe/usb.c
index 32d8269fa692..890f236ea697 100644
--- a/drivers/soc/fsl/qe/usb.c
+++ b/drivers/soc/fsl/qe/usb.c
@@ -43,7 +43,7 @@ int qe_usb_clock_set(enum qe_clock clk, int rate)
 
 	spin_lock_irqsave(&cmxgcr_lock, flags);
 
-	clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
+	qe_clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
 
 	spin_unlock_irqrestore(&cmxgcr_lock, flags);
 
diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
new file mode 100644
index 000000000000..a093dbe6d2cb
--- /dev/null
+++ b/drivers/soc/fsl/rcpm.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// rcpm.c - Freescale QorIQ RCPM driver
+//
+// Copyright 2019 NXP
+//
+// Author: Ran Wang <ran.wang_1@nxp.com>
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/kernel.h>
+
+#define RCPM_WAKEUP_CELL_MAX_SIZE	7
+
+struct rcpm {
+	unsigned int	wakeup_cells;
+	void __iomem	*ippdexpcr_base;
+	bool		little_endian;
+};
+
+/**
+ * rcpm_pm_prepare - performs device-level tasks associated with power
+ * management, such as programming the wakeup source control.
+ * @dev: Device to handle.
+ */
+static int rcpm_pm_prepare(struct device *dev)
+{
+	int i, ret, idx;
+	void __iomem *base;
+	struct wakeup_source	*ws;
+	struct rcpm		*rcpm;
+	struct device_node	*np = dev->of_node;
+	u32 value[RCPM_WAKEUP_CELL_MAX_SIZE + 1];
+	u32 setting[RCPM_WAKEUP_CELL_MAX_SIZE] = {0};
+
+	rcpm = dev_get_drvdata(dev);
+	if (!rcpm)
+		return -EINVAL;
+
+	base = rcpm->ippdexpcr_base;
+	idx = wakeup_sources_read_lock();
+
+	/* Begin with first registered wakeup source */
+	for_each_wakeup_source(ws) {
+
+		/* skip objects that are not attached to a device */
+		if (!ws->dev || !ws->dev->parent)
+			continue;
+
+		ret = device_property_read_u32_array(ws->dev->parent,
+				"fsl,rcpm-wakeup", value,
+				rcpm->wakeup_cells + 1);
+
+		/* Wakeup source should refer to the current rcpm device */
+		if (ret || (np->phandle != value[0]))
+			continue;
+
+		/* Property "#fsl,rcpm-wakeup-cells" of the rcpm node defines
+		 * the number of IPPDEXPCR register cells, and "fsl,rcpm-wakeup"
+		 * of the wakeup source IP contains an integer array: <phandle
+		 * to RCPM node, IPPDEXPCR0 setting, IPPDEXPCR1 setting,
+		 * IPPDEXPCR2 setting, etc>.
+		 *
+		 * So we will go through them to collect setting data.
+		 */
+		for (i = 0; i < rcpm->wakeup_cells; i++)
+			setting[i] |= value[i + 1];
+	}
+
+	wakeup_sources_read_unlock(idx);
+
+	/* Program all IPPDEXPCRn once */
+	for (i = 0; i < rcpm->wakeup_cells; i++) {
+		u32 tmp = setting[i];
+		void __iomem *address = base + i * 4;
+
+		if (!tmp)
+			continue;
+
+		/* We can only OR related bits */
+		if (rcpm->little_endian) {
+			tmp |= ioread32(address);
+			iowrite32(tmp, address);
+		} else {
+			tmp |= ioread32be(address);
+			iowrite32be(tmp, address);
+		}
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops rcpm_pm_ops = {
+	.prepare = rcpm_pm_prepare,
+};
+
+static int rcpm_probe(struct platform_device *pdev)
+{
+	struct device	*dev = &pdev->dev;
+	struct resource *r;
+	struct rcpm	*rcpm;
+	int ret;
+
+	rcpm = devm_kzalloc(dev, sizeof(*rcpm), GFP_KERNEL);
+	if (!rcpm)
+		return -ENOMEM;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r)
+		return -ENODEV;
+
+	rcpm->ippdexpcr_base = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(rcpm->ippdexpcr_base)) {
+		ret = PTR_ERR(rcpm->ippdexpcr_base);
+		return ret;
+	}
+
+	rcpm->little_endian = device_property_read_bool(
+			&pdev->dev, "little-endian");
+
+	ret = device_property_read_u32(&pdev->dev,
+			"#fsl,rcpm-wakeup-cells", &rcpm->wakeup_cells);
+	if (ret)
+		return ret;
+
+	dev_set_drvdata(&pdev->dev, rcpm);
+
+	return 0;
+}
+
+static const struct of_device_id rcpm_of_match[] = {
+	{ .compatible = "fsl,qoriq-rcpm-2.1+", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, rcpm_of_match);
+
+static struct platform_driver rcpm_driver = {
+	.driver = {
+		.name = "rcpm",
+		.of_match_table = rcpm_of_match,
+		.pm	= &rcpm_pm_ops,
+	},
+	.probe = rcpm_probe,
+};
+
+module_platform_driver(rcpm_driver);
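
Most of the QE hunks above mechanically replace the PowerPC-only accessors (in_be32()/out_be32(), in_be16()/out_be16(), setbits32(), clrsetbits_be32(), ...) with qe_*() wrappers; that substitution is what lets this code build on ARM and ARM64, where the PPC helpers do not exist. As a rough illustration of the idea only (the real wrappers live in include/soc/fsl/qe/qe.h and may be spelled differently), such wrappers can be expressed in terms of the generic big-endian MMIO helpers, which are available on PowerPC, ARM and ARM64 alike:

	#include <linux/io.h>
	#include <linux/types.h>

	/* Illustrative sketch, not the kernel's actual header: portable
	 * big-endian register accessors built on ioread32be()/iowrite32be().
	 */
	static inline u32 qe_ioread32be_sketch(const void __iomem *addr)
	{
		return ioread32be(addr);
	}

	static inline void qe_iowrite32be_sketch(u32 val, void __iomem *addr)
	{
		iowrite32be(val, addr);
	}

	static inline void qe_setbits_be32_sketch(void __iomem *addr, u32 set)
	{
		qe_iowrite32be_sketch(qe_ioread32be_sketch(addr) | set, addr);
	}

	static inline void qe_clrbits_be32_sketch(void __iomem *addr, u32 clr)
	{
		qe_iowrite32be_sketch(qe_ioread32be_sketch(addr) & ~clr, addr);
	}

	/* read-modify-write: first clear the "clr" bits, then set "set" */
	static inline void qe_clrsetbits_be32_sketch(void __iomem *addr,
						     u32 clr, u32 set)
	{
		qe_iowrite32be_sketch((qe_ioread32be_sketch(addr) & ~clr) | set,
				      addr);
	}

Note the argument-order change that comes with this style: out_be32(addr, val) becomes qe_iowrite32be(val, addr), which is why every converted write in the hunks above has its operands swapped.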
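
Relatedly, the par_io_of_config() hunk stops casting raw property pointers and converts each "pio-map" cell with be32_to_cpu() instead. Device-tree property cells are stored big-endian, so dereferencing them directly only happened to work on big-endian PowerPC. A small standalone demonstration of why the conversion matters on a little-endian CPU (cell value invented for the demo):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the kernel's be32_to_cpu() on a little-endian host. */
	static uint32_t be32_to_cpu_demo(uint32_t v)
	{
		return ((v >> 24) & 0x000000ffu) |
		       ((v >> 8)  & 0x0000ff00u) |
		       ((v << 8)  & 0x00ff0000u) |
		       ((v << 24) & 0xff000000u);
	}

	int main(void)
	{
		/* A "pio-map" cell holding the value 1, as its bytes appear
		 * when the big-endian FDT blob is read as a native integer
		 * on a little-endian CPU. */
		uint32_t raw_cell = 0x01000000;

		printf("raw: %u  converted: %u\n",
		       raw_cell, be32_to_cpu_demo(raw_cell));
		/* prints: raw: 16777216  converted: 1 */
		return 0;
	}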
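
The ucc_fast.c and ucc_slow.c hunks also drop the IS_ERR_VALUE() checks and the zeroing of offsets on failure. This reflects a convention visible throughout the hunks: the MURAM offsets are treated as signed values that are negative on error, every offset field is initialized to -1 up front, and qe_muram_free() is expected to tolerate negative offsets, so the error paths can free everything unconditionally. A standalone sketch of that pattern (the allocator here is a stub, not the kernel's muram code):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stub allocator: returns a non-negative offset, or a negative
	 * error code when out of space. */
	static int32_t muram_alloc_stub(size_t size)
	{
		return size <= 64 ? 0x100 : -12; /* -ENOMEM */
	}

	/* Freeing a negative offset is a no-op, so callers never need
	 * "was this ever allocated?" guards on their error paths. */
	static void muram_free_stub(int32_t offset)
	{
		if (offset < 0)
			return;
		printf("freeing offset 0x%x\n", offset);
	}

	int main(void)
	{
		int32_t tx_off = -1, rx_off = -1; /* "not allocated yet" */

		tx_off = muram_alloc_stub(32);
		rx_off = muram_alloc_stub(4096); /* fails in this demo */
		if (tx_off < 0 || rx_off < 0) {
			/* single cleanup path, no per-field checks */
			muram_free_stub(tx_off);
			muram_free_stub(rx_off);
			return 1;
		}
		return 0;
	}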
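
Finally, in rcpm.c above, rcpm_pm_prepare() walks every registered wakeup source, matches the phandle in each source's "fsl,rcpm-wakeup" property against the RCPM node, and ORs the per-source masks into one accumulated setting per IPPDEXPCR register before writing them out. A standalone sketch of just that merge step, with two hypothetical wakeup sources and made-up mask values:

	#include <stdint.h>
	#include <stdio.h>

	#define WAKEUP_CELLS 2 /* would come from "#fsl,rcpm-wakeup-cells" */

	int main(void)
	{
		/* Cells that would follow the RCPM phandle in each device's
		 * "fsl,rcpm-wakeup" property (values invented for the demo). */
		const uint32_t ftm_alarm[WAKEUP_CELLS] = { 0x00000000, 0x20000000 };
		const uint32_t gpio_keys[WAKEUP_CELLS] = { 0x00000400, 0x00000000 };
		uint32_t setting[WAKEUP_CELLS] = { 0 };
		int i;

		/* Same accumulation rcpm_pm_prepare() performs per source */
		for (i = 0; i < WAKEUP_CELLS; i++)
			setting[i] = ftm_alarm[i] | gpio_keys[i];

		/* The driver then ORs each non-zero setting[i] into the live
		 * IPPDEXPCRn register, so bits set earlier are never lost. */
		for (i = 0; i < WAKEUP_CELLS; i++)
			printf("IPPDEXPCR%d |= 0x%08x\n", i, setting[i]);
		return 0;
	}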