Diffstat (limited to 'drivers')
374 files changed, 24792 insertions, 7767 deletions
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 8ea8211b2d58..eb76a4c10dbf 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -26,6 +26,7 @@ #include <linux/slab.h> #include <linux/acpi.h> #include <asm/mwait.h> +#include <xen/xen.h> #define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad" #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" @@ -477,6 +478,10 @@ static struct acpi_driver acpi_pad_driver = { static int __init acpi_pad_init(void) { + /* Xen ACPI PAD is used when running as Xen Dom0. */ + if (xen_initial_domain()) + return -ENODEV; + power_saving_mwait_init(); if (power_saving_mwait_eax == 0) return -EINVAL; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 680531062160..48e19d013170 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -526,6 +526,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec) acpi_ec_clear(ec); } +#ifdef CONFIG_PM_SLEEP static bool acpi_ec_query_flushed(struct acpi_ec *ec) { bool flushed; @@ -557,6 +558,7 @@ static void acpi_ec_disable_event(struct acpi_ec *ec) spin_unlock_irqrestore(&ec->lock, flags); __acpi_ec_flush_event(ec); } +#endif /* CONFIG_PM_SLEEP */ static bool acpi_ec_guard_event(struct acpi_ec *ec) { diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 384cfc3083e1..6cf4988206f2 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -129,8 +129,18 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state) control = obj->package.elements[1].integer.value; for (i = 0; i < fan->fps_count; i++) { - if (control == fan->fps[i].control) + /* + * When Fine Grain Control is set, return the state + * corresponding to maximum fan->fps[i].control + * value compared to the current speed. Here the + * fan->fps[] is sorted array with increasing speed. + */ + if (fan->fif.fine_grain_ctrl && control < fan->fps[i].control) { + i = (i > 0) ? 
i - 1 : 0; break; + } else if (control == fan->fps[i].control) { + break; + } } if (i == fan->fps_count) { dev_dbg(&device->dev, "Invalid control value returned\n"); diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 4305ee9db4b2..416953a42510 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -162,11 +162,18 @@ void acpi_os_vprintf(const char *fmt, va_list args) if (acpi_in_debugger) { kdb_printf("%s", buffer); } else { - printk(KERN_CONT "%s", buffer); + if (printk_get_level(buffer)) + printk("%s", buffer); + else + printk(KERN_CONT "%s", buffer); } #else - if (acpi_debugger_write_log(buffer) < 0) - printk(KERN_CONT "%s", buffer); + if (acpi_debugger_write_log(buffer) < 0) { + if (printk_get_level(buffer)) + printk("%s", buffer); + else + printk(KERN_CONT "%s", buffer); + } #endif } diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index f2fd3fee588a..03f5ec11ab31 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -468,10 +468,11 @@ static int acpi_data_get_property_array(struct acpi_device_data *data, } /** - * acpi_data_get_property_reference - returns handle to the referenced object - * @data: ACPI device data object containing the property + * __acpi_node_get_property_reference - returns handle to the referenced object + * @fwnode: Firmware node to get the property from * @propname: Name of the property * @index: Index of the reference to return + * @num_args: Maximum number of arguments after each reference * @args: Location to store the returned reference with optional arguments * * Find property with @propname, verify that it is a package containing at least * @@ -482,17 +483,40 @@ static int acpi_data_get_property_array(struct acpi_device_data *data, * If there's more than one reference in the property value package, @index is * used to select the one to return. * + * It is possible to leave holes in the property value set like in the + * example below: + * + * Package () { + * "cs-gpios", + * Package () { + * ^GPIO, 19, 0, 0, + * ^GPIO, 20, 0, 0, + * 0, + * ^GPIO, 21, 0, 0, + * } + * } + * + * Calling this function with index %2 returns %-ENOENT and with index %3 + * returns the last entry. If the property does not contain any more values + * %-ENODATA is returned. The NULL entry must be a single integer and + * preferably contain the value %0. + * * Return: %0 on success, negative error code on failure.
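
For illustration, here is a hypothetical consumer of the hole semantics documented above (not part of this patch); it assumes the acpi_node_get_property_reference() wrapper retained by this series and struct acpi_reference_args from <linux/acpi.h>:

static void walk_refs(struct fwnode_handle *fwnode)
{
	struct acpi_reference_args args;
	size_t index;
	int ret;

	for (index = 0; ; index++) {
		ret = acpi_node_get_property_reference(fwnode, "cs-gpios",
						       index, &args);
		if (ret == -ENOENT)
			continue;	/* NULL entry: a hole, skip this index */
		if (ret)
			break;		/* -ENODATA past the end, or a real error */
		/* args.adev is the referenced device; args.args[0..nargs-1]
		 * hold the trailing integer arguments from the package. */
	}
}
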
*/ -static int acpi_data_get_property_reference(struct acpi_device_data *data, - const char *propname, size_t index, - struct acpi_reference_args *args) +int __acpi_node_get_property_reference(struct fwnode_handle *fwnode, + const char *propname, size_t index, size_t num_args, + struct acpi_reference_args *args) { const union acpi_object *element, *end; const union acpi_object *obj; + struct acpi_device_data *data; struct acpi_device *device; int ret, idx = 0; + data = acpi_device_data_of_node(fwnode); + if (!data) + return -EINVAL; + ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj); if (ret) return ret; @@ -532,59 +556,54 @@ static int acpi_data_get_property_reference(struct acpi_device_data *data, while (element < end) { u32 nargs, i; - if (element->type != ACPI_TYPE_LOCAL_REFERENCE) - return -EPROTO; - - ret = acpi_bus_get_device(element->reference.handle, &device); - if (ret) - return -ENODEV; - - element++; - nargs = 0; - - /* assume following integer elements are all args */ - for (i = 0; element + i < end; i++) { - int type = element[i].type; + if (element->type == ACPI_TYPE_LOCAL_REFERENCE) { + ret = acpi_bus_get_device(element->reference.handle, + &device); + if (ret) + return -ENODEV; + + nargs = 0; + element++; + + /* assume following integer elements are all args */ + for (i = 0; element + i < end && i < num_args; i++) { + int type = element[i].type; + + if (type == ACPI_TYPE_INTEGER) + nargs++; + else if (type == ACPI_TYPE_LOCAL_REFERENCE) + break; + else + return -EPROTO; + } - if (type == ACPI_TYPE_INTEGER) - nargs++; - else if (type == ACPI_TYPE_LOCAL_REFERENCE) - break; - else + if (nargs > MAX_ACPI_REFERENCE_ARGS) return -EPROTO; - } - if (idx++ == index) { - args->adev = device; - args->nargs = nargs; - for (i = 0; i < nargs; i++) - args->args[i] = element[i].integer.value; + if (idx == index) { + args->adev = device; + args->nargs = nargs; + for (i = 0; i < nargs; i++) + args->args[i] = element[i].integer.value; - return 0; + return 0; + } + + element += nargs; + } else if (element->type == ACPI_TYPE_INTEGER) { + if (idx == index) + return -ENOENT; + element++; + } else { + return -EPROTO; } - element += nargs; + idx++; } - return -EPROTO; -} - -/** - * acpi_node_get_property_reference - get a handle to the referenced object. - * @fwnode: Firmware node to get the property from. - * @propname: Name of the property. - * @index: Index of the reference to return. - * @args: Location to store the returned reference with optional arguments. - */ -int acpi_node_get_property_reference(struct fwnode_handle *fwnode, - const char *name, size_t index, - struct acpi_reference_args *args) -{ - struct acpi_device_data *data = acpi_device_data_of_node(fwnode); - - return data ? 
acpi_data_get_property_reference(data, name, index, args) : -EINVAL; + return -ENODATA; } -EXPORT_SYMBOL_GPL(acpi_node_get_property_reference); +EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); static int acpi_data_prop_read_single(struct acpi_device_data *data, const char *propname, diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index f4ebe39539af..35e8fbca10ad 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -520,7 +520,8 @@ static void acpi_thermal_check(void *data) if (!tz->tz_enabled) return; - thermal_zone_device_update(tz->thermal_zone); + thermal_zone_device_update(tz->thermal_zone, + THERMAL_EVENT_UNSPECIFIED); } /* sys I/F for generic thermal sysfs support */ diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 90eabaf81215..ba5f11cebee2 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1400,142 +1400,56 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance) } #endif -/* - * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer - * to single msi. - */ -static int ahci_init_msix(struct pci_dev *pdev, unsigned int n_ports, - struct ahci_host_priv *hpriv, unsigned long flags) +static int ahci_get_irq_vector(struct ata_host *host, int port) { - int nvec, i, rc; - - /* Do not init MSI-X if MSI is disabled for the device */ - if (hpriv->flags & AHCI_HFLAG_NO_MSI) - return -ENODEV; - - nvec = pci_msix_vec_count(pdev); - if (nvec < 0) - return nvec; - - /* - * Proper MSI-X implementations will have a vector per-port. - * Barring that, we prefer single-MSI over single-MSIX. If this - * check fails (not enough MSI-X vectors for all ports) we will - * be called again with the flag clear iff ahci_init_msi() - * fails. - */ - if (flags & AHCI_HFLAG_MULTI_MSIX) { - if (nvec < n_ports) - return -ENODEV; - nvec = n_ports; - } else if (nvec) { - nvec = 1; - } else { - /* - * Emit dev_err() since this was the non-legacy irq - * method of last resort. - */ - rc = -ENODEV; - goto fail; - } - - for (i = 0; i < nvec; i++) - hpriv->msix[i].entry = i; - rc = pci_enable_msix_exact(pdev, hpriv->msix, nvec); - if (rc < 0) - goto fail; - - if (nvec > 1) - hpriv->flags |= AHCI_HFLAG_MULTI_MSIX; - hpriv->irq = hpriv->msix[0].vector; /* for single msi-x */ - - return nvec; -fail: - dev_err(&pdev->dev, - "failed to enable MSI-X with error %d, # of vectors: %d\n", - rc, nvec); - - return rc; + return pci_irq_vector(to_pci_dev(host->dev), port); } static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports, struct ahci_host_priv *hpriv) { - int rc, nvec; + int nvec; if (hpriv->flags & AHCI_HFLAG_NO_MSI) return -ENODEV; - nvec = pci_msi_vec_count(pdev); - if (nvec < 0) - return nvec; - /* * If number of MSIs is less than number of ports then Sharing Last * Message mode could be enforced. In this case assume that advantage * of multiple MSIs is negated and use single MSI mode instead. */ - if (nvec < n_ports) - goto single_msi; - - rc = pci_enable_msi_exact(pdev, nvec); - if (rc == -ENOSPC) - goto single_msi; - if (rc < 0) - return rc; + nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX, + PCI_IRQ_MSIX | PCI_IRQ_MSI); + if (nvec > 0) { + if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) { + hpriv->get_irq_vector = ahci_get_irq_vector; + hpriv->flags |= AHCI_HFLAG_MULTI_MSI; + return nvec; + } - /* fallback to single MSI mode if the controller enforced MRSM mode */ - if (readl(hpriv->mmio + HOST_CTL) & HOST_MRSM) { - pci_disable_msi(pdev); + /* + * Fallback to single MSI mode if the controller enforced MRSM + * mode.
+ */ printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n"); - goto single_msi; + pci_free_irq_vectors(pdev); } - if (nvec > 1) - hpriv->flags |= AHCI_HFLAG_MULTI_MSI; - - goto out; - -single_msi: - nvec = 1; - - rc = pci_enable_msi(pdev); - if (rc < 0) - return rc; -out: - hpriv->irq = pdev->irq; - - return nvec; -} - -static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports, - struct ahci_host_priv *hpriv) -{ - int nvec; - /* - * Try to enable per-port MSI-X. If the host is not capable - * fall back to single MSI before finally attempting single - * MSI-X. + * -ENOSPC indicates we don't have enough vectors. Don't bother trying + * a single vector for any other error: */ - nvec = ahci_init_msix(pdev, n_ports, hpriv, AHCI_HFLAG_MULTI_MSIX); - if (nvec >= 0) + if (nvec < 0 && nvec != -ENOSPC) return nvec; - nvec = ahci_init_msi(pdev, n_ports, hpriv); - if (nvec >= 0) - return nvec; - - /* try single-msix */ - nvec = ahci_init_msix(pdev, n_ports, hpriv, 0); - if (nvec >= 0) + /* + * If the host is not capable of supporting per-port vectors, fall + * back to single MSI before finally attempting single MSI-X. + */ + nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (nvec == 1) return nvec; - - /* legacy intx interrupts */ - pci_intx(pdev, 1); - hpriv->irq = pdev->irq; - - return 0; + return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); } static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -1698,11 +1612,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!host) return -ENOMEM; host->private_data = hpriv; - hpriv->msix = devm_kzalloc(&pdev->dev, - sizeof(struct msix_entry) * n_ports, GFP_KERNEL); - if (!hpriv->msix) - return -ENOMEM; - ahci_init_interrupts(pdev, n_ports, hpriv); + + if (ahci_init_msi(pdev, n_ports, hpriv) < 0) { + /* legacy intx interrupts */ + pci_intx(pdev, 1); + } + hpriv->irq = pdev->irq; if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) host->flags |= ATA_HOST_PARALLEL_SCAN; diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 70b06bcfb7e3..0cc08f892fea 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -242,12 +242,10 @@ enum { AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */ #ifdef CONFIG_PCI_MSI - AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */ - AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */ + AHCI_HFLAG_MULTI_MSI = (1 << 20), /* per-port MSI(-X) */ #else /* compile out MSI infrastructure */ AHCI_HFLAG_MULTI_MSI = 0, - AHCI_HFLAG_MULTI_MSIX = 0, #endif AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */ @@ -351,7 +349,6 @@ struct ahci_host_priv { * the PHY position in this array.
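
Condensed, the interrupt setup this ahci.c change converges on reads as the following ladder (a simplified sketch, omitting the MRSM fallback shown above; not a drop-in replacement):

static int example_init_irqs(struct pci_dev *pdev, unsigned int n_ports)
{
	int nvec;

	/* per-port MSI-X or MSI if the controller has enough vectors */
	nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvec > 0)
		return nvec;
	if (nvec != -ENOSPC)
		return nvec;		/* hard failure, give up */

	/* otherwise a single MSI, then a single MSI-X */
	nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (nvec == 1)
		return nvec;
	nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
	if (nvec == 1)
		return nvec;

	pci_intx(pdev, 1);		/* legacy INTx as the last resort */
	return 0;
}
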
*/ struct phy **phys; - struct msix_entry *msix; /* Optional MSI-X support */ unsigned nports; /* Number of ports */ void *plat_data; /* Other platform data */ unsigned int irq; /* interrupt line */ @@ -362,22 +359,11 @@ struct ahci_host_priv { */ void (*start_engine)(struct ata_port *ap); irqreturn_t (*irq_handler)(int irq, void *dev_instance); -}; -#ifdef CONFIG_PCI_MSI -static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port) -{ - if (hpriv->flags & AHCI_HFLAG_MULTI_MSIX) - return hpriv->msix[port].vector; - else - return hpriv->irq + port; -} -#else -static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port) -{ - return hpriv->irq; -} -#endif + /* only required for per-port MSI(-X) support */ + int (*get_irq_vector)(struct ata_host *host, + int port); +}; extern int ahci_ignore_sss; diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index 7bdee9bd8786..1eba8dff875e 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -30,24 +30,23 @@ #define PORT_PHY3 0xB0 #define PORT_PHY4 0xB4 #define PORT_PHY5 0xB8 +#define PORT_AXICC 0xBC #define PORT_TRANS 0xC8 /* port register default value */ #define AHCI_PORT_PHY_1_CFG 0xa003fffe #define AHCI_PORT_TRANS_CFG 0x08000029 +#define AHCI_PORT_AXICC_CFG 0x3fffffff /* for ls1021a */ #define LS1021A_PORT_PHY2 0x28183414 #define LS1021A_PORT_PHY3 0x0e080e06 #define LS1021A_PORT_PHY4 0x064a080b #define LS1021A_PORT_PHY5 0x2aa86470 +#define LS1021A_AXICC_ADDR 0xC0 #define SATA_ECC_DISABLE 0x00020000 -/* for ls1043a */ -#define LS1043A_PORT_PHY2 0x28184d1f -#define LS1043A_PORT_PHY3 0x0e081509 - enum ahci_qoriq_type { AHCI_LS1021A, AHCI_LS1043A, @@ -137,7 +136,7 @@ static struct ata_port_operations ahci_qoriq_ops = { .hardreset = ahci_qoriq_hardreset, }; -static struct ata_port_info ahci_qoriq_port_info = { +static const struct ata_port_info ahci_qoriq_port_info = { .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, @@ -162,18 +161,19 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv) writel(LS1021A_PORT_PHY4, reg_base + PORT_PHY4); writel(LS1021A_PORT_PHY5, reg_base + PORT_PHY5); writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); + writel(AHCI_PORT_AXICC_CFG, reg_base + LS1021A_AXICC_ADDR); break; case AHCI_LS1043A: writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); - writel(LS1043A_PORT_PHY2, reg_base + PORT_PHY2); - writel(LS1043A_PORT_PHY3, reg_base + PORT_PHY3); writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); + writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); break; case AHCI_LS2080A: writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); + writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); break; } @@ -221,12 +221,6 @@ static int ahci_qoriq_probe(struct platform_device *pdev) if (rc) goto disable_resources; - /* Workaround for ls2080a */ - if (qoriq_priv->type == AHCI_LS2080A) { - hpriv->flags |= AHCI_HFLAG_NO_NCQ; - ahci_qoriq_port_info.flags &= ~ATA_FLAG_NCQ; - } - rc = ahci_platform_init_host(pdev, hpriv, &ahci_qoriq_port_info, &ahci_qoriq_sht); if (rc) diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c index 8ff428fe8e0f..bc345f249555 100644 --- a/drivers/ata/ahci_st.c +++ b/drivers/ata/ahci_st.c @@ -147,6 +147,7 @@ static struct scsi_host_template ahci_platform_sht = { static int st_ahci_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct st_ahci_drv_data *drv_data; struct ahci_host_priv *hpriv; int err; @@ -170,6 +171,9 @@ static int 
st_ahci_probe(struct platform_device *pdev) st_ahci_configure_oob(hpriv->mmio); + of_property_read_u32(dev->of_node, + "ports-implemented", &hpriv->force_port_map); + err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, &ahci_platform_sht); if (err) { diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index dcf2c724fd06..0d028ead99e8 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -2520,7 +2520,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host, */ for (i = 0; i < host->n_ports; i++) { struct ahci_port_priv *pp = host->ports[i]->private_data; - int irq = ahci_irq_vector(hpriv, i); + int irq = hpriv->get_irq_vector(host, i); /* Do not receive interrupts sent by dummy ports */ if (!pp) { @@ -2556,10 +2556,15 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht) int irq = hpriv->irq; int rc; - if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) { + if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) { if (hpriv->irq_handler) dev_warn(host->dev, "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n"); + if (!hpriv->get_irq_vector) { + dev_err(host->dev, + "AHCI_HFLAG_MULTI_MSI requires ->get_irq_vector!\n"); + return -EIO; + } rc = ahci_host_activate_multi_irqs(host, sht); } else { diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index e207b33e4ce9..9cceb4a875a5 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1159,8 +1159,6 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev) { sdev->use_10_for_rw = 1; sdev->use_10_for_ms = 1; - sdev->no_report_opcodes = 1; - sdev->no_write_same = 1; /* Schedule policy is determined by ->qc_defer() callback and * it needs to see every deferred qc. Set dev_blocked to 1 to @@ -3282,18 +3280,125 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) return 1; } +/** + * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim + * @cmd: SCSI command being translated + * @trmax: Maximum number of entries that will fit in sector_size bytes. + * @sector: Starting sector + * @count: Total range of the request in logical sectors + * + * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted + * descriptor. + * + * Up to 64 entries of the format: + * 63:48 Range Length + * 47:0 LBA + * + * Range Length of 0 is ignored. + * LBAs should be in sorted order and must not overlap. + * + * NOTE: this is the same format as ADD LBA(S) TO NV CACHE PINNED SET + * + * Return: Number of bytes copied into sglist. + */ +static size_t ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 trmax, + u64 sector, u32 count) +{ + struct scsi_device *sdp = cmd->device; + size_t len = sdp->sector_size; + size_t r; + __le64 *buf; + u32 i = 0; + unsigned long flags; + + WARN_ON(len > ATA_SCSI_RBUF_SIZE); + + if (len > ATA_SCSI_RBUF_SIZE) + len = ATA_SCSI_RBUF_SIZE; + + spin_lock_irqsave(&ata_scsi_rbuf_lock, flags); + buf = ((void *)ata_scsi_rbuf); + memset(buf, 0, len); + while (i < trmax) { + u64 entry = sector | + ((u64)(count > 0xffff ? 0xffff : count) << 48); + buf[i++] = __cpu_to_le64(entry); + if (count <= 0xffff) + break; + count -= 0xffff; + sector += 0xffff; + } + r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len); + spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags); + + return r; +} + +/** + * ata_format_sct_write_same() - SATL Write Same to ATA SCT Write Same + * @cmd: SCSI command being translated + * @lba: Starting sector + * @num: Number of sectors to be zeroed.
+ * + * Rewrite the WRITE SAME payload to be an SCT Write Same formatted + * descriptor. + * NOTE: Writes a pattern (0's) in the foreground. + * + * Return: Number of bytes copied into sglist. + */ +static size_t ata_format_sct_write_same(struct scsi_cmnd *cmd, u64 lba, u64 num) +{ + struct scsi_device *sdp = cmd->device; + size_t len = sdp->sector_size; + size_t r; + u16 *buf; + unsigned long flags; + + spin_lock_irqsave(&ata_scsi_rbuf_lock, flags); + buf = ((void *)ata_scsi_rbuf); + + put_unaligned_le16(0x0002, &buf[0]); /* SCT_ACT_WRITE_SAME */ + put_unaligned_le16(0x0101, &buf[1]); /* WRITE PTRN FG */ + put_unaligned_le64(lba, &buf[2]); + put_unaligned_le64(num, &buf[6]); + put_unaligned_le32(0u, &buf[10]); /* pattern */ + + WARN_ON(len > ATA_SCSI_RBUF_SIZE); + + if (len > ATA_SCSI_RBUF_SIZE) + len = ATA_SCSI_RBUF_SIZE; + + r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len); + spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags); + + return r; +} + +/** + * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same + * @qc: Command to be translated + * + * Translate a SCSI WRITE SAME command to be either a DSM TRIM command or + * an SCT Write Same command. + * Based on whether WRITE SAME has the UNMAP flag set: + * when set, translate to DSM TRIM; + * when clear, translate to SCT Write Same. + */ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) { struct ata_taskfile *tf = &qc->tf; struct scsi_cmnd *scmd = qc->scsicmd; + struct scsi_device *sdp = scmd->device; + size_t len = sdp->sector_size; struct ata_device *dev = qc->dev; const u8 *cdb = scmd->cmnd; u64 block; u32 n_block; + const u32 trmax = len >> 3; u32 size; - void *buf; u16 fp; u8 bp = 0xff; + u8 unmap = cdb[1] & 0x8; /* we may not issue DMA commands if no DMA mode is set */ if (unlikely(!dev->dma_mode)) @@ -3305,11 +3410,26 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) } scsi_16_lba_len(cdb, &block, &n_block); - /* for now we only support WRITE SAME with the unmap bit set */ - if (unlikely(!(cdb[1] & 0x8))) { - fp = 1; - bp = 3; - goto invalid_fld; + if (unmap) { + /* If trim is not enabled the cmd is invalid. */ + if ((dev->horkage & ATA_HORKAGE_NOTRIM) || + !ata_id_has_trim(dev->id)) { + fp = 1; + bp = 3; + goto invalid_fld; + } + /* If the request is too large the cmd is invalid */ + if (n_block > 0xffff * trmax) { + fp = 2; + goto invalid_fld; + } + } else { + /* If write same is not available the cmd is invalid */ + if (!ata_id_sct_write_same(dev->id)) { + fp = 1; + bp = 3; + goto invalid_fld; + } } /* @@ -3319,32 +3439,54 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) if (!scsi_sg_count(scmd)) goto invalid_param_len; - buf = page_address(sg_page(scsi_sglist(scmd))); - - if (n_block <= 65535 * ATA_MAX_TRIM_RNUM) { - size = ata_set_lba_range_entries(buf, ATA_MAX_TRIM_RNUM, block, n_block); - } else { - fp = 2; - goto invalid_fld; - } + /* + * The size must match the sector size in bytes. + * For DATA SET MANAGEMENT TRIM in ACS-2, nsect (aka count) + * is defined as the number of 512-byte blocks to be transferred.
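
The per-range encoding ata_format_dsm_trim_descr() uses boils down to this helper (a sketch; the real loop above additionally splits counts larger than 0xffff across consecutive entries):

static inline __le64 dsm_trim_entry(u64 lba, u16 nsectors)
{
	/* bits 63:48 carry the range length, bits 47:0 the starting LBA */
	return cpu_to_le64((lba & ((1ULL << 48) - 1)) | ((u64)nsectors << 48));
}
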
+ */ + if (unmap) { + size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block); + if (size != len) + goto invalid_param_len; - if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) { - /* Newer devices support queued TRIM commands */ - tf->protocol = ATA_PROT_NCQ; - tf->command = ATA_CMD_FPDMA_SEND; - tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f; - tf->nsect = qc->tag << 3; - tf->hob_feature = (size / 512) >> 8; - tf->feature = size / 512; + if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) { + /* Newer devices support queued TRIM commands */ + tf->protocol = ATA_PROT_NCQ; + tf->command = ATA_CMD_FPDMA_SEND; + tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f; + tf->nsect = qc->tag << 3; + tf->hob_feature = (size / 512) >> 8; + tf->feature = size / 512; - tf->auxiliary = 1; + tf->auxiliary = 1; + } else { + tf->protocol = ATA_PROT_DMA; + tf->hob_feature = 0; + tf->feature = ATA_DSM_TRIM; + tf->hob_nsect = (size / 512) >> 8; + tf->nsect = size / 512; + tf->command = ATA_CMD_DSM; + } } else { - tf->protocol = ATA_PROT_DMA; + size = ata_format_sct_write_same(scmd, block, n_block); + if (size != len) + goto invalid_param_len; + tf->hob_feature = 0; - tf->feature = ATA_DSM_TRIM; - tf->hob_nsect = (size / 512) >> 8; - tf->nsect = size / 512; - tf->command = ATA_CMD_DSM; + tf->feature = 0; + tf->hob_nsect = 0; + tf->nsect = 1; + tf->lbah = 0; + tf->lbam = 0; + tf->lbal = ATA_CMD_STANDBYNOW1; + tf->hob_lbah = 0; + tf->hob_lbam = 0; + tf->hob_lbal = 0; + tf->device = ATA_CMD_STANDBYNOW1; + tf->protocol = ATA_PROT_DMA; + tf->command = ATA_CMD_WRITE_LOG_DMA_EXT; + if (unlikely(dev->flags & ATA_DFLAG_PIO)) + tf->command = ATA_CMD_WRITE_LOG_EXT; } tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | @@ -3368,6 +3510,76 @@ invalid_opcode: } /** + * ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN + * @args: device MAINTENANCE_IN data / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * + * Yields a subset to satisfy scsi_report_opcode() + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf) +{ + struct ata_device *dev = args->dev; + u8 *cdb = args->cmd->cmnd; + u8 supported = 0; + unsigned int err = 0; + + if (cdb[2] != 1) { + ata_dev_warn(dev, "invalid command format %d\n", cdb[2]); + err = 2; + goto out; + } + switch (cdb[3]) { + case INQUIRY: + case MODE_SENSE: + case MODE_SENSE_10: + case READ_CAPACITY: + case SERVICE_ACTION_IN_16: + case REPORT_LUNS: + case REQUEST_SENSE: + case SYNCHRONIZE_CACHE: + case REZERO_UNIT: + case SEEK_6: + case SEEK_10: + case TEST_UNIT_READY: + case SEND_DIAGNOSTIC: + case MAINTENANCE_IN: + case READ_6: + case READ_10: + case READ_16: + case WRITE_6: + case WRITE_10: + case WRITE_16: + case ATA_12: + case ATA_16: + case VERIFY: + case VERIFY_16: + case MODE_SELECT: + case MODE_SELECT_10: + case START_STOP: + supported = 3; + break; + case WRITE_SAME_16: + if (!ata_id_sct_write_same(dev->id)) + break; + /* fallthrough: if SCT ... 
only enable for ZBC */ + case ZBC_IN: + case ZBC_OUT: + if (ata_id_zoned_cap(dev->id) || + dev->class == ATA_DEV_ZAC) + supported = 3; + break; + default: + break; + } +out: + rbuf[1] = supported; /* supported */ + return err; +} + +/** * ata_scsi_report_zones_complete - convert ATA output * @qc: command structure returning the data * @@ -3610,7 +3822,7 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc, { struct ata_taskfile *tf = &qc->tf; struct ata_device *dev = qc->dev; - char mpage[CACHE_MPAGE_LEN]; + u8 mpage[CACHE_MPAGE_LEN]; u8 wce; int i; @@ -3666,7 +3878,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc, const u8 *buf, int len, u16 *fp) { struct ata_device *dev = qc->dev; - char mpage[CONTROL_MPAGE_LEN]; + u8 mpage[CONTROL_MPAGE_LEN]; u8 d_sense; int i; @@ -3701,8 +3913,6 @@ static int ata_mselect_control(struct ata_queued_cmd *qc, dev->flags |= ATA_DFLAG_D_SENSE; else dev->flags &= ~ATA_DFLAG_D_SENSE; - qc->scsicmd->result = SAM_STAT_GOOD; - qc->scsicmd->scsi_done(qc->scsicmd); return 0; } @@ -3829,6 +4039,8 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) if (ata_mselect_control(qc, p, pg_len, &fp) < 0) { fp += hdr_len + bd_len; goto invalid_param; + } else { + goto skip; /* No ATA command to send */ } break; default: /* invalid page code */ @@ -4147,6 +4359,13 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) ata_scsi_invalid_field(dev, cmd, 1); break; + case MAINTENANCE_IN: + if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES) + ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in); + else + ata_scsi_invalid_field(dev, cmd, 1); + break; + /* all other commands */ default: ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0); @@ -4179,7 +4398,6 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) shost->max_lun = 1; shost->max_channel = 1; shost->max_cmd_len = 16; - shost->no_write_same = 1; /* Schedule policy is determined by ->qc_defer() * callback and it needs to see every deferred qc. 
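
With no_report_opcodes no longer forced, initiators can probe opcode support; the CDB that the new ata_scsiop_maint_in() answers has this shape (illustrative values, per SPC-4):

static const unsigned char rsoc_cdb[12] = {
	0xa3,	/* MAINTENANCE IN */
	0x0c,	/* service action: REPORT SUPPORTED OPERATION CODES */
	0x01,	/* reporting options: one-command format (the only one simulated) */
	0x93,	/* requested opcode, e.g. WRITE SAME(16) */
	0x00, 0x00,		/* requested service action (unused here) */
	0x00, 0x00, 0x00, 0x04,	/* allocation length */
	0x00,	/* reserved */
	0x00,	/* control */
};
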
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c index 9f27b14009f9..1611e0e8d767 100644 --- a/drivers/ata/pata_at91.c +++ b/drivers/ata/pata_at91.c @@ -347,10 +347,8 @@ static int at91sam9_smc_fields_init(struct device *dev) field.reg = AT91SAM9_SMC_MODE(AT91SAM9_SMC_GENERIC); fields.mode = devm_regmap_field_alloc(dev, smc, field); - if (IS_ERR(fields.mode)) - return PTR_ERR(fields.mode); - return 0; + return PTR_ERR_OR_ZERO(fields.mode); } static int pata_at91_probe(struct platform_device *pdev) diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 27245957eee3..475a00669427 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -152,8 +152,7 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev) div = 8; T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate()); - if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T)) - BUG(); + BUG_ON(ata_timing_compute(dev, dev->pio_mode, &timing, T, T)); t1 = timing.setup; if (t1) diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 745489a1c86a..efc48bf89d51 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -1727,15 +1727,13 @@ static int mv_port_start(struct ata_port *ap) return -ENOMEM; ap->private_data = pp; - pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); + pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); if (!pp->crqb) return -ENOMEM; - memset(pp->crqb, 0, MV_CRQB_Q_SZ); - pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); + pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); if (!pp->crpb) goto out_port_free_dma_mem; - memset(pp->crpb, 0, MV_CRPB_Q_SZ); /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */ if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0) diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index c07e725ea93d..10e1b9eee10e 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -119,4 +119,13 @@ config CFAG12864B_RATE If you compile this as a module, you can still override this value using the module parameters. +config IMG_ASCII_LCD + tristate "Imagination Technologies ASCII LCD Display" + default y if MIPS_MALTA || MIPS_SEAD3 + select SYSCON + help + Enable this to support the simple ASCII LCD displays found on + development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3 + from Imagination Technologies. + endif # AUXDISPLAY diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile index 8a8936a468b9..3127175c89df 100644 --- a/drivers/auxdisplay/Makefile +++ b/drivers/auxdisplay/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_KS0108) += ks0108.o obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o +obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c new file mode 100644 index 000000000000..bf43b5d2aafc --- /dev/null +++ b/drivers/auxdisplay/img-ascii-lcd.c @@ -0,0 +1,443 @@ +/* + * Copyright (C) 2016 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
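
Two of the cleanups above are pure helper substitutions; in effect they expand to (a sketch of the equivalences):

/* What PTR_ERR_OR_ZERO(p) replaces in pata_at91: */
static inline int ptr_err_or_zero_like(const void *p)
{
	return IS_ERR(p) ? PTR_ERR(p) : 0;
}

/* What dma_pool_zalloc() folds together in sata_mv; sz is the pool's
 * block size (MV_CRQB_Q_SZ / MV_CRPB_Q_SZ in the old memset calls): */
static inline void *dma_pool_zalloc_like(struct dma_pool *pool,
					 dma_addr_t *dma, size_t sz)
{
	void *p = dma_pool_alloc(pool, GFP_KERNEL, dma);

	if (p)
		memset(p, 0, sz);
	return p;
}
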
+ */ + +#include <generated/utsrelease.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/sysfs.h> + +struct img_ascii_lcd_ctx; + +/** + * struct img_ascii_lcd_config - Configuration information about an LCD model + * @num_chars: the number of characters the LCD can display + * @external_regmap: true if registers are in a system controller, else false + * @update: function called to update the LCD + */ +struct img_ascii_lcd_config { + unsigned int num_chars; + bool external_regmap; + void (*update)(struct img_ascii_lcd_ctx *ctx); +}; + +/** + * struct img_ascii_lcd_ctx - Private data structure + * @pdev: the ASCII LCD platform device + * @base: the base address of the LCD registers + * @regmap: the regmap through which LCD registers are accessed + * @offset: the offset within regmap to the start of the LCD registers + * @cfg: pointer to the LCD model configuration + * @message: the full message to display or scroll on the LCD + * @message_len: the length of the @message string + * @scroll_pos: index of the first character of @message currently displayed + * @scroll_rate: scroll interval in jiffies + * @timer: timer used to implement scrolling + * @curr: the string currently displayed on the LCD + */ +struct img_ascii_lcd_ctx { + struct platform_device *pdev; + union { + void __iomem *base; + struct regmap *regmap; + }; + u32 offset; + const struct img_ascii_lcd_config *cfg; + char *message; + unsigned int message_len; + unsigned int scroll_pos; + unsigned int scroll_rate; + struct timer_list timer; + char curr[] __aligned(8); +}; + +/* + * MIPS Boston development board + */ + +static void boston_update(struct img_ascii_lcd_ctx *ctx) +{ + ulong val; + +#if BITS_PER_LONG == 64 + val = *((u64 *)&ctx->curr[0]); + __raw_writeq(val, ctx->base); +#elif BITS_PER_LONG == 32 + val = *((u32 *)&ctx->curr[0]); + __raw_writel(val, ctx->base); + val = *((u32 *)&ctx->curr[4]); + __raw_writel(val, ctx->base + 4); +#else +# error Not 32 or 64 bit +#endif +} + +static struct img_ascii_lcd_config boston_config = { + .num_chars = 8, + .update = boston_update, +}; + +/* + * MIPS Malta development board + */ + +static void malta_update(struct img_ascii_lcd_ctx *ctx) +{ + unsigned int i; + int err; + + for (i = 0; i < ctx->cfg->num_chars; i++) { + err = regmap_write(ctx->regmap, + ctx->offset + (i * 8), ctx->curr[i]); + if (err) + break; + } + + if (unlikely(err)) + pr_err_ratelimited("Failed to update LCD display: %d\n", err); +} + +static struct img_ascii_lcd_config malta_config = { + .num_chars = 8, + .external_regmap = true, + .update = malta_update, +}; + +/* + * MIPS SEAD3 development board + */ + +enum { + SEAD3_REG_LCD_CTRL = 0x00, +#define SEAD3_REG_LCD_CTRL_SETDRAM BIT(7) + SEAD3_REG_LCD_DATA = 0x08, + SEAD3_REG_CPLD_STATUS = 0x10, +#define SEAD3_REG_CPLD_STATUS_BUSY BIT(0) + SEAD3_REG_CPLD_DATA = 0x18, +#define SEAD3_REG_CPLD_DATA_BUSY BIT(7) +}; + +static int sead3_wait_sm_idle(struct img_ascii_lcd_ctx *ctx) +{ + unsigned int status; + int err; + + do { + err = regmap_read(ctx->regmap, + ctx->offset + SEAD3_REG_CPLD_STATUS, + &status); + if (err) + return err; + } while (status & SEAD3_REG_CPLD_STATUS_BUSY); + + return 0; + +} + +static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx) +{ + unsigned int cpld_data; + int err; + + err = sead3_wait_sm_idle(ctx); + if (err) + 
return err; + + do { + err = regmap_read(ctx->regmap, + ctx->offset + SEAD3_REG_LCD_CTRL, + &cpld_data); + if (err) + return err; + + err = sead3_wait_sm_idle(ctx); + if (err) + return err; + + err = regmap_read(ctx->regmap, + ctx->offset + SEAD3_REG_CPLD_DATA, + &cpld_data); + if (err) + return err; + } while (cpld_data & SEAD3_REG_CPLD_DATA_BUSY); + + return 0; +} + +static void sead3_update(struct img_ascii_lcd_ctx *ctx) +{ + unsigned int i; + int err; + + for (i = 0; i < ctx->cfg->num_chars; i++) { + err = sead3_wait_lcd_idle(ctx); + if (err) + break; + + err = regmap_write(ctx->regmap, + ctx->offset + SEAD3_REG_LCD_CTRL, + SEAD3_REG_LCD_CTRL_SETDRAM | i); + if (err) + break; + + err = sead3_wait_lcd_idle(ctx); + if (err) + break; + + err = regmap_write(ctx->regmap, + ctx->offset + SEAD3_REG_LCD_DATA, + ctx->curr[i]); + if (err) + break; + } + + if (unlikely(err)) + pr_err_ratelimited("Failed to update LCD display: %d\n", err); +} + +static struct img_ascii_lcd_config sead3_config = { + .num_chars = 16, + .external_regmap = true, + .update = sead3_update, +}; + +static const struct of_device_id img_ascii_lcd_matches[] = { + { .compatible = "img,boston-lcd", .data = &boston_config }, + { .compatible = "mti,malta-lcd", .data = &malta_config }, + { .compatible = "mti,sead3-lcd", .data = &sead3_config }, +}; + +/** + * img_ascii_lcd_scroll() - scroll the display by a character + * @arg: really a pointer to the private data structure + * + * Scroll the current message along the LCD by one character, rearming the + * timer if required. + */ +static void img_ascii_lcd_scroll(unsigned long arg) +{ + struct img_ascii_lcd_ctx *ctx = (struct img_ascii_lcd_ctx *)arg; + unsigned int i, ch = ctx->scroll_pos; + unsigned int num_chars = ctx->cfg->num_chars; + + /* update the current message string */ + for (i = 0; i < num_chars;) { + /* copy as many characters from the string as possible */ + for (; i < num_chars && ch < ctx->message_len; i++, ch++) + ctx->curr[i] = ctx->message[ch]; + + /* wrap around to the start of the string */ + ch = 0; + } + + /* update the LCD */ + ctx->cfg->update(ctx); + + /* move on to the next character */ + ctx->scroll_pos++; + ctx->scroll_pos %= ctx->message_len; + + /* rearm the timer */ + if (ctx->message_len > ctx->cfg->num_chars) + mod_timer(&ctx->timer, jiffies + ctx->scroll_rate); +} + +/** + * img_ascii_lcd_display() - set the message to be displayed + * @ctx: pointer to the private data structure + * @msg: the message to display + * @count: length of msg, or -1 + * + * Display a new message @msg on the LCD. @msg can be longer than the number of + * characters the LCD can display, in which case it will begin scrolling across + * the LCD display. 
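
The window fill in img_ascii_lcd_scroll() above is equivalent to this self-contained sketch; note it assumes len > 0 (an empty message would spin here, and would also hit the scroll_pos %= message_len modulo in the driver):

static void fill_window(char *curr, unsigned int num_chars,
			const char *msg, unsigned int len, unsigned int pos)
{
	unsigned int i, ch = pos;

	for (i = 0; i < num_chars;) {
		/* copy as much of the message as fits, then wrap around */
		for (; i < num_chars && ch < len; i++, ch++)
			curr[i] = msg[ch];
		ch = 0;
	}
}
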
+ * + * Return: 0 on success, -ENOMEM on memory allocation failure + */ +static int img_ascii_lcd_display(struct img_ascii_lcd_ctx *ctx, + const char *msg, ssize_t count) +{ + char *new_msg; + + /* stop the scroll timer */ + del_timer_sync(&ctx->timer); + + if (count == -1) + count = strlen(msg); + + /* if the string ends with a newline, trim it */ + if (msg[count - 1] == '\n') + count--; + + new_msg = devm_kmalloc(&ctx->pdev->dev, count + 1, GFP_KERNEL); + if (!new_msg) + return -ENOMEM; + + memcpy(new_msg, msg, count); + new_msg[count] = 0; + + if (ctx->message) + devm_kfree(&ctx->pdev->dev, ctx->message); + + ctx->message = new_msg; + ctx->message_len = count; + ctx->scroll_pos = 0; + + /* update the LCD */ + img_ascii_lcd_scroll((unsigned long)ctx); + + return 0; +} + +/** + * message_show() - read message via sysfs + * @dev: the LCD device + * @attr: the LCD message attribute + * @buf: the buffer to read the message into + * + * Read the current message being displayed or scrolled across the LCD display + * into @buf, for reads from sysfs. + * + * Return: the number of characters written to @buf + */ +static ssize_t message_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct img_ascii_lcd_ctx *ctx = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", ctx->message); +} + +/** + * message_store() - write a new message via sysfs + * @dev: the LCD device + * @attr: the LCD message attribute + * @buf: the buffer containing the new message + * @count: the size of the message in @buf + * + * Write a new message to display or scroll across the LCD display from sysfs. + * + * Return: the size of the message on success, else -ERRNO + */ +static ssize_t message_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct img_ascii_lcd_ctx *ctx = dev_get_drvdata(dev); + int err; + + err = img_ascii_lcd_display(ctx, buf, count); + return err ?: count; +} + +static DEVICE_ATTR_RW(message); + +/** + * img_ascii_lcd_probe() - probe an LCD display device + * @pdev: the LCD platform device + * + * Probe an LCD display device, ensuring that we have the required resources in + * order to access the LCD & setting up private data as well as sysfs files. 
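
Once bound, the attribute can be exercised from user space; a hypothetical test program (the sysfs path is illustrative and depends on how the platform device is named):

#include <stdio.h>

int main(void)
{
	/* path is an assumption; look under /sys/devices/platform/ */
	FILE *f = fopen("/sys/devices/platform/lcd/message", "w");

	if (!f)
		return 1;
	fputs("Hello LCD", f);
	return fclose(f) ? 1 : 0;
}
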
+ * + * Return: 0 on success, else -ERRNO + */ +static int img_ascii_lcd_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + const struct img_ascii_lcd_config *cfg; + struct img_ascii_lcd_ctx *ctx; + struct resource *res; + int err; + + match = of_match_device(img_ascii_lcd_matches, &pdev->dev); + if (!match) + return -ENODEV; + + cfg = match->data; + ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx) + cfg->num_chars, + GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + if (cfg->external_regmap) { + ctx->regmap = syscon_node_to_regmap(pdev->dev.parent->of_node); + if (IS_ERR(ctx->regmap)) + return PTR_ERR(ctx->regmap); + + if (of_property_read_u32(pdev->dev.of_node, "offset", + &ctx->offset)) + return -EINVAL; + } else { + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ctx->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ctx->base)) + return PTR_ERR(ctx->base); + } + + ctx->pdev = pdev; + ctx->cfg = cfg; + ctx->message = NULL; + ctx->scroll_pos = 0; + ctx->scroll_rate = HZ / 2; + + /* initialise a timer for scrolling the message */ + init_timer(&ctx->timer); + ctx->timer.function = img_ascii_lcd_scroll; + ctx->timer.data = (unsigned long)ctx; + + platform_set_drvdata(pdev, ctx); + + /* display a default message */ + err = img_ascii_lcd_display(ctx, "Linux " UTS_RELEASE " ", -1); + if (err) + goto out_del_timer; + + err = device_create_file(&pdev->dev, &dev_attr_message); + if (err) + goto out_del_timer; + + return 0; +out_del_timer: + del_timer_sync(&ctx->timer); + return err; +} + +/** + * img_ascii_lcd_remove() - remove an LCD display device + * @pdev: the LCD platform device + * + * Remove an LCD display device, freeing private resources & ensuring that the + * driver stops using the LCD display registers. + * + * Return: 0 + */ +static int img_ascii_lcd_remove(struct platform_device *pdev) +{ + struct img_ascii_lcd_ctx *ctx = platform_get_drvdata(pdev); + + device_remove_file(&pdev->dev, &dev_attr_message); + del_timer_sync(&ctx->timer); + return 0; +} + +static struct platform_driver img_ascii_lcd_driver = { + .driver = { + .name = "img-ascii-lcd", + .of_match_table = img_ascii_lcd_matches, + }, + .probe = img_ascii_lcd_probe, + .remove = img_ascii_lcd_remove, +}; +module_platform_driver(img_ascii_lcd_driver); diff --git a/drivers/char/random.c b/drivers/char/random.c index d131e152c8ce..d6876d506220 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -479,8 +479,8 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf, static void crng_reseed(struct crng_state *crng, struct entropy_store *r); static void push_to_pool(struct work_struct *work); -static __u32 input_pool_data[INPUT_POOL_WORDS]; -static __u32 blocking_pool_data[OUTPUT_POOL_WORDS]; +static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy; +static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy; static struct entropy_store input_pool = { .poolinfo = &poolinfo_table[0], diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c index 480a777db577..7c19d9b22785 100644 --- a/drivers/char/tb0219.c +++ b/drivers/char/tb0219.c @@ -21,6 +21,7 @@ #include <linux/fs.h> #include <linux/init.h> #include <linux/module.h> +#include <linux/uaccess.h> #include <asm/io.h> #include <asm/reboot.h> diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 1b2f28f69a81..4852d9efe74e 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -80,11 +80,17 @@ static int cppc_cpufreq_set_target(struct 
cpufreq_policy *policy, { struct cppc_cpudata *cpu; struct cpufreq_freqs freqs; + u32 desired_perf; int ret = 0; cpu = all_cpu_data[policy->cpu]; - cpu->perf_ctrls.desired_perf = (u64)target_freq * policy->max / cppc_dmi_max_khz; + desired_perf = (u64)target_freq * cpu->perf_caps.highest_perf / cppc_dmi_max_khz; + /* Return if it is exactly the same perf */ + if (desired_perf == cpu->perf_ctrls.desired_perf) + return ret; + + cpu->perf_ctrls.desired_perf = desired_perf; freqs.old = policy->cur; freqs.new = target_freq; diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 18da4f8051d3..13475890d792 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -17,6 +17,7 @@ struct cs_policy_dbs_info { struct policy_dbs_info policy_dbs; unsigned int down_skip; + unsigned int requested_freq; }; static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs) @@ -61,6 +62,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) { struct policy_dbs_info *policy_dbs = policy->governor_data; struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs); + unsigned int requested_freq = dbs_info->requested_freq; struct dbs_data *dbs_data = policy_dbs->dbs_data; struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; unsigned int load = dbs_update(policy); @@ -72,10 +74,16 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) if (cs_tuners->freq_step == 0) goto out; + /* + * If requested_freq is out of range, it is likely that the limits + * changed in the meantime, so fall back to current frequency in that + * case. + */ + if (requested_freq > policy->max || requested_freq < policy->min) + requested_freq = policy->cur; + /* Check for frequency increase */ if (load > dbs_data->up_threshold) { - unsigned int requested_freq = policy->cur; - dbs_info->down_skip = 0; /* if we are already at full speed then break out early */ @@ -83,8 +91,11 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) goto out; requested_freq += get_freq_target(cs_tuners, policy); + if (requested_freq > policy->max) + requested_freq = policy->max; __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H); + dbs_info->requested_freq = requested_freq; goto out; } @@ -95,7 +106,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) /* Check for frequency decrease */ if (load < cs_tuners->down_threshold) { - unsigned int freq_target, requested_freq = policy->cur; + unsigned int freq_target; /* * if we cannot reduce the frequency anymore, break out early */ @@ -109,6 +120,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) requested_freq = policy->min; __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L); + dbs_info->requested_freq = requested_freq; } out: @@ -287,6 +299,7 @@ static void cs_start(struct cpufreq_policy *policy) struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data); dbs_info->down_skip = 0; + dbs_info->requested_freq = policy->cur; } static struct dbs_governor cs_governor = { diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 806f2039571e..f535f8123258 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -225,7 +225,7 @@ struct cpudata { static struct cpudata **all_cpu_data; /** - * struct pid_adjust_policy - Stores static PID configuration data + * struct pstate_adjust_policy - Stores static PID configuration data * @sample_rate_ms: PID 
calculation sample rate in ms * @sample_rate_ns: Sample rate calculation in ns * @deadband: PID deadband @@ -562,12 +562,12 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask) int min, hw_min, max, hw_max, cpu, range, adj_range; u64 value, cap; - rdmsrl(MSR_HWP_CAPABILITIES, cap); - hw_min = HWP_LOWEST_PERF(cap); - hw_max = HWP_HIGHEST_PERF(cap); - range = hw_max - hw_min; - for_each_cpu(cpu, cpumask) { + rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); + hw_min = HWP_LOWEST_PERF(cap); + hw_max = HWP_HIGHEST_PERF(cap); + range = hw_max - hw_min; + rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); adj_range = limits->min_perf_pct * range / 100; min = hw_min + adj_range; @@ -1232,6 +1232,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) { struct sample *sample = &cpu->sample; int32_t busy_frac, boost; + int target, avg_pstate; busy_frac = div_fp(sample->mperf, sample->tsc); @@ -1242,7 +1243,26 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) busy_frac = boost; sample->busy_scaled = busy_frac * 100; - return get_avg_pstate(cpu) - pid_calc(&cpu->pid, sample->busy_scaled); + + target = limits->no_turbo || limits->turbo_disabled ? + cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; + target += target >> 2; + target = mul_fp(target, busy_frac); + if (target < cpu->pstate.min_pstate) + target = cpu->pstate.min_pstate; + + /* + * If the average P-state during the previous cycle was higher than the + * current target, add 50% of the difference to the target to reduce + * possible performance oscillations and offset possible performance + * loss related to moving the workload from one CPU to another within + * a package/module. + */ + avg_pstate = get_avg_pstate(cpu); + if (avg_pstate > target) + target += (avg_pstate - target) >> 1; + + return target; } static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) @@ -1251,10 +1271,11 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) u64 duration_ns; /* - * perf_scaled is the average performance during the last sampling - * period scaled by the ratio of the maximum P-state to the P-state - * requested last time (in percent). That measures the system's - * response to the previous P-state selection. + * perf_scaled is the ratio of the average P-state during the last + * sampling period to the P-state requested last time (in percent). + * + * That measures the system's response to the previous P-state + * selection. 
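
Numerically, the CPU-load path above works out to the following (a sketch in plain 64-bit math rather than the driver's fixed-point helpers):

static int example_target_pstate(int limit_pstate, int min_pstate,
				 int avg_pstate, u64 mperf, u64 tsc)
{
	/* start from the max (or turbo) P-state plus 25% headroom */
	int target = limit_pstate + (limit_pstate >> 2);

	/* scale by the measured busy fraction mperf/tsc and clamp */
	target = (int)div64_u64((u64)target * mperf, tsc);
	if (target < min_pstate)
		target = min_pstate;

	/* pull halfway up toward the recent average P-state */
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;

	return target;
}
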
*/ max_pstate = cpu->pstate.max_pstate_physical; current_pstate = cpu->pstate.current_pstate; diff --git a/drivers/cpuidle/Kconfig.mips b/drivers/cpuidle/Kconfig.mips index 4102be01d06a..512ee37b374b 100644 --- a/drivers/cpuidle/Kconfig.mips +++ b/drivers/cpuidle/Kconfig.mips @@ -5,7 +5,7 @@ config MIPS_CPS_CPUIDLE bool "CPU Idle driver for MIPS CPS platforms" depends on CPU_IDLE && MIPS_CPS depends on SYS_SUPPORTS_MIPS_CPS - select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT + select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT || CPU_MIPSR6 select GENERIC_CLOCKEVENTS_BROADCAST if SMP select MIPS_CPS_PM default y diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c index 1adb6980b707..926ba9871c62 100644 --- a/drivers/cpuidle/cpuidle-cps.c +++ b/drivers/cpuidle/cpuidle-cps.c @@ -163,7 +163,7 @@ static int __init cps_cpuidle_init(void) core = cpu_data[cpu].core; device = &per_cpu(cpuidle_dev, cpu); device->cpu = cpu; -#ifdef CONFIG_MIPS_MT +#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]); #endif diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 478006b7764a..bf3ea7603a58 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -137,6 +137,10 @@ static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) cur_time = jiffies; + /* Immediately exit if previous_freq is not initialized yet. */ + if (!devfreq->previous_freq) + goto out; + prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq); if (prev_lev < 0) { ret = prev_lev; @@ -594,17 +598,19 @@ struct devfreq *devfreq_add_device(struct device *dev, if (devfreq->governor) err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START, NULL); - mutex_unlock(&devfreq_list_lock); if (err) { dev_err(dev, "%s: Unable to start governor for the device\n", __func__); goto err_init; } + mutex_unlock(&devfreq_list_lock); return devfreq; err_init: list_del(&devfreq->node); + mutex_unlock(&devfreq_list_lock); + device_unregister(&devfreq->dev); err_out: return ERR_PTR(err); diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig index 0fdae8608961..cd949800eed9 100644 --- a/drivers/devfreq/event/Kconfig +++ b/drivers/devfreq/event/Kconfig @@ -17,6 +17,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver" depends on ARCH_EXYNOS || COMPILE_TEST select PM_OPP + select REGMAP_MMIO help This adds the devfreq-event driver for Exynos SoC. It provides NoC (Network on Chip) Probe counters to measure the bandwidth of the AXI bus.
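
Looking back at the cppc_cpufreq change earlier, the frequency-to-performance conversion it introduces is simply (a sketch):

static u32 khz_to_desired_perf(u64 target_khz, u32 highest_perf,
			       u64 cppc_dmi_max_khz)
{
	/* scale into the abstract performance range using highest_perf
	 * (instead of policy->max); the caller skips the register write
	 * when the result equals the previously requested value */
	return (u32)(target_khz * highest_perf / cppc_dmi_max_khz);
}
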
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c index a5841403bde8..49e712aca0c1 100644 --- a/drivers/devfreq/event/exynos-nocp.c +++ b/drivers/devfreq/event/exynos-nocp.c @@ -176,9 +176,6 @@ static int exynos_nocp_get_event(struct devfreq_event_dev *edev, return 0; out: - edata->load_count = 0; - edata->total_count = 0; - dev_err(nocp->dev, "Failed to read the counter of NoC probe device\n"); return ret; diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index 631c977b0da5..180f0a96528c 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -566,6 +566,11 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused) lynx->registers = ioremap_nocache(pci_resource_start(dev, 0), PCILYNX_MAX_REGISTER); + if (lynx->registers == NULL) { + dev_err(&dev->dev, "Failed to map registers\n"); + ret = -ENOMEM; + goto fail_deallocate_lynx; + } lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device, sizeof(struct pcl), &lynx->rcv_start_pcl_bus); @@ -578,7 +583,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused) lynx->rcv_buffer == NULL) { dev_err(&dev->dev, "Failed to allocate receive buffer\n"); ret = -ENOMEM; - goto fail_deallocate; + goto fail_deallocate_buffers; } lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus); lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID); @@ -641,7 +646,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused) dev_err(&dev->dev, "Failed to allocate shared interrupt %d\n", dev->irq); ret = -EIO; - goto fail_deallocate; + goto fail_deallocate_buffers; } lynx->misc.parent = &dev->dev; @@ -668,7 +673,7 @@ fail_free_irq: reg_write(lynx, PCI_INT_ENABLE, 0); free_irq(lynx->pci_device->irq, lynx); -fail_deallocate: +fail_deallocate_buffers: if (lynx->rcv_start_pcl) pci_free_consistent(lynx->pci_device, sizeof(struct pcl), lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); @@ -679,6 +684,8 @@ fail_deallocate: pci_free_consistent(lynx->pci_device, PAGE_SIZE, lynx->rcv_buffer, lynx->rcv_buffer_bus); iounmap(lynx->registers); + +fail_deallocate_lynx: kfree(lynx); fail_disable: diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 45c8817d068c..e422568e14ad 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -794,6 +794,22 @@ static int pca953x_probe(struct i2c_client *client, } mutex_init(&chip->i2c_lock); + /* + * In case we have an i2c-mux controlled by a GPIO provided by an + * expander using the same driver higher on the device tree, read the + * i2c adapter nesting depth and use the retrieved value as lockdep + * subclass for chip->i2c_lock. + * + * REVISIT: This solution is not complete. It protects us from lockdep + * false positives when the expander controlling the i2c-mux is on + * a different level on the device tree, but not when it's on the same + * level on a different branch (in which case the subclass number + * would be the same). + * + * TODO: Once a correct solution is developed, a similar fix should be + * applied to all other i2c-controlled GPIO expanders (and potentially + * regmap-i2c). 
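
In isolation, the locking pattern the pca953x change below introduces looks like this (a sketch; pca953x applies it to its own i2c_lock):

static void init_expander_lock(struct mutex *lock, struct i2c_client *client)
{
	mutex_init(lock);
	/* key the mutex by the adapter's nesting depth so lockdep can tell
	 * an expander behind an i2c-mux apart from the expander whose GPIO
	 * selects that mux */
	lockdep_set_subclass(lock, i2c_adapter_depth(client->adapter));
}
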
+ */ lockdep_set_subclass(&chip->i2c_lock, i2c_adapter_depth(client->adapter)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 2e3a0543760d..e3281d4e3e41 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -765,7 +765,7 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) return ret; } -static void amdgpu_connector_destroy(struct drm_connector *connector) +static void amdgpu_connector_unregister(struct drm_connector *connector) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); @@ -773,6 +773,12 @@ static void amdgpu_connector_destroy(struct drm_connector *connector) drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux); amdgpu_connector->ddc_bus->has_aux = false; } +} + +static void amdgpu_connector_destroy(struct drm_connector *connector) +{ + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + amdgpu_connector_free_edid(connector); kfree(amdgpu_connector->con_priv); drm_connector_unregister(connector); @@ -826,6 +832,7 @@ static const struct drm_connector_funcs amdgpu_connector_lvds_funcs = { .dpms = drm_helper_connector_dpms, .detect = amdgpu_connector_lvds_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .early_unregister = amdgpu_connector_unregister, .destroy = amdgpu_connector_destroy, .set_property = amdgpu_connector_set_lcd_property, }; @@ -936,6 +943,7 @@ static const struct drm_connector_funcs amdgpu_connector_vga_funcs = { .dpms = drm_helper_connector_dpms, .detect = amdgpu_connector_vga_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .early_unregister = amdgpu_connector_unregister, .destroy = amdgpu_connector_destroy, .set_property = amdgpu_connector_set_property, }; @@ -1203,6 +1211,7 @@ static const struct drm_connector_funcs amdgpu_connector_dvi_funcs = { .detect = amdgpu_connector_dvi_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = amdgpu_connector_set_property, + .early_unregister = amdgpu_connector_unregister, .destroy = amdgpu_connector_destroy, .force = amdgpu_connector_dvi_force, }; @@ -1493,6 +1502,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = { .detect = amdgpu_connector_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = amdgpu_connector_set_property, + .early_unregister = amdgpu_connector_unregister, .destroy = amdgpu_connector_destroy, .force = amdgpu_connector_dvi_force, }; @@ -1502,6 +1512,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = { .detect = amdgpu_connector_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = amdgpu_connector_set_lcd_property, + .early_unregister = amdgpu_connector_unregister, .destroy = amdgpu_connector_destroy, .force = amdgpu_connector_dvi_force, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index e203e5561107..a5e2fcbef0f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -43,6 +43,9 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) ctx->rings[i].sequence = 1; ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i]; } + + ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); + /* create context entity for each ring */ for (i = 0; i < adev->num_rings; i++) { struct amdgpu_ring *ring = adev->rings[i]; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7dbe85d67d26..b4f4a9239069 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1408,16 +1408,6 @@ static int amdgpu_late_init(struct amdgpu_device *adev) for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_block_status[i].valid) continue; - if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD || - adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE) - continue; - /* enable clockgating to save power */ - r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_GATE); - if (r) { - DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); - return r; - } if (adev->ip_blocks[i].funcs->late_init) { r = adev->ip_blocks[i].funcs->late_init((void *)adev); if (r) { @@ -1426,6 +1416,18 @@ static int amdgpu_late_init(struct amdgpu_device *adev) } adev->ip_block_status[i].late_initialized = true; } + /* skip CG for VCE/UVD, it's handled specially */ + if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD && + adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) { + /* enable clockgating to save power */ + r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, + AMD_CG_STATE_GATE); + if (r) { + DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", + adev->ip_blocks[i].funcs->name, r); + return r; + } + } } return 0; @@ -1435,6 +1437,30 @@ static int amdgpu_fini(struct amdgpu_device *adev) { int i, r; + /* need to disable SMC first */ + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].hw) + continue; + if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) { + /* ungate blocks before hw fini so that we can shutdown the blocks safely */ + r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, + AMD_CG_STATE_UNGATE); + if (r) { + DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", + adev->ip_blocks[i].funcs->name, r); + return r; + } + r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); + /* XXX handle errors */ + if (r) { + DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", + adev->ip_blocks[i].funcs->name, r); + } + adev->ip_block_status[i].hw = false; + break; + } + } + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_block_status[i].hw) continue; @@ -2073,7 +2099,8 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) if (!adev->ip_block_status[i].valid) continue; if (adev->ip_blocks[i].funcs->check_soft_reset) - adev->ip_blocks[i].funcs->check_soft_reset(adev); + adev->ip_block_status[i].hang = + adev->ip_blocks[i].funcs->check_soft_reset(adev); if (adev->ip_block_status[i].hang) { DRM_INFO("IP block:%d is hang!\n", i); asic_hang = true; @@ -2102,12 +2129,20 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev) static bool amdgpu_need_full_reset(struct amdgpu_device *adev) { - if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang || - adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang || - adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang || - adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) { - DRM_INFO("Some block need full reset!\n"); - return true; + int i; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) || + (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) || + (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) || + (adev->ip_blocks[i].type == 
AMD_IP_BLOCK_TYPE_DCE)) { + if (adev->ip_block_status[i].hang) { + DRM_INFO("Some block need full reset!\n"); + return true; + } + } } return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index fe36caf1b7d7..14f57d9915e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -113,24 +113,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, printk("\n"); } + u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_crtc *crtc; struct amdgpu_crtc *amdgpu_crtc; - u32 line_time_us, vblank_lines; + u32 vblank_in_pixels; u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { amdgpu_crtc = to_amdgpu_crtc(crtc); if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { - line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) / - amdgpu_crtc->hw_mode.clock; - vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end - + vblank_in_pixels = + amdgpu_crtc->hw_mode.crtc_htotal * + (amdgpu_crtc->hw_mode.crtc_vblank_end - amdgpu_crtc->hw_mode.crtc_vdisplay + - (amdgpu_crtc->v_border * 2); - vblank_time_us = vblank_lines * line_time_us; + (amdgpu_crtc->v_border * 2)); + + vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; break; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index e1fa8731d1e2..3cb5e903cd62 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -345,8 +345,8 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, ent = debugfs_create_file(name, S_IFREG | S_IRUGO, root, ring, &amdgpu_debugfs_ring_fops); - if (IS_ERR(ent)) - return PTR_ERR(ent); + if (!ent) + return -ENOMEM; i_size_write(ent->d_inode, ring->ring_size + 12); ring->ent = ent; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index f80a0834e889..3c082e143730 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -1514,14 +1514,16 @@ static int cz_dpm_set_powergating_state(void *handle, return 0; } -/* borrowed from KV, need future unify */ static int cz_dpm_get_temperature(struct amdgpu_device *adev) { int actual_temp = 0; - uint32_t temp = RREG32_SMC(0xC0300E0C); + uint32_t val = RREG32_SMC(ixTHM_TCON_CUR_TMP); + uint32_t temp = REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP); - if (temp) + if (REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL)) actual_temp = 1000 * ((temp / 8) - 49); + else + actual_temp = 1000 * (temp / 8); return actual_temp; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 613ebb7ed50f..4108c686aa7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -3188,16 +3188,11 @@ static int dce_v10_0_wait_for_idle(void *handle) return 0; } -static int dce_v10_0_check_soft_reset(void *handle) +static bool dce_v10_0_check_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (dce_v10_0_is_display_hung(adev)) - adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true; - else - adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false; - - return 0; + return dce_v10_0_is_display_hung(adev); } static int dce_v10_0_soft_reset(void 
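/*
 * Editor's sketch, not part of the patch: every ->check_soft_reset
 * implementation touched by this series (dce/gfx/gmc/sdma/tonga_ih/uvd/vce)
 * switches from writing adev->ip_block_status[...].hang itself to returning
 * bool, while caching the reset bits it computed in its own engine struct so
 * the pre/soft/post stages can test them directly. Shape of the new
 * contract, modeled on the gmc_v8_0 hunk below (register names illustrative):
 */
static bool example_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	if (RREG32(mmSRBM_STATUS) & SRBM_STATUS__MCB_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_MC, 1);

	/* cached so the pre/soft/post soft-reset stages can bail out early */
	adev->mc.srbm_soft_reset = srbm_soft_reset;
	/* amdgpu_check_soft_reset() now stores this in ip_block_status[i].hang */
	return srbm_soft_reset != 0;
}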
*handle) @@ -3205,9 +3200,6 @@ static int dce_v10_0_soft_reset(void *handle) u32 srbm_soft_reset = 0, tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) - return 0; - if (dce_v10_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 6c6ff57b1c95..ee6a48a09214 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4087,14 +4087,21 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev) static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) { int r; + u32 tmp; gfx_v8_0_rlc_stop(adev); /* disable CG */ - WREG32(mmRLC_CGCG_CGLS_CTRL, 0); + tmp = RREG32(mmRLC_CGCG_CGLS_CTRL); + tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | + RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); + WREG32(mmRLC_CGCG_CGLS_CTRL, tmp); if (adev->asic_type == CHIP_POLARIS11 || - adev->asic_type == CHIP_POLARIS10) - WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0); + adev->asic_type == CHIP_POLARIS10) { + tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D); + tmp &= ~0x3; + WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp); + } /* disable PG */ WREG32(mmRLC_PG_CNTL, 0); @@ -5137,7 +5144,7 @@ static int gfx_v8_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int gfx_v8_0_check_soft_reset(void *handle) +static bool gfx_v8_0_check_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 grbm_soft_reset = 0, srbm_soft_reset = 0; @@ -5189,16 +5196,14 @@ static int gfx_v8_0_check_soft_reset(void *handle) SRBM_SOFT_RESET, SOFT_RESET_SEM, 1); if (grbm_soft_reset || srbm_soft_reset) { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true; adev->gfx.grbm_soft_reset = grbm_soft_reset; adev->gfx.srbm_soft_reset = srbm_soft_reset; + return true; } else { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false; adev->gfx.grbm_soft_reset = 0; adev->gfx.srbm_soft_reset = 0; + return false; } - - return 0; } static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev, @@ -5226,7 +5231,8 @@ static int gfx_v8_0_pre_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 grbm_soft_reset = 0, srbm_soft_reset = 0; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + if ((!adev->gfx.grbm_soft_reset) && + (!adev->gfx.srbm_soft_reset)) return 0; grbm_soft_reset = adev->gfx.grbm_soft_reset; @@ -5264,7 +5270,8 @@ static int gfx_v8_0_soft_reset(void *handle) u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + if ((!adev->gfx.grbm_soft_reset) && + (!adev->gfx.srbm_soft_reset)) return 0; grbm_soft_reset = adev->gfx.grbm_soft_reset; @@ -5334,7 +5341,8 @@ static int gfx_v8_0_post_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 grbm_soft_reset = 0, srbm_soft_reset = 0; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + if ((!adev->gfx.grbm_soft_reset) && + (!adev->gfx.srbm_soft_reset)) return 0; grbm_soft_reset = adev->gfx.grbm_soft_reset; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 1b319f5bc696..c22ef140a542 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1099,7 +1099,7 @@ static int gmc_v8_0_wait_for_idle(void *handle) } -static int gmc_v8_0_check_soft_reset(void *handle) +static bool gmc_v8_0_check_soft_reset(void *handle) { u32 srbm_soft_reset = 
0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1116,20 +1116,19 @@ static int gmc_v8_0_check_soft_reset(void *handle) SRBM_SOFT_RESET, SOFT_RESET_MC, 1); } if (srbm_soft_reset) { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true; adev->mc.srbm_soft_reset = srbm_soft_reset; + return true; } else { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false; adev->mc.srbm_soft_reset = 0; + return false; } - return 0; } static int gmc_v8_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + if (!adev->mc.srbm_soft_reset) return 0; gmc_v8_0_mc_stop(adev, &adev->mc.save); @@ -1145,7 +1144,7 @@ static int gmc_v8_0_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + if (!adev->mc.srbm_soft_reset) return 0; srbm_soft_reset = adev->mc.srbm_soft_reset; @@ -1175,7 +1174,7 @@ static int gmc_v8_0_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + if (!adev->mc.srbm_soft_reset) return 0; gmc_v8_0_mc_resume(adev, &adev->mc.save); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index f325fd86430b..a9d10941fb53 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1268,7 +1268,7 @@ static int sdma_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v3_0_check_soft_reset(void *handle) +static bool sdma_v3_0_check_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset = 0; @@ -1281,14 +1281,12 @@ static int sdma_v3_0_check_soft_reset(void *handle) } if (srbm_soft_reset) { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true; adev->sdma.srbm_soft_reset = srbm_soft_reset; + return true; } else { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false; adev->sdma.srbm_soft_reset = 0; + return false; } - - return 0; } static int sdma_v3_0_pre_soft_reset(void *handle) @@ -1296,7 +1294,7 @@ static int sdma_v3_0_pre_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset = 0; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + if (!adev->sdma.srbm_soft_reset) return 0; srbm_soft_reset = adev->sdma.srbm_soft_reset; @@ -1315,7 +1313,7 @@ static int sdma_v3_0_post_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset = 0; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + if (!adev->sdma.srbm_soft_reset) return 0; srbm_soft_reset = adev->sdma.srbm_soft_reset; @@ -1335,7 +1333,7 @@ static int sdma_v3_0_soft_reset(void *handle) u32 srbm_soft_reset = 0; u32 tmp; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + if (!adev->sdma.srbm_soft_reset) return 0; srbm_soft_reset = adev->sdma.srbm_soft_reset; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 8bd08925b370..3de7bca5854b 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -3499,6 +3499,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, max_sclk = 75000; max_mclk = 80000; } + /* Limit clocks for some HD8600 parts */ + if (adev->pdev->device == 0x6660 && + adev->pdev->revision == 0x83) { + max_sclk = 75000; + max_mclk = 
80000; + } if (rps->vce_active) { rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index d127d59f953a..b4ea229bb449 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -373,7 +373,7 @@ static int tonga_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int tonga_ih_check_soft_reset(void *handle) +static bool tonga_ih_check_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset = 0; @@ -384,21 +384,19 @@ static int tonga_ih_check_soft_reset(void *handle) SOFT_RESET_IH, 1); if (srbm_soft_reset) { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true; adev->irq.srbm_soft_reset = srbm_soft_reset; + return true; } else { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false; adev->irq.srbm_soft_reset = 0; + return false; } - - return 0; } static int tonga_ih_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + if (!adev->irq.srbm_soft_reset) return 0; return tonga_ih_hw_fini(adev); @@ -408,7 +406,7 @@ static int tonga_ih_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + if (!adev->irq.srbm_soft_reset) return 0; return tonga_ih_hw_init(adev); @@ -419,7 +417,7 @@ static int tonga_ih_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + if (!adev->irq.srbm_soft_reset) return 0; srbm_soft_reset = adev->irq.srbm_soft_reset; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index e0fd9f21ed95..ab3df6d75656 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -770,7 +770,7 @@ static int uvd_v6_0_wait_for_idle(void *handle) } #define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd -static int uvd_v6_0_check_soft_reset(void *handle) +static bool uvd_v6_0_check_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset = 0; @@ -782,19 +782,19 @@ static int uvd_v6_0_check_soft_reset(void *handle) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); if (srbm_soft_reset) { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true; adev->uvd.srbm_soft_reset = srbm_soft_reset; + return true; } else { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false; adev->uvd.srbm_soft_reset = 0; + return false; } - return 0; } + static int uvd_v6_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + if (!adev->uvd.srbm_soft_reset) return 0; uvd_v6_0_stop(adev); @@ -806,7 +806,7 @@ static int uvd_v6_0_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + if (!adev->uvd.srbm_soft_reset) return 0; srbm_soft_reset = adev->uvd.srbm_soft_reset; @@ -836,7 +836,7 @@ static int uvd_v6_0_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + if (!adev->uvd.srbm_soft_reset) return 0; mdelay(5); diff --git 
a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 3f6db4ec0102..8533269ec160 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -561,7 +561,7 @@ static int vce_v3_0_wait_for_idle(void *handle) #define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \ VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK) -static int vce_v3_0_check_soft_reset(void *handle) +static bool vce_v3_0_check_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset = 0; @@ -591,16 +591,15 @@ static int vce_v3_0_check_soft_reset(void *handle) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); } WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + mutex_unlock(&adev->grbm_idx_mutex); if (srbm_soft_reset) { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true; adev->vce.srbm_soft_reset = srbm_soft_reset; + return true; } else { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false; adev->vce.srbm_soft_reset = 0; + return false; } - mutex_unlock(&adev->grbm_idx_mutex); - return 0; } static int vce_v3_0_soft_reset(void *handle) @@ -608,7 +607,7 @@ static int vce_v3_0_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + if (!adev->vce.srbm_soft_reset) return 0; srbm_soft_reset = adev->vce.srbm_soft_reset; @@ -638,7 +637,7 @@ static int vce_v3_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + if (!adev->vce.srbm_soft_reset) return 0; mdelay(5); @@ -651,7 +650,7 @@ static int vce_v3_0_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + if (!adev->vce.srbm_soft_reset) return 0; mdelay(5); diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index c934b78c9e2f..bec8125bceb0 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -165,7 +165,7 @@ struct amd_ip_funcs { /* poll for idle */ int (*wait_for_idle)(void *handle); /* check soft reset the IP block */ - int (*check_soft_reset)(void *handle); + bool (*check_soft_reset)(void *handle); /* pre soft reset the IP block */ int (*pre_soft_reset)(void *handle); /* soft reset the IP block */ diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c index 92b117843875..8cee4e0f9fde 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c @@ -49,6 +49,7 @@ static const pem_event_action * const uninitialize_event[] = { uninitialize_display_phy_access_tasks, disable_gfx_voltage_island_power_gating_tasks, disable_gfx_clock_gating_tasks, + uninitialize_thermal_controller_tasks, set_boot_state_tasks, adjust_power_state_tasks, disable_dynamic_state_management_tasks, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 7e4fcbbbe086..960424913496 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1785,6 +1785,21 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c return 0; } +static int 
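/*
 * Editor's note, worked example (not part of the patch): the cz_dpm.c hunk
 * above and the cz_hwmgr.c helper below decode THM_TCON_CUR_TMP the same
 * way. CUR_TEMP is in 1/8 degC steps and CUR_TEMP_RANGE_SEL selects the
 * offset range: a raw CUR_TEMP of 904 reads as 904 / 8 - 49 = 64 degC
 * (64000 in millidegree units) with the range bit set, or 904 / 8 = 113 degC
 * with it clear. The old code instead applied the -49 offset whenever the
 * raw value was nonzero, misreporting the low range.
 */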
cz_thermal_get_temperature(struct pp_hwmgr *hwmgr) +{ + int actual_temp = 0; + uint32_t val = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP); + uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP); + + if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL)) + actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + else + actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + return actual_temp; +} + static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); @@ -1881,6 +1896,9 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) case AMDGPU_PP_SENSOR_VCE_POWER: *value = cz_hwmgr->vce_power_gated ? 0 : 1; return 0; + case AMDGPU_PP_SENSOR_GPU_TEMP: + *value = cz_thermal_get_temperature(hwmgr); + return 0; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 508245d49d33..609996c84ad5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1030,20 +1030,19 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); /* disable SCLK dpm */ - if (!data->sclk_dpm_key_disabled) - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_DPM_Disable) == 0), - "Failed to disable SCLK DPM!", - return -EINVAL); + if (!data->sclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), + "Trying to disable SCLK DPM when DPM is disabled", + return 0); + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable); + } /* disable MCLK dpm */ if (!data->mclk_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_Disable) == 0), - "Failed to disable MCLK DPM!", - return -EINVAL); + PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), + "Trying to disable MCLK DPM when DPM is disabled", + return 0); + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable); } return 0; @@ -1069,10 +1068,13 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr) return -EINVAL); } - if (smu7_disable_sclk_mclk_dpm(hwmgr)) { - printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!"); - return -EINVAL; - } + smu7_disable_sclk_mclk_dpm(hwmgr); + + PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), + "Trying to disable voltage DPM when DPM is disabled", + return 0); + + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable); return 0; } @@ -1226,7 +1228,7 @@ int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to enable VR hot GPIO interrupt!", result = tmp_result); - smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay); + smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay); tmp_result = smu7_enable_sclk_control(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), @@ -1306,6 +1308,12 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((tmp_result == 0), "Failed to disable thermal auto throttle!", result = tmp_result); + if (1 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) { + PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableAvfs)), + "Failed to disable AVFS!", + return -EINVAL); + } + tmp_result = smu7_stop_dpm(hwmgr); PP_ASSERT_WITH_CODE((tmp_result == 0), "Failed 
to stop DPM!", result = tmp_result); @@ -1452,8 +1460,10 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; - if (table_info != NULL) - sclk_table = table_info->vdd_dep_on_sclk; + if (table_info == NULL) + return -EINVAL; + + sclk_table = table_info->vdd_dep_on_sclk; for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; @@ -3802,13 +3812,15 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal) { - const struct smu7_power_state *psa = cast_const_phw_smu7_power_state(pstate1); - const struct smu7_power_state *psb = cast_const_phw_smu7_power_state(pstate2); + const struct smu7_power_state *psa; + const struct smu7_power_state *psb; int i; if (pstate1 == NULL || pstate2 == NULL || equal == NULL) return -EINVAL; + psa = cast_const_phw_smu7_power_state(pstate1); + psb = cast_const_phw_smu7_power_state(pstate2); /* If the two states don't even have the same number of performance levels they cannot be the same state. */ if (psa->performance_level_count != psb->performance_level_count) { *equal = false; @@ -4324,6 +4336,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .set_mclk_od = smu7_set_mclk_od, .get_clock_by_type = smu7_get_clock_by_type, .read_sensor = smu7_read_sensor, + .dynamic_state_management_disable = smu7_disable_dpm_tasks, }; uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c index eda802bc63c8..8c889caba420 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c @@ -2458,7 +2458,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), "Invalid VramInfo table.", return -EINVAL); - if (!data->is_memory_gddr5) { + if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) { table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; for (k = 0; k < table->num_entries; k++) { diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 1df2d33d0b40..ffb2ab389d1d 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -54,9 +54,6 @@ int drm_name_info(struct seq_file *m, void *data) mutex_lock(&dev->master_mutex); master = dev->master; - if (!master) - goto out_unlock; - seq_printf(m, "%s", dev->driver->name); if (dev->dev) seq_printf(m, " dev=%s", dev_name(dev->dev)); @@ -65,7 +62,6 @@ int drm_name_info(struct seq_file *m, void *data) if (dev->unique) seq_printf(m, " unique=%s", dev->unique); seq_printf(m, "\n"); -out_unlock: mutex_unlock(&dev->master_mutex); return 0; diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 6a4b020dd0b4..5a26eb4545aa 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev) struct drm_device *dev = rdev->ddev; struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; - u32 line_time_us, vblank_lines; + u32 vblank_in_pixels; u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { radeon_crtc = to_radeon_crtc(crtc); if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { - line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) / - radeon_crtc->hw_mode.clock; - vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end - - radeon_crtc->hw_mode.crtc_vdisplay + - (radeon_crtc->v_border * 2); - vblank_time_us = vblank_lines * line_time_us; + vblank_in_pixels = + radeon_crtc->hw_mode.crtc_htotal * + (radeon_crtc->hw_mode.crtc_vblank_end - + radeon_crtc->hw_mode.crtc_vdisplay + + (radeon_crtc->v_border * 2)); + + vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock; break; } } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 50e96d2c593d..e18839d52e3e 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -927,6 +927,16 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) return ret; } +static void radeon_connector_unregister(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + + if (radeon_connector->ddc_bus->has_aux) { + drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux); + radeon_connector->ddc_bus->has_aux = false; + } +} + static void radeon_connector_destroy(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -984,6 +994,7 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_lvds_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .set_property = radeon_lvds_set_property, }; @@ -1111,6 +1122,7 @@ static const struct drm_connector_funcs radeon_vga_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_vga_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .set_property = radeon_connector_set_property, }; @@ -1188,6 +1200,7 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_tv_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .set_property = radeon_connector_set_property, }; @@ -1519,6 +1532,7 @@ static const struct drm_connector_funcs radeon_dvi_connector_funcs = { .detect = radeon_dvi_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_connector_set_property, + .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, }; @@ -1832,6 +1846,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = { .detect = radeon_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_connector_set_property, + .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, }; @@ -1841,6 +1856,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = { .detect = radeon_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_lvds_set_property, + .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, 
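/*
 * Editor's note (not part of the patch): every connector funcs table here,
 * as in the amdgpu hunks earlier, gains .early_unregister so that
 * drm_dp_aux_unregister() runs while the DRM device is still registered,
 * instead of from .destroy. radeon_i2c_destroy() correspondingly stops
 * unregistering the AUX channel itself and just WARNs if has_aux is still
 * set by the time the i2c adapter is deleted.
 */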
}; @@ -1850,6 +1866,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = { .detect = radeon_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_lvds_set_property, + .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, }; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index b8ab30a7dd6d..cdb8cb568c15 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1675,20 +1675,20 @@ int radeon_modeset_init(struct radeon_device *rdev) void radeon_modeset_fini(struct radeon_device *rdev) { - radeon_fbdev_fini(rdev); - kfree(rdev->mode_info.bios_hardcoded_edid); - - /* free i2c buses */ - radeon_i2c_fini(rdev); - if (rdev->mode_info.mode_config_initialized) { - radeon_afmt_fini(rdev); drm_kms_helper_poll_fini(rdev->ddev); radeon_hpd_fini(rdev); drm_crtc_force_disable_all(rdev->ddev); + radeon_fbdev_fini(rdev); + radeon_afmt_fini(rdev); drm_mode_config_cleanup(rdev->ddev); rdev->mode_info.mode_config_initialized = false; } + + kfree(rdev->mode_info.bios_hardcoded_edid); + + /* free i2c buses */ + radeon_i2c_fini(rdev); } static bool is_hdtv_mode(const struct drm_display_mode *mode) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 91c8f4339566..00ea0002b539 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -96,9 +96,10 @@ * 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI * 2.46.0 - Add PFP_SYNC_ME support on evergreen * 2.47.0 - Add UVD_NO_OP register support + * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 47 +#define KMS_DRIVER_MINOR 48 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 021aa005623f..29f7817af821 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -982,9 +982,8 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) { if (!i2c) return; + WARN_ON(i2c->has_aux); i2c_del_adapter(&i2c->adapter); - if (i2c->has_aux) - drm_dp_aux_unregister(&i2c->aux); kfree(i2c); } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 7ee9aafbdf74..e402be8821c4 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -4431,6 +4431,7 @@ static bool si_vm_reg_valid(u32 reg) case SPI_CONFIG_CNTL: case SPI_CONFIG_CNTL_1: case TA_CNTL_AUX: + case TA_CS_BC_BASE_ADDR: return true; default: DRM_ERROR("Invalid register 0x%x in CS\n", reg); diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index eb220eecba78..65a911ddd509 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -1145,6 +1145,7 @@ #define SPI_LB_CU_MASK 0x9354 #define TA_CNTL_AUX 0x9508 +#define TA_CS_BC_BASE_ADDR 0x950C #define CC_RB_BACKEND_DISABLE 0x98F4 #define BACKEND_DISABLE(x) ((x) << 16) diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 98fffa3a09f7..5ab67219f71e 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1680,7 +1680,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, static void of_i2c_register_devices(struct i2c_adapter *adap) { - struct device_node *node; + struct 
device_node *bus, *node; /* Only register child devices if the adapter has a node pointer set */ if (!adap->dev.of_node) @@ -1688,11 +1688,17 @@ static void of_i2c_register_devices(struct i2c_adapter *adap) dev_dbg(&adap->dev, "of_i2c: walking child nodes\n"); - for_each_available_child_of_node(adap->dev.of_node, node) { + bus = of_get_child_by_name(adap->dev.of_node, "i2c-bus"); + if (!bus) + bus = of_node_get(adap->dev.of_node); + + for_each_available_child_of_node(bus, node) { if (of_node_test_and_set_flag(node, OF_POPULATED)) continue; of_i2c_register_device(adap, node); } + + of_node_put(bus); } static int of_dev_node_match(struct device *dev, void *data) diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index 19a418a1b631..fb3fb89640e5 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -89,4 +89,6 @@ source "drivers/infiniband/sw/rxe/Kconfig" source "drivers/infiniband/hw/hfi1/Kconfig" +source "drivers/infiniband/hw/qedr/Kconfig" + endif # INFINIBAND diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile index 21fe401ff178..e7a5ed9f6f3f 100644 --- a/drivers/infiniband/hw/Makefile +++ b/drivers/infiniband/hw/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/ obj-$(CONFIG_INFINIBAND_USNIC) += usnic/ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/ obj-$(CONFIG_INFINIBAND_HNS) += hns/ +obj-$(CONFIG_INFINIBAND_QEDR) += qedr/ diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 875597b0e69c..097365932b09 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -83,8 +83,7 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev, static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, struct hns_roce_mtt *hr_mtt, struct hns_roce_uar *hr_uar, - struct hns_roce_cq *hr_cq, int vector, - int collapsed) + struct hns_roce_cq *hr_cq, int vector) { struct hns_roce_cmd_mailbox *mailbox = NULL; struct hns_roce_cq_table *cq_table = NULL; @@ -153,6 +152,9 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, hr_cq->cons_index = 0; hr_cq->uar = hr_uar; + atomic_set(&hr_cq->refcount, 1); + init_completion(&hr_cq->free); + return 0; err_radix: @@ -192,6 +194,11 @@ static void hns_roce_free_cq(struct hns_roce_dev *hr_dev, /* Waiting interrupt process procedure carried out */ synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq); + /* wait for all interrupt processed */ + if (atomic_dec_and_test(&hr_cq->refcount)) + complete(&hr_cq->free); + wait_for_completion(&hr_cq->free); + spin_lock_irq(&cq_table->lock); radix_tree_delete(&cq_table->tree, hr_cq->cqn); spin_unlock_irq(&cq_table->lock); @@ -300,10 +307,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, cq_entries = roundup_pow_of_two((unsigned int)cq_entries); hr_cq->ib_cq.cqe = cq_entries - 1; - mutex_init(&hr_cq->resize_mutex); spin_lock_init(&hr_cq->lock); - hr_cq->hr_resize_buf = NULL; - hr_cq->resize_umem = NULL; if (context) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { @@ -338,8 +342,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, } /* Allocate cq index, fill cq_context */ - ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, - uar, hr_cq, vector, 0); + ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar, + hr_cq, vector); if (ret) { dev_err(dev, "Creat CQ .Failed to cq_alloc.\n"); goto err_mtt; @@ -353,12 +357,15 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device 
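/*
 * Editor's sketch (not part of the patch): the refcount/completion pair
 * added to the CQ closes a teardown race: hns_roce_free_cq() drops the
 * initial reference taken in hns_roce_cq_alloc() and then blocks until any
 * event handler still using the CQ releases it. A hypothetical event-side
 * counterpart would look like:
 */
static void example_cq_put(struct hns_roce_cq *hr_cq)
{
	if (atomic_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);	/* lets hns_roce_free_cq() proceed */
}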
*ib_dev, if (context) { if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) { ret = -EFAULT; - goto err_mtt; + goto err_cqc; } } return &hr_cq->ib_cq; +err_cqc: + hns_roce_free_cq(hr_dev, hr_cq); + err_mtt: hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); if (context) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index ea735800eb18..341731553a60 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -62,7 +62,7 @@ #define HNS_ROCE_AEQE_OF_VEC_NUM 1 /* 4G/4K = 1M */ -#define HNS_ROCE_SL_SHIFT 29 +#define HNS_ROCE_SL_SHIFT 28 #define HNS_ROCE_TCLASS_SHIFT 20 #define HNS_ROCE_FLOW_LABLE_MASK 0xfffff @@ -74,7 +74,9 @@ #define MR_TYPE_DMA 0x03 #define PKEY_ID 0xffff +#define GUID_LEN 8 #define NODE_DESC_SIZE 64 +#define DB_REG_OFFSET 0x1000 #define SERV_TYPE_RC 0 #define SERV_TYPE_RD 1 @@ -282,20 +284,11 @@ struct hns_roce_cq_buf { struct hns_roce_mtt hr_mtt; }; -struct hns_roce_cq_resize { - struct hns_roce_cq_buf hr_buf; - int cqe; -}; - struct hns_roce_cq { struct ib_cq ib_cq; struct hns_roce_cq_buf hr_buf; - /* pointer to store information after resize*/ - struct hns_roce_cq_resize *hr_resize_buf; spinlock_t lock; - struct mutex resize_mutex; struct ib_umem *umem; - struct ib_umem *resize_umem; void (*comp)(struct hns_roce_cq *); void (*event)(struct hns_roce_cq *, enum hns_roce_event); @@ -408,6 +401,7 @@ struct hns_roce_qp { u32 buff_size; struct mutex mutex; u8 port; + u8 phy_port; u8 sl; u8 resp_depth; u8 state; @@ -471,7 +465,6 @@ struct hns_roce_caps { u32 max_rq_desc_sz; /* 64 */ int max_qp_init_rdma; int max_qp_dest_rdma; - int sqp_start; int num_cqs; int max_cqes; int reserved_cqs; @@ -512,6 +505,8 @@ struct hns_roce_hw { void (*write_cqc)(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, dma_addr_t dma_handle, int nent, u32 vector); + int (*clear_hem)(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_table *table, int obj); int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr, @@ -533,7 +528,6 @@ struct hns_roce_dev { struct hns_roce_uar priv_uar; const char *irq_names[HNS_ROCE_MAX_IRQ_NUM]; spinlock_t sm_lock; - spinlock_t cq_db_lock; spinlock_t bt_cmd_lock; struct hns_roce_ib_iboe iboe; diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c index 98af7fecf2f1..21e21b03cfb5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_eq.c +++ b/drivers/infiniband/hw/hns/hns_roce_eq.c @@ -66,9 +66,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev, { struct device *dev = &hr_dev->pdev->dev; - qpn = roce_get_field(aeqe->event.qp_event.qp, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S); dev_warn(dev, "Local Work Queue Catastrophic Error.\n"); switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { @@ -96,13 +93,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev, default: break; } - - hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S), - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); } static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev, @@ -111,9 +101,6 @@ static void 
hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev, { struct device *dev = &hr_dev->pdev->dev; - qpn = roce_get_field(aeqe->event.qp_event.qp, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S); dev_warn(dev, "Local Access Violation Work Queue Error.\n"); switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { @@ -141,13 +128,69 @@ static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev, default: break; } +} + +static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev, + struct hns_roce_aeqe *aeqe, + int event_type) +{ + struct device *dev = &hr_dev->pdev->dev; + int phy_port; + int qpn; + + qpn = roce_get_field(aeqe->event.qp_event.qp, + HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, + HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S); + phy_port = roce_get_field(aeqe->event.qp_event.qp, + HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M, + HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S); + if (qpn <= 1) + qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port; + + switch (event_type) { + case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: + dev_warn(dev, "Invalid Req Local Work Queue Error.\n" + "QP %d, phy_port %d.\n", qpn, phy_port); + break; + case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: + hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn); + break; + case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: + hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn); + break; + default: + break; + } + + hns_roce_qp_event(hr_dev, qpn, event_type); +} + +static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev, + struct hns_roce_aeqe *aeqe, + int event_type) +{ + struct device *dev = &hr_dev->pdev->dev; + u32 cqn; + + cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq, + HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, + HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)); + + switch (event_type) { + case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: + dev_warn(dev, "CQ 0x%x access err.\n", cqn); + break; + case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: + dev_warn(dev, "CQ 0x%x overflow\n", cqn); + break; + case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID: + dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn); + break; + default: + break; + } - hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S), - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); + hns_roce_cq_event(hr_dev, cqn, event_type); } static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev, @@ -185,7 +228,7 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) struct device *dev = &hr_dev->pdev->dev; struct hns_roce_aeqe *aeqe; int aeqes_found = 0; - int qpn = 0; + int event_type; while ((aeqe = next_aeqe_sw(eq))) { dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe, @@ -195,9 +238,10 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) /* Memory barrier */ rmb(); - switch (roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)) { + event_type = roce_get_field(aeqe->asyn, + HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, + HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S); + switch (event_type) { case HNS_ROCE_EVENT_TYPE_PATH_MIG: dev_warn(dev, "PATH MIG not supported\n"); break; @@ -211,23 +255,9 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) dev_warn(dev, "PATH MIG failed\n"); break; case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: - 
dev_warn(dev, "qpn = 0x%lx\n", - roce_get_field(aeqe->event.qp_event.qp, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)); - hns_roce_qp_event(hr_dev, - roce_get_field(aeqe->event.qp_event.qp, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S), - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); - break; case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: - hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn); - break; case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn); + hns_roce_qp_err_handle(hr_dev, aeqe, event_type); break; case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: @@ -235,40 +265,9 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) dev_warn(dev, "SRQ not support!\n"); break; case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: - dev_warn(dev, "CQ 0x%lx access err.\n", - roce_get_field(aeqe->event.cq_event.cq, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)); - hns_roce_cq_event(hr_dev, - le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)), - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); - break; case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: - dev_warn(dev, "CQ 0x%lx overflow\n", - roce_get_field(aeqe->event.cq_event.cq, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)); - hns_roce_cq_event(hr_dev, - le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)), - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); - break; case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID: - dev_warn(dev, "CQ ID invalid.\n"); - hns_roce_cq_event(hr_dev, - le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)), - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); + hns_roce_cq_err_handle(hr_dev, aeqe, event_type); break; case HNS_ROCE_EVENT_TYPE_PORT_CHANGE: dev_warn(dev, "port change.\n"); @@ -290,11 +289,8 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S)); break; default: - dev_warn(dev, "Unhandled event 0x%lx on EQ %d at index %u\n", - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S), - eq->eqn, eq->cons_index); + dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n", + event_type, eq->eqn, eq->cons_index); break; }; diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.h b/drivers/infiniband/hw/hns/hns_roce_eq.h index fe4388191a3c..c6d212d12e03 100644 --- a/drivers/infiniband/hw/hns/hns_roce_eq.h +++ b/drivers/infiniband/hw/hns/hns_roce_eq.h @@ -107,6 +107,10 @@ struct hns_roce_aeqe { #define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M \ (((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S) +#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25 +#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M \ + (((1UL << 3) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S) + #define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0 #define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M \ (((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S) diff --git 
a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index d53d64362389..250d8f280390 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -36,14 +36,10 @@ #include "hns_roce_hem.h" #include "hns_roce_common.h" -#define HW_SYNC_TIMEOUT_MSECS 500 -#define HW_SYNC_SLEEP_TIME_INTERVAL 20 - #define HNS_ROCE_HEM_ALLOC_SIZE (1 << 17) #define HNS_ROCE_TABLE_CHUNK_SIZE (1 << 17) #define DMA_ADDR_T_SHIFT 12 -#define BT_CMD_SYNC_SHIFT 31 #define BT_BA_SHIFT 32 struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages, @@ -213,74 +209,6 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, return ret; } -static int hns_roce_clear_hem(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, - unsigned long obj) -{ - struct device *dev = &hr_dev->pdev->dev; - unsigned long end = 0; - unsigned long flags; - void __iomem *bt_cmd; - uint32_t bt_cmd_val[2]; - u32 bt_cmd_h_val = 0; - int ret = 0; - - switch (table->type) { - case HEM_TYPE_QPC: - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC); - break; - case HEM_TYPE_MTPT: - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, - HEM_TYPE_MTPT); - break; - case HEM_TYPE_CQC: - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC); - break; - case HEM_TYPE_SRQC: - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, - HEM_TYPE_SRQC); - break; - default: - return ret; - } - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); - roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); - roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, 0); - - spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags); - - bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; - - end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies; - while (1) { - if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { - if (!(time_before(jiffies, end))) { - dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); - spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, - flags); - return -EBUSY; - } - } else { - break; - } - msleep(HW_SYNC_SLEEP_TIME_INTERVAL); - } - - bt_cmd_val[0] = 0; - bt_cmd_val[1] = bt_cmd_h_val; - hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG); - spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags); - - return ret; -} - int hns_roce_table_get(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, unsigned long obj) { @@ -333,7 +261,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev, if (--table->hem[i]->refcount == 0) { /* Clear HEM base address */ - if (hns_roce_clear_hem(hr_dev, table, obj)) + if (hr_dev->hw->clear_hem(hr_dev, table, obj)) dev_warn(dev, "Clear HEM base address failed.\n"); hns_roce_free_hem(hr_dev, table->hem[i]); @@ -456,7 +384,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev, for (i = 0; i < table->num_hem; ++i) if (table->hem[i]) { - if (hns_roce_clear_hem(hr_dev, table, + if (hr_dev->hw->clear_hem(hr_dev, table, i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size)) dev_err(dev, "Clear HEM base address failed.\n"); diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h 
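/*
 * Editor's note (not part of the patch): clearing a HEM base address is
 * hardware-specific on this device, so hns_roce_clear_hem() moves out of
 * the generic HEM code and behind the new ->clear_hem op; the generic call
 * sites above now go through the function table:
 *
 *	if (hr_dev->hw->clear_hem(hr_dev, table, obj))
 *		dev_warn(dev, "Clear HEM base address failed.\n");
 *
 * The v1 implementation (hns_roce_v1_clear_hem(), added below in
 * hns_roce_hw_v1.c) issues a BT command against the newly reserved BT
 * buffers and polls BT_CMD_SYNC_SHIFT under HW_SYNC_TIMEOUT_MSECS.
 */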
index ad6617588fba..435748858252 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.h +++ b/drivers/infiniband/hw/hns/hns_roce_hem.h @@ -34,6 +34,10 @@ #ifndef _HNS_ROCE_HEM_H #define _HNS_ROCE_HEM_H +#define HW_SYNC_TIMEOUT_MSECS 500 +#define HW_SYNC_SLEEP_TIME_INTERVAL 20 +#define BT_CMD_SYNC_SHIFT 31 + enum { /* MAP HEM(Hardware Entry Memory) */ HEM_TYPE_QPC = 0, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 399f5dedaf2d..71232e5fabf6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -73,8 +73,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, u32 ind = 0; int ret = 0; - spin_lock_irqsave(&qp->sq.lock, flags); + if (unlikely(ibqp->qp_type != IB_QPT_GSI && + ibqp->qp_type != IB_QPT_RC)) { + dev_err(dev, "un-supported QP type\n"); + *bad_wr = NULL; + return -EOPNOTSUPP; + } + spin_lock_irqsave(&qp->sq.lock, flags); ind = qp->sq_next_wqe; for (nreq = 0; wr; ++nreq, wr = wr->next) { if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { @@ -162,7 +168,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, roce_set_field(ud_sq_wqe->u32_36, UD_SEND_WQE_U32_36_SGID_INDEX_M, UD_SEND_WQE_U32_36_SGID_INDEX_S, - hns_get_gid_index(hr_dev, qp->port, + hns_get_gid_index(hr_dev, qp->phy_port, ah->av.gid_index)); roce_set_field(ud_sq_wqe->u32_40, @@ -205,8 +211,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, (wr->send_flags & IB_SEND_FENCE ? (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0); - wqe = (struct hns_roce_wqe_ctrl_seg *)wqe + - sizeof(struct hns_roce_wqe_ctrl_seg); + wqe += sizeof(struct hns_roce_wqe_ctrl_seg); switch (wr->opcode) { case IB_WR_RDMA_READ: @@ -235,8 +240,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, break; } ctrl->flag |= cpu_to_le32(ps_opcode); - wqe = (struct hns_roce_wqe_raddr_seg *)wqe + - sizeof(struct hns_roce_wqe_raddr_seg); + wqe += sizeof(struct hns_roce_wqe_raddr_seg); dseg = wqe; if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) { @@ -253,8 +257,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, memcpy(wqe, ((void *) (uintptr_t) wr->sg_list[i].addr), wr->sg_list[i].length); - wqe = (struct hns_roce_wqe_raddr_seg *) - wqe + wr->sg_list[i].length; + wqe += wr->sg_list[i].length; } ctrl->flag |= HNS_ROCE_WQE_INLINE; } else { @@ -266,9 +269,6 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, HNS_ROCE_WQE_SGE_NUM_BIT); } ind++; - } else { - dev_dbg(dev, "unSupported QP type\n"); - break; } } @@ -285,7 +285,7 @@ out: SQ_DOORBELL_U32_4_SQ_HEAD_S, (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1))); roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M, - SQ_DOORBELL_U32_4_PORT_S, qp->port); + SQ_DOORBELL_U32_4_PORT_S, qp->phy_port); roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M, SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn); roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1); @@ -365,14 +365,14 @@ out: /* SW update GSI rq header */ reg_val = roce_read(to_hr_dev(ibqp->device), ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->port); + QP1C_CFGN_OFFSET * hr_qp->phy_port); roce_set_field(reg_val, ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M, ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S, hr_qp->rq.head); roce_write(to_hr_dev(ibqp->device), ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->port, reg_val); + QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val); } else { rq_db.u32_4 = 0; rq_db.u32_8 = 0; @@ -789,6 +789,66 @@ static void 
hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag) } } +static int hns_roce_bt_init(struct hns_roce_dev *hr_dev) +{ + struct device *dev = &hr_dev->pdev->dev; + struct hns_roce_v1_priv *priv; + int ret; + + priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv; + + priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev, + HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map, + GFP_KERNEL); + if (!priv->bt_table.qpc_buf.buf) + return -ENOMEM; + + priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev, + HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map, + GFP_KERNEL); + if (!priv->bt_table.mtpt_buf.buf) { + ret = -ENOMEM; + goto err_failed_alloc_mtpt_buf; + } + + priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev, + HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map, + GFP_KERNEL); + if (!priv->bt_table.cqc_buf.buf) { + ret = -ENOMEM; + goto err_failed_alloc_cqc_buf; + } + + return 0; + +err_failed_alloc_cqc_buf: + dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, + priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); + +err_failed_alloc_mtpt_buf: + dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, + priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); + + return ret; +} + +static void hns_roce_bt_free(struct hns_roce_dev *hr_dev) +{ + struct device *dev = &hr_dev->pdev->dev; + struct hns_roce_v1_priv *priv; + + priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv; + + dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, + priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map); + + dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, + priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); + + dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, + priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); +} + /** * hns_roce_v1_reset - reset RoCE * @hr_dev: RoCE device struct pointer @@ -879,7 +939,6 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev) caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE; caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE; caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT; - caps->sqp_start = 0; caps->reserved_lkey = 0; caps->reserved_pds = 0; caps->reserved_mrws = 1; @@ -944,8 +1003,18 @@ int hns_roce_v1_init(struct hns_roce_dev *hr_dev) hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP); + ret = hns_roce_bt_init(hr_dev); + if (ret) { + dev_err(dev, "bt init failed!\n"); + goto error_failed_bt_init; + } + return 0; +error_failed_bt_init: + hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN); + hns_roce_raq_free(hr_dev); + error_failed_raq_init: hns_roce_db_free(hr_dev); return ret; @@ -953,6 +1022,7 @@ error_failed_raq_init: void hns_roce_v1_exit(struct hns_roce_dev *hr_dev) { + hns_roce_bt_free(hr_dev); hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN); hns_roce_raq_free(hr_dev); hns_roce_db_free(hr_dev); @@ -1192,9 +1262,7 @@ static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq) return get_sw_cqe(hr_cq, hr_cq->cons_index); } -void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index, - spinlock_t *doorbell_lock) - +void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index) { u32 doorbell[2]; @@ -1254,8 +1322,7 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, */ wmb(); - hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index, - &to_hr_dev(hr_cq->ib_cq.device)->cq_db_lock); + hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index); } } @@ -1485,7 +1552,8 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq, /* SQ conrespond to CQE */ sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4, 
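/*
 * Editor's note (not part of the patch): the '& ((*cur_qp)->sq.wqe_cnt - 1)'
 * added to this lookup bounds the CQE's WQE index to the power-of-two send
 * queue, e.g. with wqe_cnt 0x4000 an index of 0x4003 wraps to slot 3 rather
 * than indexing past the ring.
 */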
CQE_BYTE_4_WQE_INDEX_M, - CQE_BYTE_4_WQE_INDEX_S)); + CQE_BYTE_4_WQE_INDEX_S)& + ((*cur_qp)->sq.wqe_cnt-1)); switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) { case HNS_ROCE_WQE_OPCODE_SEND: wc->opcode = IB_WC_SEND; @@ -1591,10 +1659,8 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) break; } - if (npolled) { - hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index, - &to_hr_dev(ibcq->device)->cq_db_lock); - } + if (npolled) + hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index); spin_unlock_irqrestore(&hr_cq->lock, flags); @@ -1604,6 +1670,74 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) return ret; } +int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_table *table, int obj) +{ + struct device *dev = &hr_dev->pdev->dev; + struct hns_roce_v1_priv *priv; + unsigned long end = 0, flags = 0; + uint32_t bt_cmd_val[2] = {0}; + void __iomem *bt_cmd; + u64 bt_ba = 0; + + priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv; + + switch (table->type) { + case HEM_TYPE_QPC: + roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, + ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC); + bt_ba = priv->bt_table.qpc_buf.map >> 12; + break; + case HEM_TYPE_MTPT: + roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, + ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT); + bt_ba = priv->bt_table.mtpt_buf.map >> 12; + break; + case HEM_TYPE_CQC: + roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, + ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC); + bt_ba = priv->bt_table.cqc_buf.map >> 12; + break; + case HEM_TYPE_SRQC: + dev_dbg(dev, "HEM_TYPE_SRQC not support.\n"); + return -EINVAL; + default: + return 0; + } + roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, + ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); + roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); + roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); + + spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags); + + bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; + + end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies; + while (1) { + if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { + if (!(time_before(jiffies, end))) { + dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); + spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, + flags); + return -EBUSY; + } + } else { + break; + } + msleep(HW_SYNC_SLEEP_TIME_INTERVAL); + } + + bt_cmd_val[0] = (uint32_t)bt_ba; + roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, + ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32); + hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG); + + spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags); + + return 0; +} + static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt, enum hns_roce_qp_state cur_state, @@ -1733,13 +1867,10 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M, QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head); roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M, - QP1C_BYTES_16_PORT_NUM_S, hr_qp->port); + QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port); roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SIGNALING_TYPE_S, hr_qp->sq_signal_bits); - roce_set_bit(context->qp1c_bytes_16, - QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S, - hr_qp->sq_signal_bits); roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S, 1); roce_set_bit(context->qp1c_bytes_16, 
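hns_roce_v1_clear_hem() above waits for the hardware to drop the BT_CMD_SYNC_SHIFT busy bit against a jiffies deadline rather than a fixed retry count, which stays correct across HZ changes. Its polling loop reduces to this shape (register layout simplified):

	static int wait_hw_sync_clear(void __iomem *bt_cmd)
	{
		unsigned long end = jiffies +
				    msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS);

		while (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {	/* still busy */
			if (!time_before(jiffies, end))
				return -EBUSY;			/* timed out */
			msleep(HW_SYNC_SLEEP_TIME_INTERVAL);	/* back off */
		}
		return 0;
	}

One caveat: in the hunk the wait runs after spin_lock_irqsave() on bt_cmd_lock, a context where the sleeping msleep() is not normally allowed; polling before taking the lock, or a non-sleeping mdelay(), would be the conventional choice there.
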
QP1C_BYTES_16_SQ_BA_FLG_S, @@ -1784,7 +1915,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, /* Copy context to QP1C register */ addr = (u32 *)(hr_dev->reg_base + ROCEE_QP1C_CFG0_0_REG + - hr_qp->port * sizeof(*context)); + hr_qp->phy_port * sizeof(*context)); writel(context->qp1c_bytes_4, addr); writel(context->sq_rq_bt_l, addr + 1); @@ -1795,15 +1926,16 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, writel(context->qp1c_bytes_28, addr + 6); writel(context->qp1c_bytes_32, addr + 7); writel(context->cur_sq_wqe_ba_l, addr + 8); + writel(context->qp1c_bytes_40, addr + 9); } /* Modify QP1C status */ reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG + - hr_qp->port * sizeof(*context)); + hr_qp->phy_port * sizeof(*context)); roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state); roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG + - hr_qp->port * sizeof(*context), reg_val); + hr_qp->phy_port * sizeof(*context), reg_val); hr_qp->state = new_state; if (new_state == IB_QPS_RESET) { @@ -1836,12 +1968,10 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct device *dev = &hr_dev->pdev->dev; struct hns_roce_qp_context *context; - struct hns_roce_rq_db rq_db; dma_addr_t dma_handle_2 = 0; dma_addr_t dma_handle = 0; uint32_t doorbell[2] = {0}; int rq_pa_start = 0; - u32 reg_val = 0; u64 *mtts_2 = NULL; int ret = -EINVAL; u64 *mtts = NULL; @@ -2119,7 +2249,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, roce_set_field(context->qpc_bytes_68, QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M, - QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, 0); + QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, + hr_qp->rq.head); roce_set_field(context->qpc_bytes_68, QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M, QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0); @@ -2186,7 +2317,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, roce_set_field(context->qpc_bytes_156, QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, - hr_qp->port); + hr_qp->phy_port); roce_set_field(context->qpc_bytes_156, QP_CONTEXT_QPC_BYTES_156_SL_M, QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl); @@ -2257,20 +2388,17 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, roce_set_bit(context->qpc_bytes_140, QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0); - roce_set_field(context->qpc_bytes_144, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_M, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, - attr->qp_state); - roce_set_field(context->qpc_bytes_148, QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M, QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0); roce_set_field(context->qpc_bytes_148, QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, 0); + QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, + attr->retry_cnt); roce_set_field(context->qpc_bytes_148, QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M, - QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, 0); + QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, + attr->rnr_retry); roce_set_field(context->qpc_bytes_148, QP_CONTEXT_QPC_BYTES_148_LSN_M, QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100); @@ -2281,10 +2409,19 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M, QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S, attr->retry_cnt); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, - 
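This QP1C hunk also adds the previously missed tenth word, qp1c_bytes_40 at addr + 9. Because the software context is pushed into a contiguous window of 32-bit registers, skipping one word silently leaves stale hardware state in that slot. The copy pattern, written generically (names illustrative):

	/* addr is a u32 __iomem pointer, so "addr + i" steps 4 bytes. */
	static void write_qp_context(u32 __iomem *addr, const u32 *ctx,
				     int nwords)
	{
		int i;

		for (i = 0; i < nwords; i++)
			writel(ctx[i], addr + i);
	}
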
QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, - attr->timeout); + if (attr->timeout < 0x12) { + dev_info(dev, "ack timeout value (0x%x) must be at least 0x12.\n", + attr->timeout); + roce_set_field(context->qpc_bytes_156, + QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, + QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, + 0x12); + } else { + roce_set_field(context->qpc_bytes_156, + QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, + QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, + attr->timeout); + } roce_set_field(context->qpc_bytes_156, QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M, QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S, @@ -2292,7 +2429,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, roce_set_field(context->qpc_bytes_156, QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, - hr_qp->port); + hr_qp->phy_port); roce_set_field(context->qpc_bytes_156, QP_CONTEXT_QPC_BYTES_156_SL_M, QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl); @@ -2357,21 +2494,15 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M, QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S, 0); - } else if ((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) || + } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) || (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) || (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) || (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) || (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) || (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) || (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) || - (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) { - roce_set_field(context->qpc_bytes_144, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_M, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, - attr->qp_state); - - } else { - dev_err(dev, "not support this modify\n"); + (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) { + dev_err(dev, "unsupported QP state migration\n"); goto out; } @@ -2397,43 +2528,32 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { /* Memory barrier */ wmb(); - if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { - /* SW update GSI rq header */ - reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->port); - roce_set_field(reg_val, - ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M, - ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S, - hr_qp->rq.head); - roce_write(hr_dev, ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->port, reg_val); - } else { - rq_db.u32_4 = 0; - rq_db.u32_8 = 0; - - roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M, - RQ_DOORBELL_U32_4_RQ_HEAD_S, - hr_qp->rq.head); - roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M, - RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn); - roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M, - RQ_DOORBELL_U32_8_CMD_S, 1); - roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S, - 1); - doorbell[0] = rq_db.u32_4; - doorbell[1] = rq_db.u32_8; - - hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l); + roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M, + RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head); + roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M, + RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn); + roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M, + RQ_DOORBELL_U32_8_CMD_S, 1); + roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1); + + if (ibqp->uobject) { + hr_qp->rq.db_reg_l = hr_dev->reg_base + +
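The new ack-timeout branch is a clamp to the hardware minimum of 0x12. As a sketch, the same behavior can be written more compactly with max_t() from <linux/kernel.h> (ib_qp_attr.timeout is a u8):

	u8 timeout = max_t(u8, attr->timeout, 0x12);

	if (timeout != attr->timeout)
		dev_info(dev, "ack timeout 0x%x below minimum, using 0x12\n",
			 attr->timeout);
	roce_set_field(context->qpc_bytes_156,
		       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
		       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, timeout);
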
ROCEE_DB_OTHERS_L_0_REG + + DB_REG_OFFSET * hr_dev->priv_uar.index; } + + hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l); } hr_qp->state = new_state; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) hr_qp->resp_depth = attr->max_dest_rd_atomic; - if (attr_mask & IB_QP_PORT) - hr_qp->port = (attr->port_num - 1); + if (attr_mask & IB_QP_PORT) { + hr_qp->port = attr->port_num - 1; + hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; + } if (new_state == IB_QPS_RESET && !ibqp->uobject) { hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, @@ -2789,6 +2909,7 @@ struct hns_roce_hw hns_roce_hw_v1 = { .set_mtu = hns_roce_v1_set_mtu, .write_mtpt = hns_roce_v1_write_mtpt, .write_cqc = hns_roce_v1_write_cqc, + .clear_hem = hns_roce_v1_clear_hem, .modify_qp = hns_roce_v1_modify_qp, .query_qp = hns_roce_v1_query_qp, .destroy_qp = hns_roce_v1_destroy_qp, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h index 316b592b1636..539b0a3b92b0 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h @@ -102,6 +102,8 @@ #define HNS_ROCE_V1_EXT_ODB_ALFUL \ (HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD) +#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17) + #define HNS_ROCE_ODB_POLL_MODE 0 #define HNS_ROCE_SDB_NORMAL_MODE 0 @@ -971,9 +973,16 @@ struct hns_roce_db_table { struct hns_roce_ext_db *ext_db; }; +struct hns_roce_bt_table { + struct hns_roce_buf_list qpc_buf; + struct hns_roce_buf_list mtpt_buf; + struct hns_roce_buf_list cqc_buf; +}; + struct hns_roce_v1_priv { struct hns_roce_db_table db_table; struct hns_roce_raq_table raq_table; + struct hns_roce_bt_table bt_table; }; int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset); diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index f64f0dde9a88..764e35a54457 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -355,8 +355,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev, props->max_qp = hr_dev->caps.num_qps; props->max_qp_wr = hr_dev->caps.max_wqes; props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT | - IB_DEVICE_RC_RNR_NAK_GEN | - IB_DEVICE_LOCAL_DMA_LKEY; + IB_DEVICE_RC_RNR_NAK_GEN; props->max_sge = hr_dev->caps.max_sq_sg; props->max_sge_rd = 1; props->max_cq = hr_dev->caps.num_cqs; @@ -372,6 +371,25 @@ static int hns_roce_query_device(struct ib_device *ib_dev, return 0; } +static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev, + u8 port_num) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); + struct net_device *ndev; + + if (port_num < 1 || port_num > hr_dev->caps.num_ports) + return NULL; + + rcu_read_lock(); + + ndev = hr_dev->iboe.netdevs[port_num - 1]; + if (ndev) + dev_hold(ndev); + + rcu_read_unlock(); + return ndev; +} + static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, struct ib_port_attr *props) { @@ -584,6 +602,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) struct device *dev = &hr_dev->pdev->dev; iboe = &hr_dev->iboe; + spin_lock_init(&iboe->lock); ib_dev = &hr_dev->ib_dev; strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX); @@ -618,6 +637,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) ib_dev->query_port = hns_roce_query_port; ib_dev->modify_port = hns_roce_modify_port; ib_dev->get_link_layer = hns_roce_get_link_layer; + ib_dev->get_netdev = hns_roce_get_netdev; ib_dev->query_gid = hns_roce_query_gid; ib_dev->query_pkey = 
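hns_roce_get_netdev() above follows the standard ->get_netdev contract: both the lookup and the reference grab happen inside one RCU read-side section, so the netdev cannot be unregistered and freed between the two steps, and the caller (the RDMA core) later releases the reference with dev_put(). Condensed:

	rcu_read_lock();
	ndev = hr_dev->iboe.netdevs[port_num - 1];
	if (ndev)
		dev_hold(ndev);		/* pin it before leaving RCU */
	rcu_read_unlock();
	return ndev;			/* safe only because of dev_hold() */
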
hns_roce_query_pkey; ib_dev->alloc_ucontext = hns_roce_alloc_ucontext; @@ -667,8 +687,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) goto error_failed_setup_mtu_gids; } - spin_lock_init(&iboe->lock); - iboe->nb.notifier_call = hns_roce_netdev_event; ret = register_netdevice_notifier(&iboe->nb); if (ret) { @@ -777,6 +795,15 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) if (IS_ERR(hr_dev->reg_base)) return PTR_ERR(hr_dev->reg_base); + /* read the node_guid of IB device from the DT or ACPI */ + ret = device_property_read_u8_array(dev, "node-guid", + (u8 *)&hr_dev->ib_dev.node_guid, + GUID_LEN); + if (ret) { + dev_err(dev, "couldn't get node_guid from DT or ACPI!\n"); + return ret; + } + /* get the RoCE associated ethernet ports or netdevices */ for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) { if (dev_of_node(dev)) { @@ -923,7 +950,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) struct device *dev = &hr_dev->pdev->dev; spin_lock_init(&hr_dev->sm_lock); - spin_lock_init(&hr_dev->cq_db_lock); spin_lock_init(&hr_dev->bt_cmd_lock); ret = hns_roce_init_uar_table(hr_dev); diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 59f5e2be046b..fb87883ead34 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -564,11 +564,14 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) { dev_err(dev, "Just support 4K page size but is 0x%x now!\n", mr->umem->page_size); + ret = -EINVAL; + goto err_umem; } if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) { dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n", length); + ret = -EINVAL; goto err_umem; } diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index 16271b5bd170..05db7d59812a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -35,19 +35,7 @@ static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn) { - struct device *dev = &hr_dev->pdev->dev; - unsigned long pd_number; - int ret = 0; - - ret = hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pd_number); - if (ret == -1) { - dev_err(dev, "alloc pdn from pdbitmap failed\n"); - return -ENOMEM; - } - - *pdn = pd_number; - - return 0; + return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn); } static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn) @@ -117,9 +105,15 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) if (ret == -1) return -ENOMEM; - uar->index = (uar->index - 1) % hr_dev->caps.phy_num_uars + 1; + if (uar->index > 0) + uar->index = (uar->index - 1) % + (hr_dev->caps.phy_num_uars - 1) + 1; res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&hr_dev->pdev->dev, "memory resource not found!\n"); + return -EINVAL; + } uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index; return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 645c18d809a5..e86dd8d06777 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -32,14 +32,14 @@ */ #include <linux/platform_device.h> +#include <rdma/ib_addr.h> #include <rdma/ib_umem.h> #include "hns_roce_common.h" #include "hns_roce_device.h" #include "hns_roce_hem.h" #include "hns_roce_user.h" -#define DB_REG_OFFSET 0x1000 -#define SQP_NUM 12 +#define SQP_NUM (2 * 
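The node-guid read above goes through the unified device-property API, so one call covers both a devicetree "node-guid" property and the equivalent ACPI _DSD entry. Usage sketch (GUID_LEN is the driver's 8-byte GUID size):

	#include <linux/property.h>

	u8 guid[8];

	/* Identical whether the platform device was described by DT or ACPI. */
	if (device_property_read_u8_array(dev, "node-guid",
					  guid, sizeof(guid)))
		dev_err(dev, "node-guid property missing or malformed\n");
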
HNS_ROCE_MAX_PORTS) void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) { @@ -113,16 +113,8 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt, int align, unsigned long *base) { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; - int ret = 0; - unsigned long qpn; - - ret = hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, &qpn); - if (ret == -1) - return -ENOMEM; - - *base = qpn; - return 0; + return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base); } enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) @@ -255,7 +247,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn, { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; - if (base_qpn < (hr_dev->caps.sqp_start + 2 * hr_dev->caps.num_ports)) + if (base_qpn < SQP_NUM) return; hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt); @@ -345,12 +337,10 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, - enum ib_qp_type type, struct hns_roce_qp *hr_qp) { struct device *dev = &hr_dev->pdev->dev; u32 max_cnt; - (void)type; if (cap->max_send_wr > hr_dev->caps.max_wqes || cap->max_send_sge > hr_dev->caps.max_sq_sg || @@ -476,7 +466,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, /* Set SQ size */ ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap, - init_attr->qp_type, hr_qp); + hr_qp); if (ret) { dev_err(dev, "hns_roce_set_kernel_sq_size error!\n"); goto err_out; @@ -617,21 +607,19 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, return ERR_PTR(-ENOMEM); hr_qp = &hr_sqp->hr_qp; + hr_qp->port = init_attr->port_num - 1; + hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; + hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS + + hr_dev->iboe.phy_port[hr_qp->port]; ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, - hr_dev->caps.sqp_start + - hr_dev->caps.num_ports + - init_attr->port_num - 1, hr_qp); + hr_qp->ibqp.qp_num, hr_qp); if (ret) { dev_err(dev, "Create GSI QP failed!\n"); kfree(hr_sqp); return ERR_PTR(ret); } - hr_qp->port = (init_attr->port_num - 1); - hr_qp->ibqp.qp_num = hr_dev->caps.sqp_start + - hr_dev->caps.num_ports + - init_attr->port_num - 1; break; } default:{ @@ -670,6 +658,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, struct device *dev = &hr_dev->pdev->dev; int ret = -EINVAL; int p; + enum ib_mtu active_mtu; mutex_lock(&hr_qp->mutex); @@ -700,6 +689,19 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, } } + if (attr_mask & IB_QP_PATH_MTU) { + p = attr_mask & IB_QP_PORT ? 
(attr->port_num - 1) : hr_qp->port; + active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); + + if (attr->path_mtu > IB_MTU_2048 || + attr->path_mtu < IB_MTU_256 || + attr->path_mtu > active_mtu) { + dev_err(dev, "attr path_mtu(%d)invalid while modify qp", + attr->path_mtu); + goto out; + } + } + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n", @@ -782,29 +784,11 @@ static void *get_wqe(struct hns_roce_qp *hr_qp, int offset) void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n) { - struct ib_qp *ibqp = &hr_qp->ibqp; - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - - if ((n < 0) || (n > hr_qp->rq.wqe_cnt)) { - dev_err(&hr_dev->pdev->dev, "rq wqe index:%d,rq wqe cnt:%d\r\n", - n, hr_qp->rq.wqe_cnt); - return NULL; - } - return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift)); } void *get_send_wqe(struct hns_roce_qp *hr_qp, int n) { - struct ib_qp *ibqp = &hr_qp->ibqp; - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - - if ((n < 0) || (n > hr_qp->sq.wqe_cnt)) { - dev_err(&hr_dev->pdev->dev, "sq wqe index:%d,sq wqe cnt:%d\r\n", - n, hr_qp->sq.wqe_cnt); - return NULL; - } - return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift)); } @@ -837,8 +821,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev) /* A port include two SQP, six port total 12 */ ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps, - hr_dev->caps.num_qps - 1, - hr_dev->caps.sqp_start + SQP_NUM, + hr_dev->caps.num_qps - 1, SQP_NUM, reserved_from_top); if (ret) { dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n", diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig new file mode 100644 index 000000000000..7c06d85568d4 --- /dev/null +++ b/drivers/infiniband/hw/qedr/Kconfig @@ -0,0 +1,7 @@ +config INFINIBAND_QEDR + tristate "QLogic RoCE driver" + depends on 64BIT && QEDE + select QED_LL2 + ---help--- + This driver provides low-level InfiniBand over Ethernet + support for QLogic QED host channel adapters (HCAs). diff --git a/drivers/infiniband/hw/qedr/Makefile b/drivers/infiniband/hw/qedr/Makefile new file mode 100644 index 000000000000..ba7067c77f2f --- /dev/null +++ b/drivers/infiniband/hw/qedr/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o + +qedr-y := main.o verbs.o qedr_cm.o diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c new file mode 100644 index 000000000000..7b74d09a8217 --- /dev/null +++ b/drivers/infiniband/hw/qedr/main.c @@ -0,0 +1,914 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
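The IB_QP_PATH_MTU check added at the top of this hunk bounds the requested path MTU twice: by the 256..2048 range this hardware supports, and by what the bound Ethernet device can actually carry; iboe_get_mtu() from <rdma/ib_addr.h> converts the netdev MTU into the largest enum ib_mtu that still fits once RoCE transport headers are subtracted. In isolation:

	enum ib_mtu active_mtu = iboe_get_mtu(ndev->mtu);

	if (attr->path_mtu < IB_MTU_256 ||	/* below the IB minimum    */
	    attr->path_mtu > IB_MTU_2048 ||	/* above the hw_v1 maximum */
	    attr->path_mtu > active_mtu)	/* won't fit on the wire   */
		return -EINVAL;
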
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/module.h> +#include <rdma/ib_verbs.h> +#include <rdma/ib_addr.h> +#include <rdma/ib_user_verbs.h> +#include <linux/netdevice.h> +#include <linux/iommu.h> +#include <net/addrconf.h> +#include <linux/qed/qede_roce.h> +#include <linux/qed/qed_chain.h> +#include <linux/qed/qed_if.h> +#include "qedr.h" +#include "verbs.h" +#include <rdma/qedr-abi.h> + +MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver"); +MODULE_AUTHOR("QLogic Corporation"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(QEDR_MODULE_VERSION); + +#define QEDR_WQ_MULTIPLIER_DFT (3) + +void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num, + enum ib_event_type type) +{ + struct ib_event ibev; + + ibev.device = &dev->ibdev; + ibev.element.port_num = port_num; + ibev.event = type; + + ib_dispatch_event(&ibev); +} + +static enum rdma_link_layer qedr_link_layer(struct ib_device *device, + u8 port_num) +{ + return IB_LINK_LAYER_ETHERNET; +} + +static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str, + size_t str_len) +{ + struct qedr_dev *qedr = get_qedr_dev(ibdev); + u32 fw_ver = (u32)qedr->attr.fw_ver; + + snprintf(str, str_len, "%d. %d. %d. %d", + (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF, + (fw_ver >> 8) & 0xFF, fw_ver & 0xFF); +} + +static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num) +{ + struct qedr_dev *qdev; + + qdev = get_qedr_dev(dev); + dev_hold(qdev->ndev); + + /* The HW vendor's device driver must guarantee + * that this function returns NULL before the net device reaches + * NETDEV_UNREGISTER_FINAL state. 
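qedr_get_dev_fw_str() above unpacks a firmware version stored one byte per field, most significant byte first. A worked example with a hypothetical value:

	u32 fw_ver = 0x080A0A00;		/* 8.10.10.0 packed */
	u8 major = (fw_ver >> 24) & 0xFF;	/* 0x08 = 8  */
	u8 minor = (fw_ver >> 16) & 0xFF;	/* 0x0A = 10 */
	u8 rev   = (fw_ver >>  8) & 0xFF;	/* 0x0A = 10 */
	u8 eng   =  fw_ver        & 0xFF;	/* 0x00 = 0  */

Note that the hunk's format string, "%d. %d. %d. %d", keeps a space after each dot and so prints "8. 10. 10. 0"; the conventional dotted form would be "%d.%d.%d.%d".
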
+ */ + return qdev->ndev; +} + +static int qedr_register_device(struct qedr_dev *dev) +{ + strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX); + + dev->ibdev.node_guid = dev->attr.node_guid; + memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC)); + dev->ibdev.owner = THIS_MODULE; + dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION; + + dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) | + QEDR_UVERBS(QUERY_DEVICE) | + QEDR_UVERBS(QUERY_PORT) | + QEDR_UVERBS(ALLOC_PD) | + QEDR_UVERBS(DEALLOC_PD) | + QEDR_UVERBS(CREATE_COMP_CHANNEL) | + QEDR_UVERBS(CREATE_CQ) | + QEDR_UVERBS(RESIZE_CQ) | + QEDR_UVERBS(DESTROY_CQ) | + QEDR_UVERBS(REQ_NOTIFY_CQ) | + QEDR_UVERBS(CREATE_QP) | + QEDR_UVERBS(MODIFY_QP) | + QEDR_UVERBS(QUERY_QP) | + QEDR_UVERBS(DESTROY_QP) | + QEDR_UVERBS(REG_MR) | + QEDR_UVERBS(DEREG_MR) | + QEDR_UVERBS(POLL_CQ) | + QEDR_UVERBS(POST_SEND) | + QEDR_UVERBS(POST_RECV); + + dev->ibdev.phys_port_cnt = 1; + dev->ibdev.num_comp_vectors = dev->num_cnq; + dev->ibdev.node_type = RDMA_NODE_IB_CA; + + dev->ibdev.query_device = qedr_query_device; + dev->ibdev.query_port = qedr_query_port; + dev->ibdev.modify_port = qedr_modify_port; + + dev->ibdev.query_gid = qedr_query_gid; + dev->ibdev.add_gid = qedr_add_gid; + dev->ibdev.del_gid = qedr_del_gid; + + dev->ibdev.alloc_ucontext = qedr_alloc_ucontext; + dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext; + dev->ibdev.mmap = qedr_mmap; + + dev->ibdev.alloc_pd = qedr_alloc_pd; + dev->ibdev.dealloc_pd = qedr_dealloc_pd; + + dev->ibdev.create_cq = qedr_create_cq; + dev->ibdev.destroy_cq = qedr_destroy_cq; + dev->ibdev.resize_cq = qedr_resize_cq; + dev->ibdev.req_notify_cq = qedr_arm_cq; + + dev->ibdev.create_qp = qedr_create_qp; + dev->ibdev.modify_qp = qedr_modify_qp; + dev->ibdev.query_qp = qedr_query_qp; + dev->ibdev.destroy_qp = qedr_destroy_qp; + + dev->ibdev.query_pkey = qedr_query_pkey; + + dev->ibdev.create_ah = qedr_create_ah; + dev->ibdev.destroy_ah = qedr_destroy_ah; + + dev->ibdev.get_dma_mr = qedr_get_dma_mr; + dev->ibdev.dereg_mr = qedr_dereg_mr; + dev->ibdev.reg_user_mr = qedr_reg_user_mr; + dev->ibdev.alloc_mr = qedr_alloc_mr; + dev->ibdev.map_mr_sg = qedr_map_mr_sg; + + dev->ibdev.poll_cq = qedr_poll_cq; + dev->ibdev.post_send = qedr_post_send; + dev->ibdev.post_recv = qedr_post_recv; + + dev->ibdev.process_mad = qedr_process_mad; + dev->ibdev.get_port_immutable = qedr_port_immutable; + dev->ibdev.get_netdev = qedr_get_netdev; + + dev->ibdev.dma_device = &dev->pdev->dev; + + dev->ibdev.get_link_layer = qedr_link_layer; + dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str; + + return ib_register_device(&dev->ibdev, NULL); +} + +/* This function allocates fast-path status block memory */ +static int qedr_alloc_mem_sb(struct qedr_dev *dev, + struct qed_sb_info *sb_info, u16 sb_id) +{ + struct status_block *sb_virt; + dma_addr_t sb_phys; + int rc; + + sb_virt = dma_alloc_coherent(&dev->pdev->dev, + sizeof(*sb_virt), &sb_phys, GFP_KERNEL); + if (!sb_virt) + return -ENOMEM; + + rc = dev->ops->common->sb_init(dev->cdev, sb_info, + sb_virt, sb_phys, sb_id, + QED_SB_TYPE_CNQ); + if (rc) { + pr_err("Status block initialization failed\n"); + dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt), + sb_virt, sb_phys); + return rc; + } + + return 0; +} + +static void qedr_free_mem_sb(struct qedr_dev *dev, + struct qed_sb_info *sb_info, int sb_id) +{ + if (sb_info->sb_virt) { + dev->ops->common->sb_release(dev->cdev, sb_info, sb_id); + dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt), + (void *)sb_info->sb_virt, 
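The uverbs_cmd_mask assembled in qedr_register_device() advertises exactly which user-verbs commands the driver implements. QEDR_UVERBS() (defined in qedr.h further down) is a token-pasting shorthand: each term is one bit of a 64-bit mask indexed by the IB_USER_VERBS_CMD_* enum:

	#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

	/* QEDR_UVERBS(POST_SEND) expands to
	 *	(1ull << IB_USER_VERBS_CMD_POST_SEND),
	 * so OR-ing the terms simply sets one bit per supported command. */
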
sb_info->sb_phys); + } +} + +static void qedr_free_resources(struct qedr_dev *dev) +{ + int i; + + for (i = 0; i < dev->num_cnq; i++) { + qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); + dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl); + } + + kfree(dev->cnq_array); + kfree(dev->sb_array); + kfree(dev->sgid_tbl); +} + +static int qedr_alloc_resources(struct qedr_dev *dev) +{ + struct qedr_cnq *cnq; + __le16 *cons_pi; + u16 n_entries; + int i, rc; + + dev->sgid_tbl = kzalloc(sizeof(union ib_gid) * + QEDR_MAX_SGID, GFP_KERNEL); + if (!dev->sgid_tbl) + return -ENOMEM; + + spin_lock_init(&dev->sgid_lock); + + /* Allocate Status blocks for CNQ */ + dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array), + GFP_KERNEL); + if (!dev->sb_array) { + rc = -ENOMEM; + goto err1; + } + + dev->cnq_array = kcalloc(dev->num_cnq, + sizeof(*dev->cnq_array), GFP_KERNEL); + if (!dev->cnq_array) { + rc = -ENOMEM; + goto err2; + } + + dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev); + + /* Allocate CNQ PBLs */ + n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE); + for (i = 0; i < dev->num_cnq; i++) { + cnq = &dev->cnq_array[i]; + + rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i], + dev->sb_start + i); + if (rc) + goto err3; + + rc = dev->ops->common->chain_alloc(dev->cdev, + QED_CHAIN_USE_TO_CONSUME, + QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, + n_entries, + sizeof(struct regpair *), + &cnq->pbl); + if (rc) + goto err4; + + cnq->dev = dev; + cnq->sb = &dev->sb_array[i]; + cons_pi = dev->sb_array[i].sb_virt->pi_array; + cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX]; + cnq->index = i; + sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev)); + + DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n", + i, qed_chain_get_cons_idx(&cnq->pbl)); + } + + return 0; +err4: + qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); +err3: + for (--i; i >= 0; i--) { + dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl); + qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); + } + kfree(dev->cnq_array); +err2: + kfree(dev->sb_array); +err1: + kfree(dev->sgid_tbl); + return rc; +} + +/* QEDR sysfs interface */ +static ssize_t show_rev(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct qedr_dev *dev = dev_get_drvdata(device); + + return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor); +} + +static ssize_t show_hca_type(struct device *device, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET"); +} + +static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); +static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL); + +static struct device_attribute *qedr_attributes[] = { + &dev_attr_hw_rev, + &dev_attr_hca_type +}; + +static void qedr_remove_sysfiles(struct qedr_dev *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++) + device_remove_file(&dev->ibdev.dev, qedr_attributes[i]); +} + +static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev) +{ + struct pci_dev *bridge; + u32 val; + + dev->atomic_cap = IB_ATOMIC_NONE; + + bridge = pdev->bus->self; + if (!bridge) + return; + + /* Check whether we are connected directly or via a switch */ + while (bridge && bridge->bus->parent) { + DP_DEBUG(dev, QEDR_MSG_INIT, + "Device is not connected directly to root. bridge->bus->number=%d primary=%d\n", + bridge->bus->number, bridge->bus->primary); + /* Need to check Atomic Op Routing Supported all the way to + * root complex. 
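qedr_alloc_resources() above shows the counting unwind for a partially built array: when slot i fails half way, err4 releases the piece of slot i that did succeed, then err3 walks i-1 down to 0 releasing every fully initialized slot. The skeleton, with illustrative helper names:

	for (i = 0; i < n; i++) {
		rc = init_sb(i);		/* step 1 of slot i */
		if (rc)
			goto err_unwind;	/* slot i holds nothing yet */
		rc = init_chain(i);		/* step 2 of slot i */
		if (rc)
			goto err_free_sb;	/* undo only step 1 of slot i */
	}
	return 0;

	err_free_sb:
		free_sb(i);
	err_unwind:
		for (--i; i >= 0; i--) {	/* slots 0..i-1 are complete */
			free_chain(i);
			free_sb(i);
		}
		return rc;
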
+ */ + pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val); + if (!(val & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) { + pcie_capability_clear_word(pdev, + PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_ATOMIC_REQ); + return; + } + bridge = bridge->bus->parent->self; + } + bridge = pdev->bus->self; + + /* according to bridge capability */ + pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val); + if (val & PCI_EXP_DEVCAP2_ATOMIC_COMP64) { + pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_ATOMIC_REQ); + dev->atomic_cap = IB_ATOMIC_GLOB; + } else { + pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_ATOMIC_REQ); + } +} + +static const struct qed_rdma_ops *qed_ops; + +#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) + +static irqreturn_t qedr_irq_handler(int irq, void *handle) +{ + u16 hw_comp_cons, sw_comp_cons; + struct qedr_cnq *cnq = handle; + struct regpair *cq_handle; + struct qedr_cq *cq; + + qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0); + + qed_sb_update_sb_idx(cnq->sb); + + hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr); + sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl); + + /* Align protocol-index and chain reads */ + rmb(); + + while (sw_comp_cons != hw_comp_cons) { + cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl); + cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi, + cq_handle->lo); + + if (cq == NULL) { + DP_ERR(cnq->dev, + "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n", + cq_handle->hi, cq_handle->lo, sw_comp_cons, + hw_comp_cons); + + break; + } + + if (cq->sig != QEDR_CQ_MAGIC_NUMBER) { + DP_ERR(cnq->dev, + "Problem with cq signature, cq_handle->hi=%d ch_handle->lo=%d cq=%p\n", + cq_handle->hi, cq_handle->lo, cq); + break; + } + + cq->arm_flags = 0; + + if (cq->ibcq.comp_handler) + (*cq->ibcq.comp_handler) + (&cq->ibcq, cq->ibcq.cq_context); + + sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl); + + cnq->n_comp++; + + } + + qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index, + sw_comp_cons); + + qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1); + + return IRQ_HANDLED; +} + +static void qedr_sync_free_irqs(struct qedr_dev *dev) +{ + u32 vector; + int i; + + for (i = 0; i < dev->int_info.used_cnt; i++) { + if (dev->int_info.msix_cnt) { + vector = dev->int_info.msix[i * dev->num_hwfns].vector; + synchronize_irq(vector); + free_irq(vector, &dev->cnq_array[i]); + } + } + + dev->int_info.used_cnt = 0; +} + +static int qedr_req_msix_irqs(struct qedr_dev *dev) +{ + int i, rc = 0; + + if (dev->num_cnq > dev->int_info.msix_cnt) { + DP_ERR(dev, + "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n", + dev->num_cnq, dev->int_info.msix_cnt); + return -EINVAL; + } + + for (i = 0; i < dev->num_cnq; i++) { + rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector, + qedr_irq_handler, 0, dev->cnq_array[i].name, + &dev->cnq_array[i]); + if (rc) { + DP_ERR(dev, "Request cnq %d irq failed\n", i); + qedr_sync_free_irqs(dev); + } else { + DP_DEBUG(dev, QEDR_MSG_INIT, + "Requested cnq irq for %s [entry %d]. 
Cookie is at %p\n", + dev->cnq_array[i].name, i, + &dev->cnq_array[i]); + dev->int_info.used_cnt++; + } + } + + return rc; +} + +static int qedr_setup_irqs(struct qedr_dev *dev) +{ + int rc; + + DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n"); + + /* Learn Interrupt configuration */ + rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq); + if (rc < 0) + return rc; + + rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info); + if (rc) { + DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n"); + return rc; + } + + if (dev->int_info.msix_cnt) { + DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n", + dev->int_info.msix_cnt); + rc = qedr_req_msix_irqs(dev); + if (rc) + return rc; + } + + DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n"); + + return 0; +} + +static int qedr_set_device_attr(struct qedr_dev *dev) +{ + struct qed_rdma_device *qed_attr; + struct qedr_device_attr *attr; + u32 page_size; + + /* Part 1 - query core capabilities */ + qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx); + + /* Part 2 - check capabilities */ + page_size = ~dev->attr.page_size_caps + 1; + if (page_size > PAGE_SIZE) { + DP_ERR(dev, + "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n", + PAGE_SIZE, page_size); + return -ENODEV; + } + + /* Part 3 - copy and update capabilities */ + attr = &dev->attr; + attr->vendor_id = qed_attr->vendor_id; + attr->vendor_part_id = qed_attr->vendor_part_id; + attr->hw_ver = qed_attr->hw_ver; + attr->fw_ver = qed_attr->fw_ver; + attr->node_guid = qed_attr->node_guid; + attr->sys_image_guid = qed_attr->sys_image_guid; + attr->max_cnq = qed_attr->max_cnq; + attr->max_sge = qed_attr->max_sge; + attr->max_inline = qed_attr->max_inline; + attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE); + attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE); + attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc; + attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc; + attr->max_dev_resp_rd_atomic_resc = + qed_attr->max_dev_resp_rd_atomic_resc; + attr->max_cq = qed_attr->max_cq; + attr->max_qp = qed_attr->max_qp; + attr->max_mr = qed_attr->max_mr; + attr->max_mr_size = qed_attr->max_mr_size; + attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES); + attr->max_mw = qed_attr->max_mw; + attr->max_fmr = qed_attr->max_fmr; + attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl; + attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size; + attr->max_pd = qed_attr->max_pd; + attr->max_ah = qed_attr->max_ah; + attr->max_pkey = qed_attr->max_pkey; + attr->max_srq = qed_attr->max_srq; + attr->max_srq_wr = qed_attr->max_srq_wr; + attr->dev_caps = qed_attr->dev_caps; + attr->page_size_caps = qed_attr->page_size_caps; + attr->dev_ack_delay = qed_attr->dev_ack_delay; + attr->reserved_lkey = qed_attr->reserved_lkey; + attr->bad_pkey_counter = qed_attr->bad_pkey_counter; + attr->max_stats_queues = qed_attr->max_stats_queues; + + return 0; +} + +void qedr_unaffiliated_event(void *context, + u8 event_code) +{ + pr_err("unaffiliated event not implemented yet\n"); +} + +void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle) +{ +#define EVENT_TYPE_NOT_DEFINED 0 +#define EVENT_TYPE_CQ 1 +#define EVENT_TYPE_QP 2 + struct qedr_dev *dev = (struct qedr_dev *)context; + union event_ring_data *data = fw_handle; + u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) + + data->roce_handle.lo; + u8 event_type = EVENT_TYPE_NOT_DEFINED; + struct ib_event event; + struct ib_cq *ibcq; + struct 
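The page-size check in qedr_set_device_attr() leans on two's-complement arithmetic: for a 32-bit capability mask whose set bits run contiguously up from the smallest supported size, ~caps + 1 (that is, -caps) is exactly that lowest bit. A worked example, with the variant that is safe for arbitrary masks:

	u32 caps = 0xFFFFF000;		/* 4 KiB and every larger size */
	u32 min_contig = ~caps + 1;	/* 0x00000FFF + 1 = 0x1000 = 4096 */
	u32 min_any = caps & -caps;	/* lowest set bit of any mask */
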
ib_qp *ibqp; + struct qedr_cq *cq; + struct qedr_qp *qp; + + switch (e_code) { + case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR: + event.event = IB_EVENT_CQ_ERR; + event_type = EVENT_TYPE_CQ; + break; + case ROCE_ASYNC_EVENT_SQ_DRAINED: + event.event = IB_EVENT_SQ_DRAINED; + event_type = EVENT_TYPE_QP; + break; + case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR: + event.event = IB_EVENT_QP_FATAL; + event_type = EVENT_TYPE_QP; + break; + case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR: + event.event = IB_EVENT_QP_REQ_ERR; + event_type = EVENT_TYPE_QP; + break; + case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR: + event.event = IB_EVENT_QP_ACCESS_ERR; + event_type = EVENT_TYPE_QP; + break; + default: + DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code, + roce_handle64); + } + + switch (event_type) { + case EVENT_TYPE_CQ: + cq = (struct qedr_cq *)(uintptr_t)roce_handle64; + if (cq) { + ibcq = &cq->ibcq; + if (ibcq->event_handler) { + event.device = ibcq->device; + event.element.cq = ibcq; + ibcq->event_handler(&event, ibcq->cq_context); + } + } else { + WARN(1, + "Error: CQ event with NULL pointer ibcq. Handle=%llx\n", + roce_handle64); + } + DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq); + break; + case EVENT_TYPE_QP: + qp = (struct qedr_qp *)(uintptr_t)roce_handle64; + if (qp) { + ibqp = &qp->ibqp; + if (ibqp->event_handler) { + event.device = ibqp->device; + event.element.qp = ibqp; + ibqp->event_handler(&event, ibqp->qp_context); + } + } else { + WARN(1, + "Error: QP event with NULL pointer ibqp. Handle=%llx\n", + roce_handle64); + } + DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp); + break; + default: + break; + } +} + +static int qedr_init_hw(struct qedr_dev *dev) +{ + struct qed_rdma_add_user_out_params out_params; + struct qed_rdma_start_in_params *in_params; + struct qed_rdma_cnq_params *cur_pbl; + struct qed_rdma_events events; + dma_addr_t p_phys_table; + u32 page_cnt; + int rc = 0; + int i; + + in_params = kzalloc(sizeof(*in_params), GFP_KERNEL); + if (!in_params) { + rc = -ENOMEM; + goto out; + } + + in_params->desired_cnq = dev->num_cnq; + for (i = 0; i < dev->num_cnq; i++) { + cur_pbl = &in_params->cnq_pbl_list[i]; + + page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl); + cur_pbl->num_pbl_pages = page_cnt; + + p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl); + cur_pbl->pbl_ptr = (u64)p_phys_table; + } + + events.affiliated_event = qedr_affiliated_event; + events.unaffiliated_event = qedr_unaffiliated_event; + events.context = dev; + + in_params->events = &events; + in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS; + in_params->max_mtu = dev->ndev->mtu; + ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr); + + rc = dev->ops->rdma_init(dev->cdev, in_params); + if (rc) + goto out; + + rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params); + if (rc) + goto out; + + dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr; + dev->db_phys_addr = out_params.dpi_phys_addr; + dev->db_size = out_params.dpi_size; + dev->dpi = out_params.dpi; + + rc = qedr_set_device_attr(dev); +out: + kfree(in_params); + if (rc) + DP_ERR(dev, "Init HW Failed rc = %d\n", rc); + + return rc; +} + +void qedr_stop_hw(struct qedr_dev *dev) +{ + dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi); + dev->ops->rdma_stop(dev->rdma_ctx); +} + +static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, + struct net_device *ndev) +{ + struct qed_dev_rdma_info dev_info; + struct qedr_dev *dev; + int rc = 0, i; + + dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev)); +
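The affiliated-event path round-trips a driver pointer through the firmware: when the CQ or QP is created its pointer is handed down split into two 32-bit words, and qedr_affiliated_event() reassembles them with HILO_U64() before casting back. Both directions, sketched (some_cq is a stand-in for the object being published):

	/* Pack: give the firmware the pointer as two 32-bit halves. */
	struct qedr_cq *cq = some_cq;
	u32 hi = upper_32_bits((uintptr_t)cq);
	u32 lo = lower_32_bits((uintptr_t)cq);

	/* Unpack: what the event handler does with the returned words. */
	u64 handle = ((u64)hi << 32) + lo;
	struct qedr_cq *cq2 = (struct qedr_cq *)(uintptr_t)handle;
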
if (!dev) { + pr_err("Unable to allocate ib device\n"); + return NULL; + } + + DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n"); + + dev->pdev = pdev; + dev->ndev = ndev; + dev->cdev = cdev; + + qed_ops = qed_get_rdma_ops(); + if (!qed_ops) { + DP_ERR(dev, "Failed to get qed roce operations\n"); + goto init_err; + } + + dev->ops = qed_ops; + rc = qed_ops->fill_dev_info(cdev, &dev_info); + if (rc) + goto init_err; + + dev->num_hwfns = dev_info.common.num_hwfns; + dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev); + + dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev); + if (!dev->num_cnq) { + DP_ERR(dev, "not enough CNQ resources.\n"); + goto init_err; + } + + dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT; + + qedr_pci_set_atomic(dev, pdev); + + rc = qedr_alloc_resources(dev); + if (rc) + goto init_err; + + rc = qedr_init_hw(dev); + if (rc) + goto alloc_err; + + rc = qedr_setup_irqs(dev); + if (rc) + goto irq_err; + + rc = qedr_register_device(dev); + if (rc) { + DP_ERR(dev, "Unable to allocate register device\n"); + goto reg_err; + } + + for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++) + if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) + goto sysfs_err; + + DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); + return dev; + +sysfs_err: + ib_unregister_device(&dev->ibdev); +reg_err: + qedr_sync_free_irqs(dev); +irq_err: + qedr_stop_hw(dev); +alloc_err: + qedr_free_resources(dev); +init_err: + ib_dealloc_device(&dev->ibdev); + DP_ERR(dev, "qedr driver load failed rc=%d\n", rc); + + return NULL; +} + +static void qedr_remove(struct qedr_dev *dev) +{ + /* First unregister with stack to stop all the active traffic + * of the registered clients. + */ + qedr_remove_sysfiles(dev); + ib_unregister_device(&dev->ibdev); + + qedr_stop_hw(dev); + qedr_sync_free_irqs(dev); + qedr_free_resources(dev); + ib_dealloc_device(&dev->ibdev); +} + +static int qedr_close(struct qedr_dev *dev) +{ + qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); + + return 0; +} + +static void qedr_shutdown(struct qedr_dev *dev) +{ + qedr_close(dev); + qedr_remove(dev); +} + +static void qedr_mac_address_change(struct qedr_dev *dev) +{ + union ib_gid *sgid = &dev->sgid_tbl[0]; + u8 guid[8], mac_addr[6]; + int rc; + + /* Update SGID */ + ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr); + guid[0] = mac_addr[0] ^ 2; + guid[1] = mac_addr[1]; + guid[2] = mac_addr[2]; + guid[3] = 0xff; + guid[4] = 0xfe; + guid[5] = mac_addr[3]; + guid[6] = mac_addr[4]; + guid[7] = mac_addr[5]; + sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); + memcpy(&sgid->raw[8], guid, sizeof(guid)); + + /* Update LL2 */ + rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev, + dev->gsi_ll2_mac_address, + dev->ndev->dev_addr); + + ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); + + qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE); + + if (rc) + DP_ERR(dev, "Error updating mac filter\n"); +} + +/* event handling via NIC driver ensures that all the NIC specific + * initialization done before RoCE driver notifies + * event to stack. 
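qedr_mac_address_change() rebuilds the default GID from the new MAC with the standard EUI-64 mapping: flip the universal/local bit of the first octet, splice 0xff, 0xfe into the middle, and prepend the link-local fe80::/64 prefix. As a standalone helper:

	static void mac_to_ll_gid(const u8 *mac, union ib_gid *gid)
	{
		gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000ULL);
		gid->raw[8]  = mac[0] ^ 2;	/* flip universal/local bit */
		gid->raw[9]  = mac[1];
		gid->raw[10] = mac[2];
		gid->raw[11] = 0xff;		/* EUI-64 filler bytes */
		gid->raw[12] = 0xfe;
		gid->raw[13] = mac[3];
		gid->raw[14] = mac[4];
		gid->raw[15] = mac[5];
	}
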
+ */ +static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event) +{ + switch (event) { + case QEDE_UP: + qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); + break; + case QEDE_DOWN: + qedr_close(dev); + break; + case QEDE_CLOSE: + qedr_shutdown(dev); + break; + case QEDE_CHANGE_ADDR: + qedr_mac_address_change(dev); + break; + default: + pr_err("Event not supported\n"); + } +} + +static struct qedr_driver qedr_drv = { + .name = "qedr_driver", + .add = qedr_add, + .remove = qedr_remove, + .notify = qedr_notify, +}; + +static int __init qedr_init_module(void) +{ + return qede_roce_register_driver(&qedr_drv); +} + +static void __exit qedr_exit_module(void) +{ + qede_roce_unregister_driver(&qedr_drv); +} + +module_init(qedr_init_module); +module_exit(qedr_exit_module); diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h new file mode 100644 index 000000000000..620badd7d4fb --- /dev/null +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -0,0 +1,495 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __QEDR_H__ +#define __QEDR_H__ + +#include <linux/pci.h> +#include <rdma/ib_addr.h> +#include <linux/qed/qed_if.h> +#include <linux/qed/qed_chain.h> +#include <linux/qed/qed_roce_if.h> +#include <linux/qed/qede_roce.h> +#include "qedr_hsi.h" + +#define QEDR_MODULE_VERSION "8.10.10.0" +#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA" +#define DP_NAME(dev) ((dev)->ibdev.name) + +#define DP_DEBUG(dev, module, fmt, ...) \ + pr_debug("(%s) " module ": " fmt, \ + DP_NAME(dev) ? 
DP_NAME(dev) : "", ## __VA_ARGS__) + +#define QEDR_MSG_INIT "INIT" +#define QEDR_MSG_MISC "MISC" +#define QEDR_MSG_CQ " CQ" +#define QEDR_MSG_MR " MR" +#define QEDR_MSG_RQ " RQ" +#define QEDR_MSG_SQ " SQ" +#define QEDR_MSG_QP " QP" +#define QEDR_MSG_GSI " GSI" + +#define QEDR_CQ_MAGIC_NUMBER (0x11223344) + +struct qedr_dev; + +struct qedr_cnq { + struct qedr_dev *dev; + struct qed_chain pbl; + struct qed_sb_info *sb; + char name[32]; + u64 n_comp; + __le16 *hw_cons_ptr; + u8 index; +}; + +#define QEDR_MAX_SGID 128 + +struct qedr_device_attr { + u32 vendor_id; + u32 vendor_part_id; + u32 hw_ver; + u64 fw_ver; + u64 node_guid; + u64 sys_image_guid; + u8 max_cnq; + u8 max_sge; + u16 max_inline; + u32 max_sqe; + u32 max_rqe; + u8 max_qp_resp_rd_atomic_resc; + u8 max_qp_req_rd_atomic_resc; + u64 max_dev_resp_rd_atomic_resc; + u32 max_cq; + u32 max_qp; + u32 max_mr; + u64 max_mr_size; + u32 max_cqe; + u32 max_mw; + u32 max_fmr; + u32 max_mr_mw_fmr_pbl; + u64 max_mr_mw_fmr_size; + u32 max_pd; + u32 max_ah; + u8 max_pkey; + u32 max_srq; + u32 max_srq_wr; + u8 max_srq_sge; + u8 max_stats_queues; + u32 dev_caps; + + u64 page_size_caps; + u8 dev_ack_delay; + u32 reserved_lkey; + u32 bad_pkey_counter; + struct qed_rdma_events events; +}; + +struct qedr_dev { + struct ib_device ibdev; + struct qed_dev *cdev; + struct pci_dev *pdev; + struct net_device *ndev; + + enum ib_atomic_cap atomic_cap; + + void *rdma_ctx; + struct qedr_device_attr attr; + + const struct qed_rdma_ops *ops; + struct qed_int_info int_info; + + struct qed_sb_info *sb_array; + struct qedr_cnq *cnq_array; + int num_cnq; + int sb_start; + + void __iomem *db_addr; + u64 db_phys_addr; + u32 db_size; + u16 dpi; + + union ib_gid *sgid_tbl; + + /* Lock for sgid table */ + spinlock_t sgid_lock; + + u64 guid; + + u32 dp_module; + u8 dp_level; + u8 num_hwfns; + uint wq_multiplier; + u8 gsi_ll2_mac_address[ETH_ALEN]; + int gsi_qp_created; + struct qedr_cq *gsi_sqcq; + struct qedr_cq *gsi_rqcq; + struct qedr_qp *gsi_qp; +}; + +#define QEDR_MAX_SQ_PBL (0x8000) +#define QEDR_MAX_SQ_PBL_ENTRIES (0x10000 / sizeof(void *)) +#define QEDR_SQE_ELEMENT_SIZE (sizeof(struct rdma_sq_sge)) +#define QEDR_MAX_SQE_ELEMENTS_PER_SQE (ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \ + QEDR_SQE_ELEMENT_SIZE) +#define QEDR_MAX_SQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \ + QEDR_SQE_ELEMENT_SIZE) +#define QEDR_MAX_SQE ((QEDR_MAX_SQ_PBL_ENTRIES) *\ + (RDMA_RING_PAGE_SIZE) / \ + (QEDR_SQE_ELEMENT_SIZE) /\ + (QEDR_MAX_SQE_ELEMENTS_PER_SQE)) +/* RQ */ +#define QEDR_MAX_RQ_PBL (0x2000) +#define QEDR_MAX_RQ_PBL_ENTRIES (0x10000 / sizeof(void *)) +#define QEDR_RQE_ELEMENT_SIZE (sizeof(struct rdma_rq_sge)) +#define QEDR_MAX_RQE_ELEMENTS_PER_RQE (RDMA_MAX_SGE_PER_RQ_WQE) +#define QEDR_MAX_RQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \ + QEDR_RQE_ELEMENT_SIZE) +#define QEDR_MAX_RQE ((QEDR_MAX_RQ_PBL_ENTRIES) *\ + (RDMA_RING_PAGE_SIZE) / \ + (QEDR_RQE_ELEMENT_SIZE) /\ + (QEDR_MAX_RQE_ELEMENTS_PER_RQE)) + +#define QEDR_CQE_SIZE (sizeof(union rdma_cqe)) +#define QEDR_MAX_CQE_PBL_SIZE (512 * 1024) +#define QEDR_MAX_CQE_PBL_ENTRIES (((QEDR_MAX_CQE_PBL_SIZE) / \ + sizeof(u64)) - 1) +#define QEDR_MAX_CQES ((u32)((QEDR_MAX_CQE_PBL_ENTRIES) * \ + (QED_CHAIN_PAGE_SIZE) / QEDR_CQE_SIZE)) + +#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000) + +#define QEDR_MAX_PORT (1) + +#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) + +#define QEDR_ROCE_PKEY_MAX 1 +#define QEDR_ROCE_PKEY_TABLE_LEN 1 +#define QEDR_ROCE_PKEY_DEFAULT 0xffff + +struct qedr_pbl { + struct list_head list_entry; + void 
*va; + dma_addr_t pa; +}; + +struct qedr_ucontext { + struct ib_ucontext ibucontext; + struct qedr_dev *dev; + struct qedr_pd *pd; + u64 dpi_addr; + u64 dpi_phys_addr; + u32 dpi_size; + u16 dpi; + + struct list_head mm_head; + + /* Lock to protect mm list */ + struct mutex mm_list_lock; +}; + +union db_prod64 { + struct rdma_pwm_val32_data data; + u64 raw; +}; + +enum qedr_cq_type { + QEDR_CQ_TYPE_GSI, + QEDR_CQ_TYPE_KERNEL, + QEDR_CQ_TYPE_USER, +}; + +struct qedr_pbl_info { + u32 num_pbls; + u32 num_pbes; + u32 pbl_size; + u32 pbe_size; + bool two_layered; +}; + +struct qedr_userq { + struct ib_umem *umem; + struct qedr_pbl_info pbl_info; + struct qedr_pbl *pbl_tbl; + u64 buf_addr; + size_t buf_len; +}; + +struct qedr_cq { + struct ib_cq ibcq; + + enum qedr_cq_type cq_type; + u32 sig; + + u16 icid; + + /* Lock to protect completion handler */ + spinlock_t comp_handler_lock; + + /* Lock to protect multiplem CQ's */ + spinlock_t cq_lock; + u8 arm_flags; + struct qed_chain pbl; + + void __iomem *db_addr; + union db_prod64 db; + + u8 pbl_toggle; + union rdma_cqe *latest_cqe; + union rdma_cqe *toggle_cqe; + + u32 cq_cons; + + struct qedr_userq q; +}; + +struct qedr_pd { + struct ib_pd ibpd; + u32 pd_id; + struct qedr_ucontext *uctx; +}; + +struct qedr_mm { + struct { + u64 phy_addr; + unsigned long len; + } key; + struct list_head entry; +}; + +union db_prod32 { + struct rdma_pwm_val16_data data; + u32 raw; +}; + +struct qedr_qp_hwq_info { + /* WQE Elements */ + struct qed_chain pbl; + u64 p_phys_addr_tbl; + u32 max_sges; + + /* WQE */ + u16 prod; + u16 cons; + u16 wqe_cons; + u16 gsi_cons; + u16 max_wr; + + /* DB */ + void __iomem *db; + union db_prod32 db_data; +}; + +#define QEDR_INC_SW_IDX(p_info, index) \ + do { \ + p_info->index = (p_info->index + 1) & \ + qed_chain_get_capacity(p_info->pbl) \ + } while (0) + +enum qedr_qp_err_bitmap { + QEDR_QP_ERR_SQ_FULL = 1, + QEDR_QP_ERR_RQ_FULL = 2, + QEDR_QP_ERR_BAD_SR = 4, + QEDR_QP_ERR_BAD_RR = 8, + QEDR_QP_ERR_SQ_PBL_FULL = 16, + QEDR_QP_ERR_RQ_PBL_FULL = 32, +}; + +struct qedr_qp { + struct ib_qp ibqp; /* must be first */ + struct qedr_dev *dev; + + struct qedr_qp_hwq_info sq; + struct qedr_qp_hwq_info rq; + + u32 max_inline_data; + + /* Lock for QP's */ + spinlock_t q_lock; + struct qedr_cq *sq_cq; + struct qedr_cq *rq_cq; + struct qedr_srq *srq; + enum qed_roce_qp_state state; + u32 id; + struct qedr_pd *pd; + enum ib_qp_type qp_type; + struct qed_rdma_qp *qed_qp; + u32 qp_id; + u16 icid; + u16 mtu; + int sgid_idx; + u32 rq_psn; + u32 sq_psn; + u32 qkey; + u32 dest_qp_num; + + /* Relevant to qps created from kernel space only (ULPs) */ + u8 prev_wqe_size; + u16 wqe_cons; + u32 err_bitmap; + bool signaled; + + /* SQ shadow */ + struct { + u64 wr_id; + enum ib_wc_opcode opcode; + u32 bytes_len; + u8 wqe_size; + bool signaled; + dma_addr_t icrc_mapping; + u32 *icrc; + struct qedr_mr *mr; + } *wqe_wr_id; + + /* RQ shadow */ + struct { + u64 wr_id; + struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE]; + u8 wqe_size; + + u8 smac[ETH_ALEN]; + u16 vlan_id; + int rc; + } *rqe_wr_id; + + /* Relevant to qps created from user space only (applications) */ + struct qedr_userq usq; + struct qedr_userq urq; +}; + +struct qedr_ah { + struct ib_ah ibah; + struct ib_ah_attr attr; +}; + +enum qedr_mr_type { + QEDR_MR_USER, + QEDR_MR_KERNEL, + QEDR_MR_DMA, + QEDR_MR_FRMR, +}; + +struct mr_info { + struct qedr_pbl *pbl_table; + struct qedr_pbl_info pbl_info; + struct list_head free_pbl_list; + struct list_head inuse_pbl_list; + u32 completed; + u32 completed_handled; 
+}; + +struct qedr_mr { + struct ib_mr ibmr; + struct ib_umem *umem; + + struct qed_rdma_register_tid_in_params hw_mr; + enum qedr_mr_type type; + + struct qedr_dev *dev; + struct mr_info info; + + u64 *pages; + u32 npages; +}; + +#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT))) + +#define QEDR_RESP_IMM (RDMA_CQE_RESPONDER_IMM_FLG_MASK << \ + RDMA_CQE_RESPONDER_IMM_FLG_SHIFT) +#define QEDR_RESP_RDMA (RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \ + RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT) +#define QEDR_RESP_RDMA_IMM (QEDR_RESP_IMM | QEDR_RESP_RDMA) + +static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info) +{ + info->cons = (info->cons + 1) % info->max_wr; + info->wqe_cons++; +} + +static inline void qedr_inc_sw_prod(struct qedr_qp_hwq_info *info) +{ + info->prod = (info->prod + 1) % info->max_wr; +} + +static inline int qedr_get_dmac(struct qedr_dev *dev, + struct ib_ah_attr *ah_attr, u8 *mac_addr) +{ + union ib_gid zero_sgid = { { 0 } }; + struct in6_addr in6; + + if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) { + DP_ERR(dev, "Local port GID not supported\n"); + eth_zero_addr(mac_addr); + return -EINVAL; + } + + memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6)); + ether_addr_copy(mac_addr, ah_attr->dmac); + + return 0; +} + +static inline +struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct qedr_ucontext, ibucontext); +} + +static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct qedr_dev, ibdev); +} + +static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct qedr_pd, ibpd); +} + +static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct qedr_cq, ibcq); +} + +static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct qedr_qp, ibqp); +} + +static inline struct qedr_ah *get_qedr_ah(struct ib_ah *ibah) +{ + return container_of(ibah, struct qedr_ah, ibah); +} + +static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct qedr_mr, ibmr); +} +#endif diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c new file mode 100644 index 000000000000..63890ebb72bd --- /dev/null +++ b/drivers/infiniband/hw/qedr/qedr_cm.c @@ -0,0 +1,622 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include "qedr_hsi.h"
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_cm.h"
+
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
+{
+	info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
+}
+
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
+			  struct ib_qp_init_attr *attrs)
+{
+	dev->gsi_qp_created = 1;
+	dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
+	dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
+	dev->gsi_qp = qp;
+}
+
+void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
+{
+	struct qedr_dev *dev = (struct qedr_dev *)_qdev;
+	struct qedr_cq *cq = dev->gsi_sqcq;
+	struct qedr_qp *qp = dev->gsi_qp;
+	unsigned long flags;
+
+	DP_DEBUG(dev, QEDR_MSG_GSI,
+		 "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
+		 dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
+		 cq->ibcq.comp_handler ? "Yes" : "No");
+
+	dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
+			  pkt->header.baddr);
+	kfree(pkt);
+
+	spin_lock_irqsave(&qp->q_lock, flags);
+	qedr_inc_sw_gsi_cons(&qp->sq);
+	spin_unlock_irqrestore(&qp->q_lock, flags);
+
+	if (cq->ibcq.comp_handler) {
+		spin_lock_irqsave(&cq->comp_handler_lock, flags);
+		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+	}
+}
+
+void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
+		    struct qed_roce_ll2_rx_params *params)
+{
+	struct qedr_dev *dev = (struct qedr_dev *)_dev;
+	struct qedr_cq *cq = dev->gsi_rqcq;
+	struct qedr_qp *qp = dev->gsi_qp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->q_lock, flags);
+
+	qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc;
+	qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id;
+	qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len;
+	ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac);
+
+	qedr_inc_sw_gsi_cons(&qp->rq);
+
+	spin_unlock_irqrestore(&qp->q_lock, flags);
+
+	if (cq->ibcq.comp_handler) {
+		spin_lock_irqsave(&cq->comp_handler_lock, flags);
+		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+	}
+}
+
+static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
+				struct ib_qp_init_attr *attrs)
+{
+	struct qed_rdma_destroy_cq_in_params iparams;
+	struct qed_rdma_destroy_cq_out_params oparams;
+	struct qedr_cq *cq;
+
+	cq = get_qedr_cq(attrs->send_cq);
+	iparams.icid = cq->icid;
+	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+	dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+
+	cq = get_qedr_cq(attrs->recv_cq);
+	/* if a dedicated recv_cq was used, delete it too */
+	if (iparams.icid != cq->icid) {
+		iparams.icid = cq->icid;
+		dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
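+		/* release the PBL memory backing the dedicated recv CQ too */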
dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+	}
+}
+
+static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
+					  struct ib_qp_init_attr *attrs)
+{
+	if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
+		DP_ERR(dev,
+		       "create gsi qp: failed. max_recv_sge is larger than max %d>%d\n",
+		       attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
+		return -EINVAL;
+	}
+
+	if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
+		DP_ERR(dev,
+		       "create gsi qp: failed. max_recv_wr is too large %d>%d\n",
+		       attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
+		return -EINVAL;
+	}
+
+	if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
+		DP_ERR(dev,
+		       "create gsi qp: failed. max_send_wr is too large %d>%d\n",
+		       attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
+				 struct ib_qp_init_attr *attrs,
+				 struct qedr_qp *qp)
+{
+	struct qed_roce_ll2_params ll2_params;
+	int rc;
+
+	rc = qedr_check_gsi_qp_attrs(dev, attrs);
+	if (rc)
+		return ERR_PTR(rc);
+
+	/* configure and start LL2 */
+	memset(&ll2_params, 0, sizeof(ll2_params));
+	ll2_params.max_tx_buffers = attrs->cap.max_send_wr;
+	ll2_params.max_rx_buffers = attrs->cap.max_recv_wr;
+	ll2_params.cbs.tx_cb = qedr_ll2_tx_cb;
+	ll2_params.cbs.rx_cb = qedr_ll2_rx_cb;
+	ll2_params.cb_cookie = (void *)dev;
+	ll2_params.mtu = dev->ndev->mtu;
+	ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr);
+	rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params);
+	if (rc) {
+		DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
+		return ERR_PTR(rc);
+	}
+
+	/* create QP */
+	qp->ibqp.qp_num = 1;
+	qp->rq.max_wr = attrs->cap.max_recv_wr;
+	qp->sq.max_wr = attrs->cap.max_send_wr;
+
+	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+				GFP_KERNEL);
+	if (!qp->rqe_wr_id)
+		goto err;
+	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+				GFP_KERNEL);
+	if (!qp->wqe_wr_id)
+		goto err;
+
+	qedr_store_gsi_qp_cq(dev, qp, attrs);
+	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+	/* the GSI CQs are handled by the driver so remove them from the FW */
+	qedr_destroy_gsi_cq(dev, attrs);
+	dev->gsi_sqcq->cq_type = QEDR_CQ_TYPE_GSI;
+	dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
+
+	DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
+
+	return &qp->ibqp;
+
+err:
+	kfree(qp->rqe_wr_id);
+
+	rc = dev->ops->roce_ll2_stop(dev->cdev);
+	if (rc)
+		DP_ERR(dev, "create gsi qp: failed destroy on create\n");
+
+	return ERR_PTR(-ENOMEM);
+}
+
+int qedr_destroy_gsi_qp(struct qedr_dev *dev)
+{
+	int rc;
+
+	rc = dev->ops->roce_ll2_stop(dev->cdev);
+	if (rc)
+		DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc);
+	else
+		DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n");
+
+	return rc;
+}
+
+#define QEDR_MAX_UD_HEADER_SIZE	(100)
+#define QEDR_GSI_QPN		(1)
+static inline int qedr_gsi_build_header(struct qedr_dev *dev,
+					struct qedr_qp *qp,
+					struct ib_send_wr *swr,
+					struct ib_ud_header *udh,
+					int *roce_mode)
+{
+	bool has_vlan = false, has_grh_ipv6 = true;
+	struct ib_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
+	struct ib_global_route *grh = &ah_attr->grh;
+	union ib_gid sgid;
+	int send_size = 0;
+	u16 vlan_id = 0;
+	u16 ether_type;
+	struct ib_gid_attr sgid_attr;
+	int rc;
+	int ip_ver = 0;
+
+	bool has_udp = false;
+	int i;
+
+	send_size = 0;
+	for (i = 0; i < swr->num_sge; ++i)
+		send_size += swr->sg_list[i].length;
+
+	rc = ib_get_cached_gid(qp->ibqp.device, ah_attr->port_num,
+			       grh->sgid_index, &sgid, &sgid_attr);
+	if
(rc) { + DP_ERR(dev, + "gsi post send: failed to get cached GID (port=%d, ix=%d)\n", + ah_attr->port_num, grh->sgid_index); + return rc; + } + + vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); + if (vlan_id < VLAN_CFI_MASK) + has_vlan = true; + if (sgid_attr.ndev) + dev_put(sgid_attr.ndev); + + if (!memcmp(&sgid, &zgid, sizeof(sgid))) { + DP_ERR(dev, "gsi post send: GID not found GID index %d\n", + ah_attr->grh.sgid_index); + return -ENOENT; + } + + has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP); + if (!has_udp) { + /* RoCE v1 */ + ether_type = ETH_P_ROCE; + *roce_mode = ROCE_V1; + } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) { + /* RoCE v2 IPv4 */ + ip_ver = 4; + ether_type = ETH_P_IP; + has_grh_ipv6 = false; + *roce_mode = ROCE_V2_IPV4; + } else { + /* RoCE v2 IPv6 */ + ip_ver = 6; + ether_type = ETH_P_IPV6; + *roce_mode = ROCE_V2_IPV6; + } + + rc = ib_ud_header_init(send_size, false, true, has_vlan, + has_grh_ipv6, ip_ver, has_udp, 0, udh); + if (rc) { + DP_ERR(dev, "gsi post send: failed to init header\n"); + return rc; + } + + /* ENET + VLAN headers */ + ether_addr_copy(udh->eth.dmac_h, ah_attr->dmac); + ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr); + if (has_vlan) { + udh->eth.type = htons(ETH_P_8021Q); + udh->vlan.tag = htons(vlan_id); + udh->vlan.type = htons(ether_type); + } else { + udh->eth.type = htons(ether_type); + } + + /* BTH */ + udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED); + udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT; + udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn); + udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1)); + udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY; + + /* DETH */ + udh->deth.qkey = htonl(0x80010000); + udh->deth.source_qpn = htonl(QEDR_GSI_QPN); + + if (has_grh_ipv6) { + /* GRH / IPv6 header */ + udh->grh.traffic_class = grh->traffic_class; + udh->grh.flow_label = grh->flow_label; + udh->grh.hop_limit = grh->hop_limit; + udh->grh.destination_gid = grh->dgid; + memcpy(&udh->grh.source_gid.raw, &sgid.raw, + sizeof(udh->grh.source_gid.raw)); + } else { + /* IPv4 header */ + u32 ipv4_addr; + + udh->ip4.protocol = IPPROTO_UDP; + udh->ip4.tos = htonl(ah_attr->grh.flow_label); + udh->ip4.frag_off = htons(IP_DF); + udh->ip4.ttl = ah_attr->grh.hop_limit; + + ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw); + udh->ip4.saddr = ipv4_addr; + ipv4_addr = qedr_get_ipv4_from_gid(ah_attr->grh.dgid.raw); + udh->ip4.daddr = ipv4_addr; + /* note: checksum is calculated by the device */ + } + + /* UDP */ + if (has_udp) { + udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT); + udh->udp.dport = htons(ROCE_V2_UDP_DPORT); + udh->udp.csum = 0; + /* UDP length is untouched hence is zero */ + } + return 0; +} + +static inline int qedr_gsi_build_packet(struct qedr_dev *dev, + struct qedr_qp *qp, + struct ib_send_wr *swr, + struct qed_roce_ll2_packet **p_packet) +{ + u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE]; + struct qed_roce_ll2_packet *packet; + struct pci_dev *pdev = dev->pdev; + int roce_mode, header_size; + struct ib_ud_header udh; + int i, rc; + + *p_packet = NULL; + + rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode); + if (rc) + return rc; + + header_size = ib_ud_header_pack(&udh, &ud_header_buffer); + + packet = kzalloc(sizeof(*packet), GFP_ATOMIC); + if (!packet) + return -ENOMEM; + + packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size, + &packet->header.baddr, + GFP_ATOMIC); + if (!packet->header.vaddr) { + kfree(packet); + return -ENOMEM; + } + + if (ether_addr_equal(udh.eth.smac_h, 
udh.eth.dmac_h))
+		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
+	else
+		packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+
+	packet->roce_mode = roce_mode;
+	memcpy(packet->header.vaddr, ud_header_buffer, header_size);
+	packet->header.len = header_size;
+	packet->n_seg = swr->num_sge;
+	for (i = 0; i < packet->n_seg; i++) {
+		packet->payload[i].baddr = swr->sg_list[i].addr;
+		packet->payload[i].len = swr->sg_list[i].length;
+	}
+
+	*p_packet = packet;
+
+	return 0;
+}
+
+int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+		       struct ib_send_wr **bad_wr)
+{
+	struct qed_roce_ll2_packet *pkt = NULL;
+	struct qedr_qp *qp = get_qedr_qp(ibqp);
+	struct qed_roce_ll2_tx_params params;
+	struct qedr_dev *dev = qp->dev;
+	unsigned long flags;
+	int rc;
+
+	if (qp->state != QED_ROCE_QP_STATE_RTS) {
+		*bad_wr = wr;
+		DP_ERR(dev,
+		       "gsi post send: failed to post send WR. QP state is %d and not QED_ROCE_QP_STATE_RTS\n",
+		       qp->state);
+		return -EINVAL;
+	}
+
+	if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
+		DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
+		       wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	if (wr->opcode != IB_WR_SEND) {
+		DP_ERR(dev,
+		       "gsi post send: failed due to unsupported opcode %d\n",
+		       wr->opcode);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	memset(&params, 0, sizeof(params));
+
+	spin_lock_irqsave(&qp->q_lock, flags);
+
+	rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
+	if (rc) {
+		spin_unlock_irqrestore(&qp->q_lock, flags);
+		goto err;
+	}
+
+	rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params);
+	if (!rc) {
+		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+		qedr_inc_sw_prod(&qp->sq);
+		DP_DEBUG(qp->dev, QEDR_MSG_GSI,
+			 "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
+			 wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
+	} else {
+		if (rc == QED_ROCE_TX_HEAD_FAILURE) {
+			/* TX failed while posting header - release resources */
+			dma_free_coherent(&dev->pdev->dev, pkt->header.len,
+					  pkt->header.vaddr, pkt->header.baddr);
+			kfree(pkt);
+		} else if (rc == QED_ROCE_TX_FRAG_FAILURE) {
+			/* Nothing to do here since TX failed while posting a
+			 * fragment; the resources are released in the TX
+			 * callback.
+			 */
+		}
+
+		DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
+		rc = -EAGAIN;
+		*bad_wr = wr;
+	}
+
+	spin_unlock_irqrestore(&qp->q_lock, flags);
+
+	if (wr->next) {
+		DP_ERR(dev,
+		       "gsi post send: failed second WR. Only one WR may be passed at a time\n");
+		*bad_wr = wr->next;
+		rc = -EINVAL;
+	}
+
+	return rc;
+
+err:
+	*bad_wr = wr;
+	return rc;
+}
+
+int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+		       struct ib_recv_wr **bad_wr)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+	struct qedr_qp *qp = get_qedr_qp(ibqp);
+	struct qed_roce_ll2_buffer buf;
+	unsigned long flags;
+	int status = 0;
+	int rc;
+
+	if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
+	    (qp->state != QED_ROCE_QP_STATE_RTS)) {
+		*bad_wr = wr;
+		DP_ERR(dev,
+		       "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
+		       qp->state);
+		return -EINVAL;
+	}
+
+	memset(&buf, 0, sizeof(buf));
+
+	spin_lock_irqsave(&qp->q_lock, flags);
+
+	while (wr) {
+		if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
+			DP_ERR(dev,
+			       "gsi post recv: failed to post rx buffer. 
too many sges %d>%d\n", + wr->num_sge, QEDR_GSI_MAX_RECV_SGE); + goto err; + } + + buf.baddr = wr->sg_list[0].addr; + buf.len = wr->sg_list[0].length; + + rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1); + if (rc) { + DP_ERR(dev, + "gsi post recv: failed to post rx buffer (rc=%d)\n", + rc); + goto err; + } + + memset(&qp->rqe_wr_id[qp->rq.prod], 0, + sizeof(qp->rqe_wr_id[qp->rq.prod])); + qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0]; + qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id; + + qedr_inc_sw_prod(&qp->rq); + + wr = wr->next; + } + + spin_unlock_irqrestore(&qp->q_lock, flags); + + return status; +err: + spin_unlock_irqrestore(&qp->q_lock, flags); + *bad_wr = wr; + return -ENOMEM; +} + +int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) +{ + struct qedr_dev *dev = get_qedr_dev(ibcq->device); + struct qedr_cq *cq = get_qedr_cq(ibcq); + struct qedr_qp *qp = dev->gsi_qp; + unsigned long flags; + int i = 0; + + spin_lock_irqsave(&cq->cq_lock, flags); + + while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) { + memset(&wc[i], 0, sizeof(*wc)); + + wc[i].qp = &qp->ibqp; + wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id; + wc[i].opcode = IB_WC_RECV; + wc[i].pkey_index = 0; + wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ? + IB_WC_GENERAL_ERR : IB_WC_SUCCESS; + /* 0 - currently only one recv sg is supported */ + wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length; + wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK; + ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac); + wc[i].wc_flags |= IB_WC_WITH_SMAC; + if (qp->rqe_wr_id[qp->rq.cons].vlan_id) { + wc[i].wc_flags |= IB_WC_WITH_VLAN; + wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id; + } + + qedr_inc_sw_cons(&qp->rq); + i++; + } + + while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) { + memset(&wc[i], 0, sizeof(*wc)); + + wc[i].qp = &qp->ibqp; + wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id; + wc[i].opcode = IB_WC_SEND; + wc[i].status = IB_WC_SUCCESS; + + qedr_inc_sw_cons(&qp->sq); + i++; + } + + spin_unlock_irqrestore(&cq->cq_lock, flags); + + DP_DEBUG(dev, QEDR_MSG_GSI, + "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n", + num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons, + qp->sq.gsi_cons, qp->ibqp.qp_num); + + return i; +} diff --git a/drivers/infiniband/hw/qedr/qedr_cm.h b/drivers/infiniband/hw/qedr/qedr_cm.h new file mode 100644 index 000000000000..9ba6e15cd93f --- /dev/null +++ b/drivers/infiniband/hw/qedr/qedr_cm.h @@ -0,0 +1,61 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef LINUX_QEDR_CM_H_ +#define LINUX_QEDR_CM_H_ + +#define QEDR_GSI_MAX_RECV_WR (4096) +#define QEDR_GSI_MAX_SEND_WR (4096) + +#define QEDR_GSI_MAX_RECV_SGE (1) /* LL2 FW limitation */ + +#define ETH_P_ROCE (0x8915) +#define QEDR_ROCE_V2_UDP_SPORT (0000) + +static inline u32 qedr_get_ipv4_from_gid(u8 *gid) +{ + return *(u32 *)(void *)&gid[12]; +} + +/* RDMA CM */ +int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); +int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); +int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr); +struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, + struct ib_qp_init_attr *attrs, + struct qedr_qp *qp); +void qedr_store_gsi_qp_cq(struct qedr_dev *dev, + struct qedr_qp *qp, struct ib_qp_init_attr *attrs); +int qedr_destroy_gsi_qp(struct qedr_dev *dev); +void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info); +#endif diff --git a/drivers/infiniband/hw/qedr/qedr_hsi.h b/drivers/infiniband/hw/qedr/qedr_hsi.h new file mode 100644 index 000000000000..66d27521373f --- /dev/null +++ b/drivers/infiniband/hw/qedr/qedr_hsi.h @@ -0,0 +1,56 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef __QED_HSI_ROCE__ +#define __QED_HSI_ROCE__ + +#include <linux/qed/common_hsi.h> +#include <linux/qed/roce_common.h> +#include "qedr_hsi_rdma.h" + +/* Affiliated asynchronous events / errors enumeration */ +enum roce_async_events_type { + ROCE_ASYNC_EVENT_NONE = 0, + ROCE_ASYNC_EVENT_COMM_EST = 1, + ROCE_ASYNC_EVENT_SQ_DRAINED, + ROCE_ASYNC_EVENT_SRQ_LIMIT, + ROCE_ASYNC_EVENT_LAST_WQE_REACHED, + ROCE_ASYNC_EVENT_CQ_ERR, + ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR, + ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR, + ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR, + ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR, + ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR, + ROCE_ASYNC_EVENT_SRQ_EMPTY, + MAX_ROCE_ASYNC_EVENTS_TYPE +}; + +#endif /* __QED_HSI_ROCE__ */ diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h new file mode 100644 index 000000000000..5c98d2055cad --- /dev/null +++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h @@ -0,0 +1,748 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef __QED_HSI_RDMA__ +#define __QED_HSI_RDMA__ + +#include <linux/qed/rdma_common.h> + +/* rdma completion notification queue element */ +struct rdma_cnqe { + struct regpair cq_handle; +}; + +struct rdma_cqe_responder { + struct regpair srq_wr_id; + struct regpair qp_handle; + __le32 imm_data_or_inv_r_Key; + __le32 length; + __le32 imm_data_hi; + __le16 rq_cons; + u8 flags; +#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK 0x1 +#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0 +#define RDMA_CQE_RESPONDER_TYPE_MASK 0x3 +#define RDMA_CQE_RESPONDER_TYPE_SHIFT 1 +#define RDMA_CQE_RESPONDER_INV_FLG_MASK 0x1 +#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT 3 +#define RDMA_CQE_RESPONDER_IMM_FLG_MASK 0x1 +#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT 4 +#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK 0x1 +#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT 5 +#define RDMA_CQE_RESPONDER_RESERVED2_MASK 0x3 +#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT 6 + u8 status; +}; + +struct rdma_cqe_requester { + __le16 sq_cons; + __le16 reserved0; + __le32 reserved1; + struct regpair qp_handle; + struct regpair reserved2; + __le32 reserved3; + __le16 reserved4; + u8 flags; +#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK 0x1 +#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0 +#define RDMA_CQE_REQUESTER_TYPE_MASK 0x3 +#define RDMA_CQE_REQUESTER_TYPE_SHIFT 1 +#define RDMA_CQE_REQUESTER_RESERVED5_MASK 0x1F +#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT 3 + u8 status; +}; + +struct rdma_cqe_common { + struct regpair reserved0; + struct regpair qp_handle; + __le16 reserved1[7]; + u8 flags; +#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK 0x1 +#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0 +#define RDMA_CQE_COMMON_TYPE_MASK 0x3 +#define RDMA_CQE_COMMON_TYPE_SHIFT 1 +#define RDMA_CQE_COMMON_RESERVED2_MASK 0x1F +#define RDMA_CQE_COMMON_RESERVED2_SHIFT 3 + u8 status; +}; + +/* rdma completion queue element */ +union rdma_cqe { + struct rdma_cqe_responder resp; + struct rdma_cqe_requester req; + struct rdma_cqe_common cmn; +}; + +/* * CQE requester status enumeration */ +enum rdma_cqe_requester_status_enum { + RDMA_CQE_REQ_STS_OK, + RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR, + RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR, + RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR, + RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR, + RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR, + RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR, + RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR, + RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR, + RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR, + RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR, + RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR, + MAX_RDMA_CQE_REQUESTER_STATUS_ENUM +}; + +/* CQE responder status enumeration */ +enum rdma_cqe_responder_status_enum { + RDMA_CQE_RESP_STS_OK, + RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR, + RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR, + RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR, + RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR, + RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR, + RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR, + RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR, + MAX_RDMA_CQE_RESPONDER_STATUS_ENUM +}; + +/* CQE type enumeration */ +enum rdma_cqe_type { + RDMA_CQE_TYPE_REQUESTER, + RDMA_CQE_TYPE_RESPONDER_RQ, + RDMA_CQE_TYPE_RESPONDER_SRQ, + RDMA_CQE_TYPE_INVALID, + MAX_RDMA_CQE_TYPE +}; + +struct rdma_sq_sge { + __le32 length; + struct regpair addr; + __le32 l_key; +}; + +struct rdma_rq_sge { + struct regpair addr; + __le32 length; + __le32 flags; +#define RDMA_RQ_SGE_L_KEY_MASK 0x3FFFFFF +#define RDMA_RQ_SGE_L_KEY_SHIFT 0 +#define RDMA_RQ_SGE_NUM_SGES_MASK 0x7 +#define RDMA_RQ_SGE_NUM_SGES_SHIFT 26 +#define 
RDMA_RQ_SGE_RESERVED0_MASK 0x7 +#define RDMA_RQ_SGE_RESERVED0_SHIFT 29 +}; + +struct rdma_srq_sge { + struct regpair addr; + __le32 length; + __le32 l_key; +}; + +/* Rdma doorbell data for SQ and RQ */ +struct rdma_pwm_val16_data { + __le16 icid; + __le16 value; +}; + +union rdma_pwm_val16_data_union { + struct rdma_pwm_val16_data as_struct; + __le32 as_dword; +}; + +/* Rdma doorbell data for CQ */ +struct rdma_pwm_val32_data { + __le16 icid; + u8 agg_flags; + u8 params; +#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3 +#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0 +#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1 +#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2 +#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x1F +#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 3 + __le32 value; +}; + +/* DIF Block size options */ +enum rdma_dif_block_size { + RDMA_DIF_BLOCK_512 = 0, + RDMA_DIF_BLOCK_4096 = 1, + MAX_RDMA_DIF_BLOCK_SIZE +}; + +/* DIF CRC initial value */ +enum rdma_dif_crc_seed { + RDMA_DIF_CRC_SEED_0000 = 0, + RDMA_DIF_CRC_SEED_FFFF = 1, + MAX_RDMA_DIF_CRC_SEED +}; + +/* RDMA DIF Error Result Structure */ +struct rdma_dif_error_result { + __le32 error_intervals; + __le32 dif_error_1st_interval; + u8 flags; +#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK 0x1 +#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT 0 +#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK 0x1 +#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1 +#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK 0x1 +#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2 +#define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK 0xF +#define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT 3 +#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK 0x1 +#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT 7 + u8 reserved1[55]; +}; + +/* DIF IO direction */ +enum rdma_dif_io_direction_flg { + RDMA_DIF_DIR_RX = 0, + RDMA_DIF_DIR_TX = 1, + MAX_RDMA_DIF_IO_DIRECTION_FLG +}; + +/* RDMA DIF Runt Result Structure */ +struct rdma_dif_runt_result { + __le16 guard_tag; + __le16 reserved[3]; +}; + +/* Memory window type enumeration */ +enum rdma_mw_type { + RDMA_MW_TYPE_1, + RDMA_MW_TYPE_2A, + MAX_RDMA_MW_TYPE +}; + +struct rdma_sq_atomic_wqe { + __le32 reserved1; + __le32 length; + __le32 xrc_srq; + u8 req_type; + u8 flags; +#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT 0 +#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT 3 +#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK 0x3 +#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT 6 + u8 wqe_size; + u8 prev_wqe_size; + struct regpair remote_va; + __le32 r_key; + __le32 reserved2; + struct regpair cmp_data; + struct regpair swap_data; +}; + +/* First element (16 bytes) of atomic wqe */ +struct rdma_sq_atomic_wqe_1st { + __le32 reserved1; + __le32 length; + __le32 xrc_srq; + u8 req_type; + u8 flags; +#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT 0 +#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK 0x1 +#define 
RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT 3 +#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK 0x7 +#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT 5 + u8 wqe_size; + u8 prev_wqe_size; +}; + +/* Second element (16 bytes) of atomic wqe */ +struct rdma_sq_atomic_wqe_2nd { + struct regpair remote_va; + __le32 r_key; + __le32 reserved2; +}; + +/* Third element (16 bytes) of atomic wqe */ +struct rdma_sq_atomic_wqe_3rd { + struct regpair cmp_data; + struct regpair swap_data; +}; + +struct rdma_sq_bind_wqe { + struct regpair addr; + __le32 l_key; + u8 req_type; + u8 flags; +#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT 0 +#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_BIND_WQE_SE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT 3 +#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x7 +#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 5 + u8 wqe_size; + u8 prev_wqe_size; + u8 bind_ctrl; +#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK 0x1 +#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT 0 +#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK 0x1 +#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT 1 +#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x3F +#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 2 + u8 access_ctrl; +#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK 0x1 +#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT 0 +#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK 0x1 +#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT 1 +#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK 0x1 +#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2 +#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK 0x1 +#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT 3 +#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK 0x1 +#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT 4 +#define RDMA_SQ_BIND_WQE_RESERVED2_MASK 0x7 +#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT 5 + u8 reserved3; + u8 length_hi; + __le32 length_lo; + __le32 parent_l_key; + __le32 reserved4; +}; + +/* First element (16 bytes) of bind wqe */ +struct rdma_sq_bind_wqe_1st { + struct regpair addr; + __le32 l_key; + u8 req_type; + u8 flags; +#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT 0 +#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT 3 +#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK 0x7 +#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT 5 + u8 wqe_size; + u8 prev_wqe_size; +}; + +/* Second element (16 bytes) of bind wqe */ +struct rdma_sq_bind_wqe_2nd { + u8 bind_ctrl; +#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1 +#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0 +#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK 0x1 +#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT 1 +#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x3F +#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 2 + u8 access_ctrl; +#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK 0x1 +#define 
RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT 0 +#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK 0x1 +#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT 1 +#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK 0x1 +#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2 +#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK 0x1 +#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT 3 +#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK 0x1 +#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT 4 +#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK 0x7 +#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT 5 + u8 reserved3; + u8 length_hi; + __le32 length_lo; + __le32 parent_l_key; + __le32 reserved4; +}; + +/* Structure with only the SQ WQE common + * fields. Size is of one SQ element (16B) + */ +struct rdma_sq_common_wqe { + __le32 reserved1[3]; + u8 req_type; + u8 flags; +#define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK 0x1 +#define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT 0 +#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK 0x1 +#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT 3 +#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_COMMON_WQE_RESERVED0_MASK 0x7 +#define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT 5 + u8 wqe_size; + u8 prev_wqe_size; +}; + +struct rdma_sq_fmr_wqe { + struct regpair addr; + __le32 l_key; + u8 req_type; + u8 flags; +#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT 0 +#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_FMR_WQE_SE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT 3 +#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_FMR_WQE_RESERVED0_MASK 0x3 +#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT 6 + u8 wqe_size; + u8 prev_wqe_size; + u8 fmr_ctrl; +#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK 0x1F +#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT 0 +#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK 0x1 +#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT 5 +#define RDMA_SQ_FMR_WQE_BIND_EN_MASK 0x1 +#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT 6 +#define RDMA_SQ_FMR_WQE_RESERVED1_MASK 0x1 +#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT 7 + u8 access_ctrl; +#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK 0x1 +#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT 0 +#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK 0x1 +#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT 1 +#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK 0x1 +#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT 2 +#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK 0x1 +#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT 3 +#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK 0x1 +#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT 4 +#define RDMA_SQ_FMR_WQE_RESERVED2_MASK 0x7 +#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT 5 + u8 reserved3; + u8 length_hi; + __le32 length_lo; + struct regpair pbl_addr; + __le32 dif_base_ref_tag; + __le16 dif_app_tag; + __le16 dif_app_tag_mask; + __le16 dif_runt_crc_value; + __le16 dif_flags; +#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0 +#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1 +#define 
RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5 +#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6 +#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0x1FF +#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 7 + __le32 Reserved5; +}; + +/* First element (16 bytes) of fmr wqe */ +struct rdma_sq_fmr_wqe_1st { + struct regpair addr; + __le32 l_key; + u8 req_type; + u8 flags; +#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT 0 +#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT 3 +#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK 0x3 +#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT 6 + u8 wqe_size; + u8 prev_wqe_size; +}; + +/* Second element (16 bytes) of fmr wqe */ +struct rdma_sq_fmr_wqe_2nd { + u8 fmr_ctrl; +#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK 0x1F +#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0 +#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT 5 +#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT 6 +#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT 7 + u8 access_ctrl; +#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT 0 +#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT 1 +#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2 +#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT 3 +#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT 4 +#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK 0x7 +#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT 5 + u8 reserved3; + u8 length_hi; + __le32 length_lo; + struct regpair pbl_addr; +}; + +/* Third element (16 bytes) of fmr wqe */ +struct rdma_sq_fmr_wqe_3rd { + __le32 dif_base_ref_tag; + __le16 dif_app_tag; + __le16 dif_app_tag_mask; + __le16 dif_runt_crc_value; + __le16 dif_flags; +#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0 +#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1 +#define 
RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5 +#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6 +#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF +#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7 + __le32 Reserved5; +}; + +struct rdma_sq_local_inv_wqe { + struct regpair reserved; + __le32 inv_l_key; + u8 req_type; + u8 flags; +#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK 0x1 +#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT 0 +#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK 0x1 +#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT 3 +#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK 0x3 +#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT 6 + u8 wqe_size; + u8 prev_wqe_size; +}; + +struct rdma_sq_rdma_wqe { + __le32 imm_data; + __le32 length; + __le32 xrc_srq; + u8 req_type; + u8 flags; +#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0 +#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3 +#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x3 +#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 6 + u8 wqe_size; + u8 prev_wqe_size; + struct regpair remote_va; + __le32 r_key; + u8 dif_flags; +#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT 0 +#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1 +#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT 2 +#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1F +#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 3 + u8 reserved2[3]; +}; + +/* First element (16 bytes) of rdma wqe */ +struct rdma_sq_rdma_wqe_1st { + __le32 imm_data; + __le32 length; + __le32 xrc_srq; + u8 req_type; + u8 flags; +#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT 0 +#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT 3 +#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x3 +#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 6 + u8 wqe_size; + u8 prev_wqe_size; +}; + +/* Second element (16 bytes) of rdma wqe */ +struct rdma_sq_rdma_wqe_2nd { + struct regpair remote_va; + __le32 r_key; + u8 dif_flags; +#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK 0x1 +#define 
RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT 0 +#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1 +#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT 2 +#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK 0x1F +#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT 3 + u8 reserved2[3]; +}; + +/* SQ WQE req type enumeration */ +enum rdma_sq_req_type { + RDMA_SQ_REQ_TYPE_SEND, + RDMA_SQ_REQ_TYPE_SEND_WITH_IMM, + RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE, + RDMA_SQ_REQ_TYPE_RDMA_WR, + RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM, + RDMA_SQ_REQ_TYPE_RDMA_RD, + RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP, + RDMA_SQ_REQ_TYPE_ATOMIC_ADD, + RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE, + RDMA_SQ_REQ_TYPE_FAST_MR, + RDMA_SQ_REQ_TYPE_BIND, + RDMA_SQ_REQ_TYPE_INVALID, + MAX_RDMA_SQ_REQ_TYPE +}; + +struct rdma_sq_send_wqe { + __le32 inv_key_or_imm_data; + __le32 length; + __le32 xrc_srq; + u8 req_type; + u8 flags; +#define RDMA_SQ_SEND_WQE_COMP_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT 0 +#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_SEND_WQE_SE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT 3 +#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_SEND_WQE_RESERVED0_MASK 0x3 +#define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT 6 + u8 wqe_size; + u8 prev_wqe_size; + __le32 reserved1[4]; +}; + +struct rdma_sq_send_wqe_1st { + __le32 inv_key_or_imm_data; + __le32 length; + __le32 xrc_srq; + u8 req_type; + u8 flags; +#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT 0 +#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT 3 +#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK 0x7 +#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT 5 + u8 wqe_size; + u8 prev_wqe_size; +}; + +struct rdma_sq_send_wqe_2st { + __le32 reserved1[4]; +}; + +#endif /* __QED_HSI_RDMA__ */ diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c new file mode 100644 index 000000000000..a61514296767 --- /dev/null +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -0,0 +1,3547 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/dma-mapping.h> +#include <linux/crc32.h> +#include <net/ip.h> +#include <net/ipv6.h> +#include <net/udp.h> +#include <linux/iommu.h> + +#include <rdma/ib_verbs.h> +#include <rdma/ib_user_verbs.h> +#include <rdma/iw_cm.h> +#include <rdma/ib_umem.h> +#include <rdma/ib_addr.h> +#include <rdma/ib_cache.h> + +#include "qedr_hsi.h" +#include <linux/qed/qed_if.h> +#include "qedr.h" +#include "verbs.h" +#include <rdma/qedr-abi.h> +#include "qedr_cm.h" + +#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT) + +int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) +{ + if (index > QEDR_ROCE_PKEY_TABLE_LEN) + return -EINVAL; + + *pkey = QEDR_ROCE_PKEY_DEFAULT; + return 0; +} + +int qedr_query_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *sgid) +{ + struct qedr_dev *dev = get_qedr_dev(ibdev); + int rc = 0; + + if (!rdma_cap_roce_gid_table(ibdev, port)) + return -ENODEV; + + rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL); + if (rc == -EAGAIN) { + memcpy(sgid, &zgid, sizeof(*sgid)); + return 0; + } + + DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index, + sgid->global.interface_id, sgid->global.subnet_prefix); + + return rc; +} + +int qedr_add_gid(struct ib_device *device, u8 port_num, + unsigned int index, const union ib_gid *gid, + const struct ib_gid_attr *attr, void **context) +{ + if (!rdma_cap_roce_gid_table(device, port_num)) + return -EINVAL; + + if (port_num > QEDR_MAX_PORT) + return -EINVAL; + + if (!context) + return -EINVAL; + + return 0; +} + +int qedr_del_gid(struct ib_device *device, u8 port_num, + unsigned int index, void **context) +{ + if (!rdma_cap_roce_gid_table(device, port_num)) + return -EINVAL; + + if (port_num > QEDR_MAX_PORT) + return -EINVAL; + + if (!context) + return -EINVAL; + + return 0; +} + +int qedr_query_device(struct ib_device *ibdev, + struct ib_device_attr *attr, struct ib_udata *udata) +{ + struct qedr_dev *dev = get_qedr_dev(ibdev); + struct qedr_device_attr *qattr = &dev->attr; + + if (!dev->rdma_ctx) { + DP_ERR(dev, + "qedr_query_device called with invalid params rdma_ctx=%p\n", + dev->rdma_ctx); + return -EINVAL; + } + + memset(attr, 0, sizeof(*attr)); + + attr->fw_ver = qattr->fw_ver; + attr->sys_image_guid = qattr->sys_image_guid; + attr->max_mr_size = qattr->max_mr_size; + attr->page_size_cap = qattr->page_size_caps; + attr->vendor_id = qattr->vendor_id; + attr->vendor_part_id = qattr->vendor_part_id; + attr->hw_ver = qattr->hw_ver; + attr->max_qp = qattr->max_qp; + attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe); + attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD | + IB_DEVICE_RC_RNR_NAK_GEN | + IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS; + + attr->max_sge = qattr->max_sge; + attr->max_sge_rd = qattr->max_sge; + 
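+	/* the device reports one SGE limit; expose it for both send and
+	 * RDMA-read work requests
+	 */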
attr->max_cq = qattr->max_cq; + attr->max_cqe = qattr->max_cqe; + attr->max_mr = qattr->max_mr; + attr->max_mw = qattr->max_mw; + attr->max_pd = qattr->max_pd; + attr->atomic_cap = dev->atomic_cap; + attr->max_fmr = qattr->max_fmr; + attr->max_map_per_fmr = 16; + attr->max_qp_init_rd_atom = + 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1); + attr->max_qp_rd_atom = + min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1), + attr->max_qp_init_rd_atom); + + attr->max_srq = qattr->max_srq; + attr->max_srq_sge = qattr->max_srq_sge; + attr->max_srq_wr = qattr->max_srq_wr; + + attr->local_ca_ack_delay = qattr->dev_ack_delay; + attr->max_fast_reg_page_list_len = qattr->max_mr / 8; + attr->max_pkeys = QEDR_ROCE_PKEY_MAX; + attr->max_ah = qattr->max_ah; + + return 0; +} + +#define QEDR_SPEED_SDR (1) +#define QEDR_SPEED_DDR (2) +#define QEDR_SPEED_QDR (4) +#define QEDR_SPEED_FDR10 (8) +#define QEDR_SPEED_FDR (16) +#define QEDR_SPEED_EDR (32) + +static inline void get_link_speed_and_width(int speed, u8 *ib_speed, + u8 *ib_width) +{ + switch (speed) { + case 1000: + *ib_speed = QEDR_SPEED_SDR; + *ib_width = IB_WIDTH_1X; + break; + case 10000: + *ib_speed = QEDR_SPEED_QDR; + *ib_width = IB_WIDTH_1X; + break; + + case 20000: + *ib_speed = QEDR_SPEED_DDR; + *ib_width = IB_WIDTH_4X; + break; + + case 25000: + *ib_speed = QEDR_SPEED_EDR; + *ib_width = IB_WIDTH_1X; + break; + + case 40000: + *ib_speed = QEDR_SPEED_QDR; + *ib_width = IB_WIDTH_4X; + break; + + case 50000: + *ib_speed = QEDR_SPEED_QDR; + *ib_width = IB_WIDTH_4X; + break; + + case 100000: + *ib_speed = QEDR_SPEED_EDR; + *ib_width = IB_WIDTH_4X; + break; + + default: + /* Unsupported */ + *ib_speed = QEDR_SPEED_SDR; + *ib_width = IB_WIDTH_1X; + } +} + +int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr) +{ + struct qedr_dev *dev; + struct qed_rdma_port *rdma_port; + + dev = get_qedr_dev(ibdev); + if (port > 1) { + DP_ERR(dev, "invalid_port=0x%x\n", port); + return -EINVAL; + } + + if (!dev->rdma_ctx) { + DP_ERR(dev, "rdma_ctx is NULL\n"); + return -EINVAL; + } + + rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx); + memset(attr, 0, sizeof(*attr)); + + if (rdma_port->port_state == QED_RDMA_PORT_UP) { + attr->state = IB_PORT_ACTIVE; + attr->phys_state = 5; + } else { + attr->state = IB_PORT_DOWN; + attr->phys_state = 3; + } + attr->max_mtu = IB_MTU_4096; + attr->active_mtu = iboe_get_mtu(dev->ndev->mtu); + attr->lid = 0; + attr->lmc = 0; + attr->sm_lid = 0; + attr->sm_sl = 0; + attr->port_cap_flags = IB_PORT_IP_BASED_GIDS; + attr->gid_tbl_len = QEDR_MAX_SGID; + attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN; + attr->bad_pkey_cntr = rdma_port->pkey_bad_counter; + attr->qkey_viol_cntr = 0; + get_link_speed_and_width(rdma_port->link_speed, + &attr->active_speed, &attr->active_width); + attr->max_msg_sz = rdma_port->max_msg_size; + attr->max_vl_num = 4; + + return 0; +} + +int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask, + struct ib_port_modify *props) +{ + struct qedr_dev *dev; + + dev = get_qedr_dev(ibdev); + if (port > 1) { + DP_ERR(dev, "invalid_port=0x%x\n", port); + return -EINVAL; + } + + return 0; +} + +static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr, + unsigned long len) +{ + struct qedr_mm *mm; + + mm = kzalloc(sizeof(*mm), GFP_KERNEL); + if (!mm) + return -ENOMEM; + + mm->key.phy_addr = phy_addr; + /* This function might be called with a length which is not a multiple + * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel + * forces this granularity by 
increasing the requested size if needed.
+	 * When qedr_mmap is called, it will search the list with the updated
+	 * length as a key. To prevent search failures, the length is rounded
+	 * up in advance to PAGE_SIZE.
+	 */
+	mm->key.len = roundup(len, PAGE_SIZE);
+	INIT_LIST_HEAD(&mm->entry);
+
+	mutex_lock(&uctx->mm_list_lock);
+	list_add(&mm->entry, &uctx->mm_head);
+	mutex_unlock(&uctx->mm_list_lock);
+
+	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+		 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
+		 (unsigned long long)mm->key.phy_addr,
+		 (unsigned long)mm->key.len, uctx);
+
+	return 0;
+}
+
+static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
+			     unsigned long len)
+{
+	bool found = false;
+	struct qedr_mm *mm;
+
+	mutex_lock(&uctx->mm_list_lock);
+	list_for_each_entry(mm, &uctx->mm_head, entry) {
+		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+			continue;
+
+		found = true;
+		break;
+	}
+	mutex_unlock(&uctx->mm_list_lock);
+	/* the loop cursor is not valid once the traversal has completed, so
+	 * log the search key itself rather than dereferencing it.
+	 */
+	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+		 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
+		 phy_addr, len, uctx, found);
+
+	return found;
+}
+
+struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
+					struct ib_udata *udata)
+{
+	int rc;
+	struct qedr_ucontext *ctx;
+	struct qedr_alloc_ucontext_resp uresp;
+	struct qedr_dev *dev = get_qedr_dev(ibdev);
+	struct qed_rdma_add_user_out_params oparams;
+
+	if (!udata)
+		return ERR_PTR(-EFAULT);
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
+	if (rc) {
+		DP_ERR(dev,
+		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size, or closing unnecessary RoCE applications. 
In order to increase the number of DPIs consult the qedr readme\n", + rc); + goto err; + } + + ctx->dpi = oparams.dpi; + ctx->dpi_addr = oparams.dpi_addr; + ctx->dpi_phys_addr = oparams.dpi_phys_addr; + ctx->dpi_size = oparams.dpi_size; + INIT_LIST_HEAD(&ctx->mm_head); + mutex_init(&ctx->mm_list_lock); + + memset(&uresp, 0, sizeof(uresp)); + + uresp.db_pa = ctx->dpi_phys_addr; + uresp.db_size = ctx->dpi_size; + uresp.max_send_wr = dev->attr.max_sqe; + uresp.max_recv_wr = dev->attr.max_rqe; + uresp.max_srq_wr = dev->attr.max_srq_wr; + uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE; + uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE; + uresp.sges_per_srq_wr = dev->attr.max_srq_sge; + uresp.max_cqes = QEDR_MAX_CQES; + + rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (rc) + goto err; + + ctx->dev = dev; + + rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size); + if (rc) + goto err; + + DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n", + &ctx->ibucontext); + return &ctx->ibucontext; + +err: + kfree(ctx); + return ERR_PTR(rc); +} + +int qedr_dealloc_ucontext(struct ib_ucontext *ibctx) +{ + struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx); + struct qedr_mm *mm, *tmp; + int status = 0; + + DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n", + uctx); + uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi); + + list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) { + DP_DEBUG(uctx->dev, QEDR_MSG_MISC, + "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n", + mm->key.phy_addr, mm->key.len, uctx); + list_del(&mm->entry); + kfree(mm); + } + + kfree(uctx); + return status; +} + +int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) +{ + struct qedr_ucontext *ucontext = get_qedr_ucontext(context); + struct qedr_dev *dev = get_qedr_dev(context->device); + unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT; + u64 unmapped_db = dev->db_phys_addr; + unsigned long len = (vma->vm_end - vma->vm_start); + int rc = 0; + bool found; + + DP_DEBUG(dev, QEDR_MSG_INIT, + "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n", + vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len); + if (vma->vm_start & (PAGE_SIZE - 1)) { + DP_ERR(dev, "Vma_start not page aligned = %ld\n", + vma->vm_start); + return -EINVAL; + } + + found = qedr_search_mmap(ucontext, vm_page, len); + if (!found) { + DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n", + vma->vm_pgoff); + return -EINVAL; + } + + DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n"); + + if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db + + dev->db_size))) { + DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n"); + if (vma->vm_flags & VM_READ) { + DP_ERR(dev, "Trying to map doorbell bar for read\n"); + return -EPERM; + } + + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + PAGE_SIZE, vma->vm_page_prot); + } else { + DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n"); + rc = remap_pfn_range(vma, vma->vm_start, + vma->vm_pgoff, len, vma->vm_page_prot); + } + DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc); + return rc; +} + +struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, struct ib_udata *udata) +{ + struct qedr_dev *dev = get_qedr_dev(ibdev); + struct qedr_ucontext *uctx = NULL; + struct qedr_alloc_pd_uresp uresp; + struct qedr_pd *pd; + u16 pd_id; + int rc; + + DP_DEBUG(dev, QEDR_MSG_INIT, "Function called 
from: %s\n", + (udata && context) ? "User Lib" : "Kernel"); + + if (!dev->rdma_ctx) { + DP_ERR(dev, "invlaid RDMA context\n"); + return ERR_PTR(-EINVAL); + } + + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + return ERR_PTR(-ENOMEM); + + dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); + + uresp.pd_id = pd_id; + pd->pd_id = pd_id; + + if (udata && context) { + rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (rc) + DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); + uctx = get_qedr_ucontext(context); + uctx->pd = pd; + pd->uctx = uctx; + } + + return &pd->ibpd; +} + +int qedr_dealloc_pd(struct ib_pd *ibpd) +{ + struct qedr_dev *dev = get_qedr_dev(ibpd->device); + struct qedr_pd *pd = get_qedr_pd(ibpd); + + if (!pd) + pr_err("Invalid PD received in dealloc_pd\n"); + + DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id); + dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id); + + kfree(pd); + + return 0; +} + +static void qedr_free_pbl(struct qedr_dev *dev, + struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl) +{ + struct pci_dev *pdev = dev->pdev; + int i; + + for (i = 0; i < pbl_info->num_pbls; i++) { + if (!pbl[i].va) + continue; + dma_free_coherent(&pdev->dev, pbl_info->pbl_size, + pbl[i].va, pbl[i].pa); + } + + kfree(pbl); +} + +#define MIN_FW_PBL_PAGE_SIZE (4 * 1024) +#define MAX_FW_PBL_PAGE_SIZE (64 * 1024) + +#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64)) +#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE) +#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE) + +static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev, + struct qedr_pbl_info *pbl_info, + gfp_t flags) +{ + struct pci_dev *pdev = dev->pdev; + struct qedr_pbl *pbl_table; + dma_addr_t *pbl_main_tbl; + dma_addr_t pa; + void *va; + int i; + + pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags); + if (!pbl_table) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < pbl_info->num_pbls; i++) { + va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, + &pa, flags); + if (!va) + goto err; + + memset(va, 0, pbl_info->pbl_size); + pbl_table[i].va = va; + pbl_table[i].pa = pa; + } + + /* Two-Layer PBLs, if we have more than one pbl we need to initialize + * the first one with physical pointers to all of the rest + */ + pbl_main_tbl = (dma_addr_t *)pbl_table[0].va; + for (i = 0; i < pbl_info->num_pbls - 1; i++) + pbl_main_tbl[i] = pbl_table[i + 1].pa; + + return pbl_table; + +err: + for (i--; i >= 0; i--) + dma_free_coherent(&pdev->dev, pbl_info->pbl_size, + pbl_table[i].va, pbl_table[i].pa); + + qedr_free_pbl(dev, pbl_info, pbl_table); + + return ERR_PTR(-ENOMEM); +} + +static int qedr_prepare_pbl_tbl(struct qedr_dev *dev, + struct qedr_pbl_info *pbl_info, + u32 num_pbes, int two_layer_capable) +{ + u32 pbl_capacity; + u32 pbl_size; + u32 num_pbls; + + if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) { + if (num_pbes > MAX_PBES_TWO_LAYER) { + DP_ERR(dev, "prepare pbl table: too many pages %d\n", + num_pbes); + return -EINVAL; + } + + /* calculate required pbl page size */ + pbl_size = MIN_FW_PBL_PAGE_SIZE; + pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) * + NUM_PBES_ON_PAGE(pbl_size); + + while (pbl_capacity < num_pbes) { + pbl_size *= 2; + pbl_capacity = pbl_size / sizeof(u64); + pbl_capacity = pbl_capacity * pbl_capacity; + } + + num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size)); + num_pbls++; /* One for the layer0 ( points to the pbls) */ + pbl_info->two_layered = true; + } else { + /* One layered PBL */ + num_pbls = 1; + pbl_size 
= max_t(u32, MIN_FW_PBL_PAGE_SIZE, + roundup_pow_of_two((num_pbes * sizeof(u64)))); + pbl_info->two_layered = false; + } + + pbl_info->num_pbls = num_pbls; + pbl_info->pbl_size = pbl_size; + pbl_info->num_pbes = num_pbes; + + DP_DEBUG(dev, QEDR_MSG_MR, + "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n", + pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size); + + return 0; +} + +static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, + struct qedr_pbl *pbl, + struct qedr_pbl_info *pbl_info) +{ + int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; + struct qedr_pbl *pbl_tbl; + struct scatterlist *sg; + struct regpair *pbe; + int entry; + u32 addr; + + if (!pbl_info->num_pbes) + return; + + /* If we have a two layered pbl, the first pbl points to the rest + * of the pbls and the first entry lays on the second pbl in the table + */ + if (pbl_info->two_layered) + pbl_tbl = &pbl[1]; + else + pbl_tbl = pbl; + + pbe = (struct regpair *)pbl_tbl->va; + if (!pbe) { + DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n"); + return; + } + + pbe_cnt = 0; + + shift = ilog2(umem->page_size); + + for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { + pages = sg_dma_len(sg) >> shift; + for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { + /* store the page address in pbe */ + pbe->lo = cpu_to_le32(sg_dma_address(sg) + + umem->page_size * pg_cnt); + addr = upper_32_bits(sg_dma_address(sg) + + umem->page_size * pg_cnt); + pbe->hi = cpu_to_le32(addr); + pbe_cnt++; + total_num_pbes++; + pbe++; + + if (total_num_pbes == pbl_info->num_pbes) + return; + + /* If the given pbl is full storing the pbes, + * move to next pbl. + */ + if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) { + pbl_tbl++; + pbe = (struct regpair *)pbl_tbl->va; + pbe_cnt = 0; + } + } + } +} + +static int qedr_copy_cq_uresp(struct qedr_dev *dev, + struct qedr_cq *cq, struct ib_udata *udata) +{ + struct qedr_create_cq_uresp uresp; + int rc; + + memset(&uresp, 0, sizeof(uresp)); + + uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT); + uresp.icid = cq->icid; + + rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (rc) + DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid); + + return rc; +} + +static void consume_cqe(struct qedr_cq *cq) +{ + if (cq->latest_cqe == cq->toggle_cqe) + cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK; + + cq->latest_cqe = qed_chain_consume(&cq->pbl); +} + +static inline int qedr_align_cq_entries(int entries) +{ + u64 size, aligned_size; + + /* We allocate an extra entry that we don't report to the FW. 
*/ + size = (entries + 1) * QEDR_CQE_SIZE; + aligned_size = ALIGN(size, PAGE_SIZE); + + return aligned_size / QEDR_CQE_SIZE; +} + +static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, + struct qedr_dev *dev, + struct qedr_userq *q, + u64 buf_addr, size_t buf_len, + int access, int dmasync) +{ + int page_cnt; + int rc; + + q->buf_addr = buf_addr; + q->buf_len = buf_len; + q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync); + if (IS_ERR(q->umem)) { + DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n", + PTR_ERR(q->umem)); + return PTR_ERR(q->umem); + } + + page_cnt = ib_umem_page_count(q->umem); + rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0); + if (rc) + goto err0; + + q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL); + if (IS_ERR_OR_NULL(q->pbl_tbl)) + goto err0; + + qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info); + + return 0; + +err0: + ib_umem_release(q->umem); + + return rc; +} + +static inline void qedr_init_cq_params(struct qedr_cq *cq, + struct qedr_ucontext *ctx, + struct qedr_dev *dev, int vector, + int chain_entries, int page_cnt, + u64 pbl_ptr, + struct qed_rdma_create_cq_in_params + *params) +{ + memset(params, 0, sizeof(*params)); + params->cq_handle_hi = upper_32_bits((uintptr_t)cq); + params->cq_handle_lo = lower_32_bits((uintptr_t)cq); + params->cnq_id = vector; + params->cq_size = chain_entries - 1; + params->dpi = (ctx) ? ctx->dpi : dev->dpi; + params->pbl_num_pages = page_cnt; + params->pbl_ptr = pbl_ptr; + params->pbl_two_level = 0; +} + +static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags) +{ + /* Flush data before signalling doorbell */ + wmb(); + cq->db.data.agg_flags = flags; + cq->db.data.value = cpu_to_le32(cons); + writeq(cq->db.raw, cq->db_addr); + + /* Make sure write would stick */ + mmiowb(); +} + +int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) +{ + struct qedr_cq *cq = get_qedr_cq(ibcq); + unsigned long sflags; + + if (cq->cq_type == QEDR_CQ_TYPE_GSI) + return 0; + + spin_lock_irqsave(&cq->cq_lock, sflags); + + cq->arm_flags = 0; + + if (flags & IB_CQ_SOLICITED) + cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD; + + if (flags & IB_CQ_NEXT_COMP) + cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD; + + doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags); + + spin_unlock_irqrestore(&cq->cq_lock, sflags); + + return 0; +} + +struct ib_cq *qedr_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *ib_ctx, struct ib_udata *udata) +{ + struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx); + struct qed_rdma_destroy_cq_out_params destroy_oparams; + struct qed_rdma_destroy_cq_in_params destroy_iparams; + struct qedr_dev *dev = get_qedr_dev(ibdev); + struct qed_rdma_create_cq_in_params params; + struct qedr_create_cq_ureq ureq; + int vector = attr->comp_vector; + int entries = attr->cqe; + struct qedr_cq *cq; + int chain_entries; + int page_cnt; + u64 pbl_ptr; + u16 icid; + int rc; + + DP_DEBUG(dev, QEDR_MSG_INIT, + "create_cq: called from %s. entries=%d, vector=%d\n", + udata ? "User Lib" : "Kernel", entries, vector); + + if (entries > QEDR_MAX_CQES) { + DP_ERR(dev, + "create cq: the number of entries %d is too high. 
Must be equal or below %d.\n", + entries, QEDR_MAX_CQES); + return ERR_PTR(-EINVAL); + } + + chain_entries = qedr_align_cq_entries(entries); + chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES); + + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) + return ERR_PTR(-ENOMEM); + + if (udata) { + memset(&ureq, 0, sizeof(ureq)); + if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) { + DP_ERR(dev, + "create cq: problem copying data from user space\n"); + goto err0; + } + + if (!ureq.len) { + DP_ERR(dev, + "create cq: cannot create a cq with 0 entries\n"); + goto err0; + } + + cq->cq_type = QEDR_CQ_TYPE_USER; + + rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr, + ureq.len, IB_ACCESS_LOCAL_WRITE, 1); + if (rc) + goto err0; + + pbl_ptr = cq->q.pbl_tbl->pa; + page_cnt = cq->q.pbl_info.num_pbes; + } else { + cq->cq_type = QEDR_CQ_TYPE_KERNEL; + + rc = dev->ops->common->chain_alloc(dev->cdev, + QED_CHAIN_USE_TO_CONSUME, + QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U32, + chain_entries, + sizeof(union rdma_cqe), + &cq->pbl); + if (rc) + goto err1; + + page_cnt = qed_chain_get_page_cnt(&cq->pbl); + pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl); + } + + qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt, + pbl_ptr, ¶ms); + + rc = dev->ops->rdma_create_cq(dev->rdma_ctx, ¶ms, &icid); + if (rc) + goto err2; + + cq->icid = icid; + cq->sig = QEDR_CQ_MAGIC_NUMBER; + spin_lock_init(&cq->cq_lock); + + if (ib_ctx) { + rc = qedr_copy_cq_uresp(dev, cq, udata); + if (rc) + goto err3; + } else { + /* Generate doorbell address. */ + cq->db_addr = dev->db_addr + + DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT); + cq->db.data.icid = cq->icid; + cq->db.data.params = DB_AGG_CMD_SET << + RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT; + + /* point to the very last element, passing it we will toggle */ + cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl); + cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK; + cq->latest_cqe = NULL; + consume_cqe(cq); + cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl); + } + + DP_DEBUG(dev, QEDR_MSG_CQ, + "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n", + cq->icid, cq, params.cq_size); + + return &cq->ibcq; + +err3: + destroy_iparams.icid = cq->icid; + dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams, + &destroy_oparams); +err2: + if (udata) + qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl); + else + dev->ops->common->chain_free(dev->cdev, &cq->pbl); +err1: + if (udata) + ib_umem_release(cq->q.umem); +err0: + kfree(cq); + return ERR_PTR(-EINVAL); +} + +int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata) +{ + struct qedr_dev *dev = get_qedr_dev(ibcq->device); + struct qedr_cq *cq = get_qedr_cq(ibcq); + + DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq); + + return 0; +} + +int qedr_destroy_cq(struct ib_cq *ibcq) +{ + struct qedr_dev *dev = get_qedr_dev(ibcq->device); + struct qed_rdma_destroy_cq_out_params oparams; + struct qed_rdma_destroy_cq_in_params iparams; + struct qedr_cq *cq = get_qedr_cq(ibcq); + + DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid); + + /* GSIs CQs are handled by driver, so they don't exist in the FW */ + if (cq->cq_type != QEDR_CQ_TYPE_GSI) { + iparams.icid = cq->icid; + dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams); + dev->ops->common->chain_free(dev->cdev, &cq->pbl); + } + + if (ibcq->uobject && ibcq->uobject->context) { + qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl); + ib_umem_release(cq->q.umem); + } + + kfree(cq); + + return 0; +} + +static inline int 
get_gid_info_from_table(struct ib_qp *ibqp, + struct ib_qp_attr *attr, + int attr_mask, + struct qed_rdma_modify_qp_in_params + *qp_params) +{ + enum rdma_network_type nw_type; + struct ib_gid_attr gid_attr; + union ib_gid gid; + u32 ipv4_addr; + int rc = 0; + int i; + + rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num, + attr->ah_attr.grh.sgid_index, &gid, &gid_attr); + if (rc) + return rc; + + if (!memcmp(&gid, &zgid, sizeof(gid))) + return -ENOENT; + + if (gid_attr.ndev) { + qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev); + + dev_put(gid_attr.ndev); + nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid); + switch (nw_type) { + case RDMA_NETWORK_IPV6: + memcpy(&qp_params->sgid.bytes[0], &gid.raw[0], + sizeof(qp_params->sgid)); + memcpy(&qp_params->dgid.bytes[0], + &attr->ah_attr.grh.dgid, + sizeof(qp_params->dgid)); + qp_params->roce_mode = ROCE_V2_IPV6; + SET_FIELD(qp_params->modify_flags, + QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1); + break; + case RDMA_NETWORK_IB: + memcpy(&qp_params->sgid.bytes[0], &gid.raw[0], + sizeof(qp_params->sgid)); + memcpy(&qp_params->dgid.bytes[0], + &attr->ah_attr.grh.dgid, + sizeof(qp_params->dgid)); + qp_params->roce_mode = ROCE_V1; + break; + case RDMA_NETWORK_IPV4: + memset(&qp_params->sgid, 0, sizeof(qp_params->sgid)); + memset(&qp_params->dgid, 0, sizeof(qp_params->dgid)); + ipv4_addr = qedr_get_ipv4_from_gid(gid.raw); + qp_params->sgid.ipv4_addr = ipv4_addr; + ipv4_addr = + qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw); + qp_params->dgid.ipv4_addr = ipv4_addr; + SET_FIELD(qp_params->modify_flags, + QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1); + qp_params->roce_mode = ROCE_V2_IPV4; + break; + } + } + + for (i = 0; i < 4; i++) { + qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]); + qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]); + } + + if (qp_params->vlan_id >= VLAN_CFI_MASK) + qp_params->vlan_id = 0; + + return 0; +} + +static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp) +{ + qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl); + ib_umem_release(qp->usq.umem); +} + +static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp) +{ + qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl); + ib_umem_release(qp->urq.umem); +} + +static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp) +{ + dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl); + kfree(qp->wqe_wr_id); +} + +static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp) +{ + dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl); + kfree(qp->rqe_wr_id); +} + +static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev, + struct ib_qp_init_attr *attrs) +{ + struct qedr_device_attr *qattr = &dev->attr; + + /* QP0... 
attrs->qp_type == IB_QPT_GSI */ + if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) { + DP_DEBUG(dev, QEDR_MSG_QP, + "create qp: unsupported qp type=0x%x requested\n", + attrs->qp_type); + return -EINVAL; + } + + if (attrs->cap.max_send_wr > qattr->max_sqe) { + DP_ERR(dev, + "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n", + attrs->cap.max_send_wr, qattr->max_sqe); + return -EINVAL; + } + + if (attrs->cap.max_inline_data > qattr->max_inline) { + DP_ERR(dev, + "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n", + attrs->cap.max_inline_data, qattr->max_inline); + return -EINVAL; + } + + if (attrs->cap.max_send_sge > qattr->max_sge) { + DP_ERR(dev, + "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n", + attrs->cap.max_send_sge, qattr->max_sge); + return -EINVAL; + } + + if (attrs->cap.max_recv_sge > qattr->max_sge) { + DP_ERR(dev, + "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n", + attrs->cap.max_recv_sge, qattr->max_sge); + return -EINVAL; + } + + /* Unprivileged user space cannot create special QP */ + if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) { + DP_ERR(dev, + "create qp: userspace can't create special QPs of type=0x%x\n", + attrs->qp_type); + return -EINVAL; + } + + return 0; +} + +static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp, + struct qedr_qp *qp) +{ + uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD); + uresp->rq_icid = qp->icid; +} + +static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp, + struct qedr_qp *qp) +{ + uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD); + uresp->sq_icid = qp->icid + 1; +} + +static int qedr_copy_qp_uresp(struct qedr_dev *dev, + struct qedr_qp *qp, struct ib_udata *udata) +{ + struct qedr_create_qp_uresp uresp; + int rc; + + memset(&uresp, 0, sizeof(uresp)); + qedr_copy_sq_uresp(&uresp, qp); + qedr_copy_rq_uresp(&uresp, qp); + + uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE; + uresp.qp_id = qp->qp_id; + + rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (rc) + DP_ERR(dev, + "create qp: failed a copy to user space with qp icid=0x%x.\n", + qp->icid); + + return rc; +} + +static void qedr_set_qp_init_params(struct qedr_dev *dev, + struct qedr_qp *qp, + struct qedr_pd *pd, + struct ib_qp_init_attr *attrs) +{ + qp->pd = pd; + + spin_lock_init(&qp->q_lock); + + qp->qp_type = attrs->qp_type; + qp->max_inline_data = attrs->cap.max_inline_data; + qp->sq.max_sges = attrs->cap.max_send_sge; + qp->state = QED_ROCE_QP_STATE_RESET; + qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false; + qp->sq_cq = get_qedr_cq(attrs->send_cq); + qp->rq_cq = get_qedr_cq(attrs->recv_cq); + qp->dev = dev; + + DP_DEBUG(dev, QEDR_MSG_QP, + "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n", + pd->pd_id, qp->qp_type, qp->max_inline_data, + qp->state, qp->signaled, (attrs->srq) ? 
1 : 0); + DP_DEBUG(dev, QEDR_MSG_QP, + "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n", + qp->sq.max_sges, qp->sq_cq->icid); + qp->rq.max_sges = attrs->cap.max_recv_sge; + DP_DEBUG(dev, QEDR_MSG_QP, + "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n", + qp->rq.max_sges, qp->rq_cq->icid); +} + +static inline void +qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params, + struct qedr_create_qp_ureq *ureq) +{ + /* QP handle to be written in CQE */ + params->qp_handle_lo = ureq->qp_handle_lo; + params->qp_handle_hi = ureq->qp_handle_hi; +} + +static inline void +qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp) +{ + qp->sq.db = dev->db_addr + + DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD); + qp->sq.db_data.data.icid = qp->icid + 1; +} + +static inline void +qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp) +{ + qp->rq.db = dev->db_addr + + DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD); + qp->rq.db_data.data.icid = qp->icid; +} + +static inline int +qedr_init_qp_kernel_params_rq(struct qedr_dev *dev, + struct qedr_qp *qp, struct ib_qp_init_attr *attrs) +{ + /* Allocate driver internal RQ array */ + qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id), + GFP_KERNEL); + if (!qp->rqe_wr_id) + return -ENOMEM; + + DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr); + + return 0; +} + +static inline int +qedr_init_qp_kernel_params_sq(struct qedr_dev *dev, + struct qedr_qp *qp, + struct ib_qp_init_attr *attrs, + struct qed_rdma_create_qp_in_params *params) +{ + u32 temp_max_wr; + + /* Allocate driver internal SQ array */ + temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier; + temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe); + + /* temp_max_wr < attr->max_sqe < u16 so the casting is safe */ + qp->sq.max_wr = (u16)temp_max_wr; + qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id), + GFP_KERNEL); + if (!qp->wqe_wr_id) + return -ENOMEM; + + DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr); + + /* QP handle to be written in CQE */ + params->qp_handle_lo = lower_32_bits((uintptr_t)qp); + params->qp_handle_hi = upper_32_bits((uintptr_t)qp); + + return 0; +} + +static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev, + struct qedr_qp *qp, + struct ib_qp_init_attr *attrs) +{ + u32 n_sq_elems, n_sq_entries; + int rc; + + /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in + * the ring. The ring should allow at least a single WR, even if the + * user requested none, due to allocation issues. + */ + n_sq_entries = attrs->cap.max_send_wr; + n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe); + n_sq_entries = max_t(u32, n_sq_entries, 1); + n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE; + rc = dev->ops->common->chain_alloc(dev->cdev, + QED_CHAIN_USE_TO_PRODUCE, + QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U32, + n_sq_elems, + QEDR_SQE_ELEMENT_SIZE, + &qp->sq.pbl); + if (rc) { + DP_ERR(dev, "failed to allocate QP %p SQ\n", qp); + return rc; + } + + DP_DEBUG(dev, QEDR_MSG_SQ, + "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n", + qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr, + n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc); + return 0; +} + +static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev, + struct qedr_qp *qp, + struct ib_qp_init_attr *attrs) +{ + u32 n_rq_elems, n_rq_entries; + int rc; + + /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in + * the ring. 
The ring should allow at least a single WR, even if the
+ * user requested none, due to allocation issues.
+ */
+ n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
+ n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ n_rq_elems,
+ QEDR_RQE_ELEMENT_SIZE,
+ &qp->rq.pbl);
+
+ if (rc) {
+ DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
+ return -ENOMEM;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_RQ,
+ "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
+ qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
+ n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);
+
+ /* n_rq_entries < u16 so the casting is safe */
+ qp->rq.max_wr = (u16)n_rq_entries;
+
+ return 0;
+}
+
+static inline void
+qedr_init_qp_in_params_sq(struct qedr_dev *dev,
+ struct qedr_pd *pd,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ /* QP handle to be written in an async event */
+ params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
+ params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
+
+ params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
+ params->fmr_and_reserved_lkey = !udata;
+ params->pd = pd->pd_id;
+ params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
+ params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
+ params->max_sq_sges = 0;
+ params->stats_queue = 0;
+
+ if (udata) {
+ params->sq_num_pages = qp->usq.pbl_info.num_pbes;
+ params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
+ } else {
+ params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
+ params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
+ }
+}
+
+static inline void
+qedr_init_qp_in_params_rq(struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
+ params->srq_id = 0;
+ params->use_srq = false;
+
+ if (udata) {
+ params->rq_num_pages = qp->urq.pbl_info.num_pbes;
+ params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+ } else {
+ params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
+ params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
+ }
+}
+
+static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create qp: successfully created user QP. 
qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n", + qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr, + qp->urq.buf_len); +} + +static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx, + struct qedr_dev *dev, + struct qedr_qp *qp, + struct qedr_create_qp_ureq *ureq) +{ + int rc; + + /* SQ - read access only (0), dma sync not required (0) */ + rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr, + ureq->sq_len, 0, 0); + if (rc) + return rc; + + /* RQ - read access only (0), dma sync not required (0) */ + rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr, + ureq->rq_len, 0, 0); + + if (rc) + qedr_cleanup_user_sq(dev, qp); + return rc; +} + +static inline int +qedr_init_kernel_qp(struct qedr_dev *dev, + struct qedr_qp *qp, + struct ib_qp_init_attr *attrs, + struct qed_rdma_create_qp_in_params *params) +{ + int rc; + + rc = qedr_init_qp_kernel_sq(dev, qp, attrs); + if (rc) { + DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp); + return rc; + } + + rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params); + if (rc) { + dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl); + DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp); + return rc; + } + + rc = qedr_init_qp_kernel_rq(dev, qp, attrs); + if (rc) { + qedr_cleanup_kernel_sq(dev, qp); + DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp); + return rc; + } + + rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs); + if (rc) { + DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp); + qedr_cleanup_kernel_sq(dev, qp); + dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl); + return rc; + } + + return rc; +} + +struct ib_qp *qedr_create_qp(struct ib_pd *ibpd, + struct ib_qp_init_attr *attrs, + struct ib_udata *udata) +{ + struct qedr_dev *dev = get_qedr_dev(ibpd->device); + struct qed_rdma_create_qp_out_params out_params; + struct qed_rdma_create_qp_in_params in_params; + struct qedr_pd *pd = get_qedr_pd(ibpd); + struct ib_ucontext *ib_ctx = NULL; + struct qedr_ucontext *ctx = NULL; + struct qedr_create_qp_ureq ureq; + struct qedr_qp *qp; + int rc = 0; + + DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n", + udata ? 
"user library" : "kernel", pd); + + rc = qedr_check_qp_attrs(ibpd, dev, attrs); + if (rc) + return ERR_PTR(rc); + + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) + return ERR_PTR(-ENOMEM); + + if (attrs->srq) + return ERR_PTR(-EINVAL); + + DP_DEBUG(dev, QEDR_MSG_QP, + "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n", + get_qedr_cq(attrs->send_cq), + get_qedr_cq(attrs->send_cq)->icid, + get_qedr_cq(attrs->recv_cq), + get_qedr_cq(attrs->recv_cq)->icid); + + qedr_set_qp_init_params(dev, qp, pd, attrs); + + if (attrs->qp_type == IB_QPT_GSI) { + if (udata) { + DP_ERR(dev, + "create qp: unexpected udata when creating GSI QP\n"); + goto err0; + } + return qedr_create_gsi_qp(dev, attrs, qp); + } + + memset(&in_params, 0, sizeof(in_params)); + + if (udata) { + if (!(udata && ibpd->uobject && ibpd->uobject->context)) + goto err0; + + ib_ctx = ibpd->uobject->context; + ctx = get_qedr_ucontext(ib_ctx); + + memset(&ureq, 0, sizeof(ureq)); + if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) { + DP_ERR(dev, + "create qp: problem copying data from user space\n"); + goto err0; + } + + rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq); + if (rc) + goto err0; + + qedr_init_qp_user_params(&in_params, &ureq); + } else { + rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params); + if (rc) + goto err0; + } + + qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params); + qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params); + + qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx, + &in_params, &out_params); + + if (!qp->qed_qp) + goto err1; + + qp->qp_id = out_params.qp_id; + qp->icid = out_params.icid; + qp->ibqp.qp_num = qp->qp_id; + + if (udata) { + rc = qedr_copy_qp_uresp(dev, qp, udata); + if (rc) + goto err2; + + qedr_qp_user_print(dev, qp); + } else { + qedr_init_qp_kernel_doorbell_sq(dev, qp); + qedr_init_qp_kernel_doorbell_rq(dev, qp); + } + + DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n", + udata ? "user" : "kernel", qp); + + return &qp->ibqp; + +err2: + rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp); + if (rc) + DP_ERR(dev, "create qp: fatal fault. 
rc=%d", rc); +err1: + if (udata) { + qedr_cleanup_user_sq(dev, qp); + qedr_cleanup_user_rq(dev, qp); + } else { + qedr_cleanup_kernel_sq(dev, qp); + qedr_cleanup_kernel_rq(dev, qp); + } + +err0: + kfree(qp); + + return ERR_PTR(-EFAULT); +} + +enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) +{ + switch (qp_state) { + case QED_ROCE_QP_STATE_RESET: + return IB_QPS_RESET; + case QED_ROCE_QP_STATE_INIT: + return IB_QPS_INIT; + case QED_ROCE_QP_STATE_RTR: + return IB_QPS_RTR; + case QED_ROCE_QP_STATE_RTS: + return IB_QPS_RTS; + case QED_ROCE_QP_STATE_SQD: + return IB_QPS_SQD; + case QED_ROCE_QP_STATE_ERR: + return IB_QPS_ERR; + case QED_ROCE_QP_STATE_SQE: + return IB_QPS_SQE; + } + return IB_QPS_ERR; +} + +enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state) +{ + switch (qp_state) { + case IB_QPS_RESET: + return QED_ROCE_QP_STATE_RESET; + case IB_QPS_INIT: + return QED_ROCE_QP_STATE_INIT; + case IB_QPS_RTR: + return QED_ROCE_QP_STATE_RTR; + case IB_QPS_RTS: + return QED_ROCE_QP_STATE_RTS; + case IB_QPS_SQD: + return QED_ROCE_QP_STATE_SQD; + case IB_QPS_ERR: + return QED_ROCE_QP_STATE_ERR; + default: + return QED_ROCE_QP_STATE_ERR; + } +} + +static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph) +{ + qed_chain_reset(&qph->pbl); + qph->prod = 0; + qph->cons = 0; + qph->wqe_cons = 0; + qph->db_data.data.value = cpu_to_le16(0); +} + +static int qedr_update_qp_state(struct qedr_dev *dev, + struct qedr_qp *qp, + enum qed_roce_qp_state new_state) +{ + int status = 0; + + if (new_state == qp->state) + return 1; + + switch (qp->state) { + case QED_ROCE_QP_STATE_RESET: + switch (new_state) { + case QED_ROCE_QP_STATE_INIT: + qp->prev_wqe_size = 0; + qedr_reset_qp_hwq_info(&qp->sq); + qedr_reset_qp_hwq_info(&qp->rq); + break; + default: + status = -EINVAL; + break; + }; + break; + case QED_ROCE_QP_STATE_INIT: + switch (new_state) { + case QED_ROCE_QP_STATE_RTR: + /* Update doorbell (in case post_recv was + * done before move to RTR) + */ + wmb(); + writel(qp->rq.db_data.raw, qp->rq.db); + /* Make sure write takes effect */ + mmiowb(); + break; + case QED_ROCE_QP_STATE_ERR: + break; + default: + /* Invalid state change. */ + status = -EINVAL; + break; + }; + break; + case QED_ROCE_QP_STATE_RTR: + /* RTR->XXX */ + switch (new_state) { + case QED_ROCE_QP_STATE_RTS: + break; + case QED_ROCE_QP_STATE_ERR: + break; + default: + /* Invalid state change. */ + status = -EINVAL; + break; + }; + break; + case QED_ROCE_QP_STATE_RTS: + /* RTS->XXX */ + switch (new_state) { + case QED_ROCE_QP_STATE_SQD: + break; + case QED_ROCE_QP_STATE_ERR: + break; + default: + /* Invalid state change. */ + status = -EINVAL; + break; + }; + break; + case QED_ROCE_QP_STATE_SQD: + /* SQD->XXX */ + switch (new_state) { + case QED_ROCE_QP_STATE_RTS: + case QED_ROCE_QP_STATE_ERR: + break; + default: + /* Invalid state change. 
*/ + status = -EINVAL; + break; + }; + break; + case QED_ROCE_QP_STATE_ERR: + /* ERR->XXX */ + switch (new_state) { + case QED_ROCE_QP_STATE_RESET: + break; + default: + status = -EINVAL; + break; + }; + break; + default: + status = -EINVAL; + break; + }; + + return status; +} + +int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct qedr_qp *qp = get_qedr_qp(ibqp); + struct qed_rdma_modify_qp_in_params qp_params = { 0 }; + struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev); + enum ib_qp_state old_qp_state, new_qp_state; + int rc = 0; + + DP_DEBUG(dev, QEDR_MSG_QP, + "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask, + attr->qp_state); + + old_qp_state = qedr_get_ibqp_state(qp->state); + if (attr_mask & IB_QP_STATE) + new_qp_state = attr->qp_state; + else + new_qp_state = old_qp_state; + + if (!ib_modify_qp_is_ok + (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask, + IB_LINK_LAYER_ETHERNET)) { + DP_ERR(dev, + "modify qp: invalid attribute mask=0x%x specified for\n" + "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n", + attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state, + new_qp_state); + rc = -EINVAL; + goto err; + } + + /* Translate the masks... */ + if (attr_mask & IB_QP_STATE) { + SET_FIELD(qp_params.modify_flags, + QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1); + qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state); + } + + if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) + qp_params.sqd_async = true; + + if (attr_mask & IB_QP_PKEY_INDEX) { + SET_FIELD(qp_params.modify_flags, + QED_ROCE_MODIFY_QP_VALID_PKEY, 1); + if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) { + rc = -EINVAL; + goto err; + } + + qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT; + } + + if (attr_mask & IB_QP_QKEY) + qp->qkey = attr->qkey; + + if (attr_mask & IB_QP_ACCESS_FLAGS) { + SET_FIELD(qp_params.modify_flags, + QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1); + qp_params.incoming_rdma_read_en = attr->qp_access_flags & + IB_ACCESS_REMOTE_READ; + qp_params.incoming_rdma_write_en = attr->qp_access_flags & + IB_ACCESS_REMOTE_WRITE; + qp_params.incoming_atomic_en = attr->qp_access_flags & + IB_ACCESS_REMOTE_ATOMIC; + } + + if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) { + if (attr_mask & IB_QP_PATH_MTU) { + if (attr->path_mtu < IB_MTU_256 || + attr->path_mtu > IB_MTU_4096) { + pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n"); + rc = -EINVAL; + goto err; + } + qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu), + ib_mtu_enum_to_int(iboe_get_mtu + (dev->ndev->mtu))); + } + + if (!qp->mtu) { + qp->mtu = + ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu)); + pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu); + } + + SET_FIELD(qp_params.modify_flags, + QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1); + + qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class; + qp_params.flow_label = attr->ah_attr.grh.flow_label; + qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit; + + qp->sgid_idx = attr->ah_attr.grh.sgid_index; + + rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params); + if (rc) { + DP_ERR(dev, + "modify qp: problems with GID index %d (rc=%d)\n", + attr->ah_attr.grh.sgid_index, rc); + return rc; + } + + rc = qedr_get_dmac(dev, &attr->ah_attr, + qp_params.remote_mac_addr); + if (rc) + return rc; + + qp_params.use_local_mac = true; + ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr); + + DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n", + qp_params.dgid.dwords[0], 
qp_params.dgid.dwords[1],
+ qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
+ DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
+ qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
+ qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
+ DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
+ qp_params.remote_mac_addr);
+
+ qp_params.mtu = qp->mtu;
+ qp_params.lb_indication = false;
+ }
+
+ if (!qp_params.mtu) {
+ /* Stay with current MTU */
+ if (qp->mtu)
+ qp_params.mtu = qp->mtu;
+ else
+ qp_params.mtu =
+ ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
+ }
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
+
+ qp_params.ack_timeout = attr->timeout;
+ if (attr->timeout) {
+ u32 temp;
+
+ temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
+ /* FW requires [msec] */
+ qp_params.ack_timeout = temp;
+ } else {
+ /* Infinite */
+ qp_params.ack_timeout = 0;
+ }
+ }
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
+ qp_params.retry_cnt = attr->retry_cnt;
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
+ qp_params.rnr_retry_cnt = attr->rnr_retry;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
+ qp_params.rq_psn = attr->rq_psn;
+ qp->rq_psn = attr->rq_psn;
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
+ rc = -EINVAL;
+ DP_ERR(dev,
+ "unsupported max_rd_atomic=%d, supported=%d\n",
+ attr->max_rd_atomic,
+ dev->attr.max_qp_req_rd_atomic_resc);
+ goto err;
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
+ qp_params.max_rd_atomic_req = attr->max_rd_atomic;
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
+ qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
+ }
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
+ qp_params.sq_psn = attr->sq_psn;
+ qp->sq_psn = attr->sq_psn;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (attr->max_dest_rd_atomic >
+ dev->attr.max_qp_resp_rd_atomic_resc) {
+ DP_ERR(dev,
+ "unsupported max_dest_rd_atomic=%d, supported=%d\n",
+ attr->max_dest_rd_atomic,
+ dev->attr.max_qp_resp_rd_atomic_resc);
+
+ rc = -EINVAL;
+ goto err;
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
+ qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
+ }
+
+ if (attr_mask & IB_QP_DEST_QPN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
+
+ qp_params.dest_qp = attr->dest_qp_num;
+ qp->dest_qp_num = attr->dest_qp_num;
+ }
+
+ if (qp->qp_type != IB_QPT_GSI)
+ rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
+ qp->qed_qp, &qp_params);
+
+ if (attr_mask & IB_QP_STATE) {
+ if ((qp->qp_type != IB_QPT_GSI) && (!udata))
+ qedr_update_qp_state(dev, qp, qp_params.new_state);
+ qp->state = qp_params.new_state;
+ }
+
+err:
+ return rc;
+}
+
+static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
+{
+ int ib_qp_acc_flags = 0;
+
+ if (params->incoming_rdma_write_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (params->incoming_rdma_read_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
+ if (params->incoming_atomic_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
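+ /* Local write is reported unconditionally; the FW response above
+ * only carries the remote access permissions.
+ */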
+ ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
+ return ib_qp_acc_flags;
+}
+
+int qedr_query_qp(struct ib_qp *ibqp,
+ struct ib_qp_attr *qp_attr,
+ int attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct qed_rdma_query_qp_out_params params;
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ int rc = 0;
+
+ memset(&params, 0, sizeof(params));
+
+ rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
+ if (rc)
+ goto err;
+
+ memset(qp_attr, 0, sizeof(*qp_attr));
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+ qp_attr->qp_state = qedr_get_ibqp_state(params.state);
+ qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
+ qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+ qp_attr->path_mig_state = IB_MIG_MIGRATED;
+ qp_attr->rq_psn = params.rq_psn;
+ qp_attr->sq_psn = params.sq_psn;
+ qp_attr->dest_qp_num = params.dest_qp;
+
+ qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
+
+ qp_attr->cap.max_send_wr = qp->sq.max_wr;
+ qp_attr->cap.max_recv_wr = qp->rq.max_wr;
+ qp_attr->cap.max_send_sge = qp->sq.max_sges;
+ qp_attr->cap.max_recv_sge = qp->rq.max_sges;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
+ qp_init_attr->cap = qp_attr->cap;
+
+ memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
+ sizeof(qp_attr->ah_attr.grh.dgid.raw));
+
+ qp_attr->ah_attr.grh.flow_label = params.flow_label;
+ qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
+ qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
+ qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
+
+ qp_attr->ah_attr.ah_flags = IB_AH_GRH;
+ qp_attr->ah_attr.port_num = 1;
+ qp_attr->ah_attr.sl = 0;
+ qp_attr->timeout = params.timeout;
+ qp_attr->rnr_retry = params.rnr_retry;
+ qp_attr->retry_cnt = params.retry_cnt;
+ qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
+ qp_attr->pkey_index = params.pkey_index;
+ qp_attr->port_num = 1;
+ qp_attr->ah_attr.src_path_bits = 0;
+ qp_attr->ah_attr.static_rate = 0;
+ qp_attr->alt_pkey_index = 0;
+ qp_attr->alt_port_num = 0;
+ qp_attr->alt_timeout = 0;
+ memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
+
+ qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
+ qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
+ qp_attr->max_rd_atomic = params.max_rd_atomic;
+ qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
+ qp_attr->cap.max_inline_data);
+
+err:
+ return rc;
+}
+
+int qedr_destroy_qp(struct ib_qp *ibqp)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ struct ib_qp_attr attr;
+ int attr_mask = 0;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
+ qp, qp->qp_type);
+
+ /* Compare against each state individually; a bitwise OR of the
+ * enum values would never match any single state.
+ */
+ if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
+ (qp->state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->state != QED_ROCE_QP_STATE_INIT)) {
+ attr.qp_state = IB_QPS_ERR;
+ attr_mask |= IB_QP_STATE;
+
+ /* Change the QP state to ERROR */
+ qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+ }
+
+ if (qp->qp_type != IB_QPT_GSI) {
+ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+ if (rc)
+ return rc;
+ } else {
+ qedr_destroy_gsi_qp(dev);
+ }
+
+ if (ibqp->uobject && ibqp->uobject->context) {
+ qedr_cleanup_user_sq(dev, qp);
+ qedr_cleanup_user_rq(dev, qp);
+ } else {
+ qedr_cleanup_kernel_sq(dev, qp);
+ qedr_cleanup_kernel_rq(dev, qp);
+ }
+
+ kfree(qp);
+
+ return rc;
+}
+
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+{
+ struct qedr_ah *ah;
+
+ ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ ah->attr = *attr;
+
+ return &ah->ibah;
+}
+
+int qedr_destroy_ah(struct ib_ah *ibah)
+{
+ struct qedr_ah *ah = get_qedr_ah(ibah);
+
+ kfree(ah);
+ return 0;
+}
+
+static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
+{
+ struct qedr_pbl *pbl, *tmp;
+
+ if (info->pbl_table)
+ list_add_tail(&info->pbl_table->list_entry,
+ &info->free_pbl_list);
+
+ if (!list_empty(&info->inuse_pbl_list))
+ list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
+
+ list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
+ list_del(&pbl->list_entry);
+ qedr_free_pbl(dev, &info->pbl_info, pbl);
+ }
+}
+
+static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
+ size_t page_list_len, bool two_layered)
+{
+ struct qedr_pbl *tmp;
+ int rc;
+
+ INIT_LIST_HEAD(&info->free_pbl_list);
+ INIT_LIST_HEAD(&info->inuse_pbl_list);
+
+ rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
+ page_list_len, two_layered);
+ if (rc)
+ goto done;
+
+ /* qedr_alloc_pbl_tbl() returns an ERR_PTR, never NULL; clear the
+ * pointer on failure so free_mr_info() does not dereference it.
+ */
+ info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+ if (IS_ERR(info->pbl_table)) {
+ rc = PTR_ERR(info->pbl_table);
+ info->pbl_table = NULL;
+ goto done;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
+ &info->pbl_table->pa);
+
+ /* In the usual case we use 2 PBLs, so we add one to the free
+ * list and allocate another one
+ */
+ tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+ if (IS_ERR(tmp)) {
+ DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
+ goto done;
+ }
+
+ list_add_tail(&tmp->list_entry, &info->free_pbl_list);
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
+
+done:
+ if (rc)
+ free_mr_info(dev, info);
+
+ return rc;
+}
+
+struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+ u64 usr_addr, int acc, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_mr *mr;
+ struct qedr_pd *pd;
+ int rc = -ENOMEM;
+
+ pd = get_qedr_pd(ibpd);
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
+ pd->pd_id, start, len, usr_addr, acc);
+
+ if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(rc);
+
+ mr->type = QEDR_MR_USER;
+
+ mr->umem = 
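+ /* ib_umem_get() pins the user buffer and builds its DMA
+ * scatterlist; qedr_populate_pbls() below translates the
+ * resulting page list into PBLs for the FW.
+ */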
ib_umem_get(ibpd->uobject->context, start, len, acc, 0); + if (IS_ERR(mr->umem)) { + rc = -EFAULT; + goto err0; + } + + rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1); + if (rc) + goto err1; + + qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table, + &mr->info.pbl_info); + + rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); + if (rc) { + DP_ERR(dev, "roce alloc tid returned an error %d\n", rc); + goto err1; + } + + /* Index only, 18 bit long, lkey = itid << 8 | key */ + mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR; + mr->hw_mr.key = 0; + mr->hw_mr.pd = pd->pd_id; + mr->hw_mr.local_read = 1; + mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; + mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; + mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; + mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; + mr->hw_mr.mw_bind = false; + mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa; + mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered; + mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size); + mr->hw_mr.page_size_log = ilog2(mr->umem->page_size); + mr->hw_mr.fbo = ib_umem_offset(mr->umem); + mr->hw_mr.length = len; + mr->hw_mr.vaddr = usr_addr; + mr->hw_mr.zbva = false; + mr->hw_mr.phy_mr = false; + mr->hw_mr.dma_mr = false; + + rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr); + if (rc) { + DP_ERR(dev, "roce register tid returned an error %d\n", rc); + goto err2; + } + + mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key; + if (mr->hw_mr.remote_write || mr->hw_mr.remote_read || + mr->hw_mr.remote_atomic) + mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key; + + DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n", + mr->ibmr.lkey); + return &mr->ibmr; + +err2: + dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid); +err1: + qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table); +err0: + kfree(mr); + return ERR_PTR(rc); +} + +int qedr_dereg_mr(struct ib_mr *ib_mr) +{ + struct qedr_mr *mr = get_qedr_mr(ib_mr); + struct qedr_dev *dev = get_qedr_dev(ib_mr->device); + int rc = 0; + + rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid); + if (rc) + return rc; + + dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid); + + if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR)) + qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table); + + /* it could be user registered memory. 
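+ * A DMA MR never sets ->umem, so there is nothing to release for it.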
+ */
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ kfree(mr);
+
+ return rc;
+}
+
+struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+{
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_mr *mr;
+ int rc = -ENOMEM;
+
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
+ max_page_list_len);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(rc);
+
+ mr->dev = dev;
+ mr->type = QEDR_MR_FRMR;
+
+ rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
+ if (rc)
+ goto err0;
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ goto err0;
+ }
+
+ /* Index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
+ mr->hw_mr.key = 0;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = 0;
+ mr->hw_mr.remote_read = 0;
+ mr->hw_mr.remote_write = 0;
+ mr->hw_mr.remote_atomic = 0;
+ mr->hw_mr.mw_bind = false;
+ mr->hw_mr.pbl_ptr = 0;
+ mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
+ mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
+ mr->hw_mr.fbo = 0;
+ mr->hw_mr.length = 0;
+ mr->hw_mr.vaddr = 0;
+ mr->hw_mr.zbva = false;
+ mr->hw_mr.phy_mr = true;
+ mr->hw_mr.dma_mr = false;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err1;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ mr->ibmr.rkey = mr->ibmr.lkey;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
+ return mr;
+
+err1:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err0:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
+ enum ib_mr_type mr_type, u32 max_num_sg)
+{
+ struct qedr_mr *mr;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = __qedr_alloc_mr(ibpd, max_num_sg);
+
+ if (IS_ERR(mr))
+ return ERR_CAST(mr);
+
+ return &mr->ibmr;
+}
+
+static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct qedr_mr *mr = get_qedr_mr(ibmr);
+ struct qedr_pbl *pbl_table;
+ struct regpair *pbe;
+ u32 pbes_in_page;
+
+ if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
+ DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
+ return -ENOMEM;
+ }
+
+ DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
+ mr->npages, addr);
+
+ pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
+ pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
+ pbe = (struct regpair *)pbl_table->va;
+ pbe += mr->npages % pbes_in_page;
+ pbe->lo = cpu_to_le32((u32)addr);
+ pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
+
+ mr->npages++;
+
+ return 0;
+}
+
+static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
+{
+ int work = info->completed - info->completed_handled - 1;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
+ while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
+ struct qedr_pbl *pbl;
+
+ /* Free all page lists that can be freed (all those that
+ * were invalidated), on the assumption that if an FMR
+ * completed successfully, any invalidate posted before it
+ * has completed as well.
+ */
+ pbl = list_first_entry(&info->inuse_pbl_list,
+ struct qedr_pbl, list_entry);
+
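+ /* Return the completed PBL from the in-use list to the free list
+ * so a future fast registration can reuse it.
+ */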
list_del(&pbl->list_entry); + list_add_tail(&pbl->list_entry, &info->free_pbl_list); + info->completed_handled++; + } +} + +int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, + int sg_nents, unsigned int *sg_offset) +{ + struct qedr_mr *mr = get_qedr_mr(ibmr); + + mr->npages = 0; + + handle_completed_mrs(mr->dev, &mr->info); + return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page); +} + +struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc) +{ + struct qedr_dev *dev = get_qedr_dev(ibpd->device); + struct qedr_pd *pd = get_qedr_pd(ibpd); + struct qedr_mr *mr; + int rc; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + mr->type = QEDR_MR_DMA; + + rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); + if (rc) { + DP_ERR(dev, "roce alloc tid returned an error %d\n", rc); + goto err1; + } + + /* index only, 18 bit long, lkey = itid << 8 | key */ + mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR; + mr->hw_mr.pd = pd->pd_id; + mr->hw_mr.local_read = 1; + mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; + mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; + mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; + mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; + mr->hw_mr.dma_mr = true; + + rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr); + if (rc) { + DP_ERR(dev, "roce register tid returned an error %d\n", rc); + goto err2; + } + + mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key; + if (mr->hw_mr.remote_write || mr->hw_mr.remote_read || + mr->hw_mr.remote_atomic) + mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key; + + DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey); + return &mr->ibmr; + +err2: + dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid); +err1: + kfree(mr); + return ERR_PTR(rc); +} + +static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq) +{ + return (((wq->prod + 1) % wq->max_wr) == wq->cons); +} + +static int sge_data_len(struct ib_sge *sg_list, int num_sge) +{ + int i, len = 0; + + for (i = 0; i < num_sge; i++) + len += sg_list[i].length; + + return len; +} + +static void swap_wqe_data64(u64 *p) +{ + int i; + + for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++) + *p = cpu_to_be64(cpu_to_le64(*p)); +} + +static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev, + struct qedr_qp *qp, u8 *wqe_size, + struct ib_send_wr *wr, + struct ib_send_wr **bad_wr, u8 *bits, + u8 bit) +{ + u32 data_size = sge_data_len(wr->sg_list, wr->num_sge); + char *seg_prt, *wqe; + int i, seg_siz; + + if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) { + DP_ERR(dev, "Too much inline data in WR: %d\n", data_size); + *bad_wr = wr; + return 0; + } + + if (!data_size) + return data_size; + + *bits |= bit; + + seg_prt = NULL; + wqe = NULL; + seg_siz = 0; + + /* Copy data inline */ + for (i = 0; i < wr->num_sge; i++) { + u32 len = wr->sg_list[i].length; + void *src = (void *)(uintptr_t)wr->sg_list[i].addr; + + while (len > 0) { + u32 cur; + + /* New segment required */ + if (!seg_siz) { + wqe = (char *)qed_chain_produce(&qp->sq.pbl); + seg_prt = wqe; + seg_siz = sizeof(struct rdma_sq_common_wqe); + (*wqe_size)++; + } + + /* Calculate currently allowed length */ + cur = min_t(u32, len, seg_siz); + memcpy(seg_prt, src, cur); + + /* Update segment variables */ + seg_prt += cur; + seg_siz -= cur; + + /* Update sge variables */ + src += cur; + len -= cur; + + /* Swap fully-completed segments */ + if (!seg_siz) + swap_wqe_data64((u64 
*)wqe); + } + } + + /* swap last not completed segment */ + if (seg_siz) + swap_wqe_data64((u64 *)wqe); + + return data_size; +} + +#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \ + do { \ + DMA_REGPAIR_LE(sge->addr, vaddr); \ + (sge)->length = cpu_to_le32(vlength); \ + (sge)->flags = cpu_to_le32(vflags); \ + } while (0) + +#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \ + do { \ + DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \ + (hdr)->num_sges = num_sge; \ + } while (0) + +#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \ + do { \ + DMA_REGPAIR_LE(sge->addr, vaddr); \ + (sge)->length = cpu_to_le32(vlength); \ + (sge)->l_key = cpu_to_le32(vlkey); \ + } while (0) + +static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size, + struct ib_send_wr *wr) +{ + u32 data_size = 0; + int i; + + for (i = 0; i < wr->num_sge; i++) { + struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl); + + DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr); + sge->l_key = cpu_to_le32(wr->sg_list[i].lkey); + sge->length = cpu_to_le32(wr->sg_list[i].length); + data_size += wr->sg_list[i].length; + } + + if (wqe_size) + *wqe_size += wr->num_sge; + + return data_size; +} + +static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev, + struct qedr_qp *qp, + struct rdma_sq_rdma_wqe_1st *rwqe, + struct rdma_sq_rdma_wqe_2nd *rwqe2, + struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey); + DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr); + + if (wr->send_flags & IB_SEND_INLINE) { + u8 flags = 0; + + SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1); + return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr, + bad_wr, &rwqe->flags, flags); + } + + return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr); +} + +static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev, + struct qedr_qp *qp, + struct rdma_sq_send_wqe_1st *swqe, + struct rdma_sq_send_wqe_2st *swqe2, + struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + memset(swqe2, 0, sizeof(*swqe2)); + if (wr->send_flags & IB_SEND_INLINE) { + u8 flags = 0; + + SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1); + return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr, + bad_wr, &swqe->flags, flags); + } + + return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr); +} + +static int qedr_prepare_reg(struct qedr_qp *qp, + struct rdma_sq_fmr_wqe_1st *fwqe1, + struct ib_reg_wr *wr) +{ + struct qedr_mr *mr = get_qedr_mr(wr->mr); + struct rdma_sq_fmr_wqe_2nd *fwqe2; + + fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl); + fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova); + fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova); + fwqe1->l_key = wr->key; + + SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ, + !!(wr->access & IB_ACCESS_REMOTE_READ)); + SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE, + !!(wr->access & IB_ACCESS_REMOTE_WRITE)); + SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC, + !!(wr->access & IB_ACCESS_REMOTE_ATOMIC)); + SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1); + SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE, + !!(wr->access & IB_ACCESS_LOCAL_WRITE)); + fwqe2->fmr_ctrl = 0; + + SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG, + ilog2(mr->ibmr.page_size) - 12); + + fwqe2->length_hi = 0; + fwqe2->length_lo = mr->ibmr.length; + fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa); + fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa); + + qp->wqe_wr_id[qp->sq.prod].mr = mr; + 
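+ /* The MR stashed above is found again when this registration WQE
+ * completes, so completion handling can account for the finished
+ * fast-reg work (see handle_completed_mrs()).
+ */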
+ return 0; +} + +enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) +{ + switch (opcode) { + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + return IB_WC_RDMA_WRITE; + case IB_WR_SEND_WITH_IMM: + case IB_WR_SEND: + case IB_WR_SEND_WITH_INV: + return IB_WC_SEND; + case IB_WR_RDMA_READ: + return IB_WC_RDMA_READ; + case IB_WR_ATOMIC_CMP_AND_SWP: + return IB_WC_COMP_SWAP; + case IB_WR_ATOMIC_FETCH_AND_ADD: + return IB_WC_FETCH_ADD; + case IB_WR_REG_MR: + return IB_WC_REG_MR; + case IB_WR_LOCAL_INV: + return IB_WC_LOCAL_INV; + default: + return IB_WC_SEND; + } +} + +inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) +{ + int wq_is_full, err_wr, pbl_is_full; + struct qedr_dev *dev = qp->dev; + + /* prevent SQ overflow and/or processing of a bad WR */ + err_wr = wr->num_sge > qp->sq.max_sges; + wq_is_full = qedr_wq_is_full(&qp->sq); + pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) < + QEDR_MAX_SQE_ELEMENTS_PER_SQE; + if (wq_is_full || err_wr || pbl_is_full) { + if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) { + DP_ERR(dev, + "error: WQ is full. Post send on QP %p failed (this error appears only once)\n", + qp); + qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL; + } + + if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) { + DP_ERR(dev, + "error: WR is bad. Post send on QP %p failed (this error appears only once)\n", + qp); + qp->err_bitmap |= QEDR_QP_ERR_BAD_SR; + } + + if (pbl_is_full && + !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) { + DP_ERR(dev, + "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n", + qp); + qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL; + } + return false; + } + return true; +} + +int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + struct qedr_dev *dev = get_qedr_dev(ibqp->device); + struct qedr_qp *qp = get_qedr_qp(ibqp); + struct rdma_sq_atomic_wqe_1st *awqe1; + struct rdma_sq_atomic_wqe_2nd *awqe2; + struct rdma_sq_atomic_wqe_3rd *awqe3; + struct rdma_sq_send_wqe_2st *swqe2; + struct rdma_sq_local_inv_wqe *iwqe; + struct rdma_sq_rdma_wqe_2nd *rwqe2; + struct rdma_sq_send_wqe_1st *swqe; + struct rdma_sq_rdma_wqe_1st *rwqe; + struct rdma_sq_fmr_wqe_1st *fwqe1; + struct rdma_sq_common_wqe *wqe; + u32 length; + int rc = 0; + bool comp; + + if (!qedr_can_post_send(qp, wr)) { + *bad_wr = wr; + return -ENOMEM; + } + + wqe = qed_chain_produce(&qp->sq.pbl); + qp->wqe_wr_id[qp->sq.prod].signaled = + !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled; + + wqe->flags = 0; + SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG, + !!(wr->send_flags & IB_SEND_SOLICITED)); + comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled; + SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp); + SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG, + !!(wr->send_flags & IB_SEND_FENCE)); + wqe->prev_wqe_size = qp->prev_wqe_size; + + qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode); + + switch (wr->opcode) { + case IB_WR_SEND_WITH_IMM: + wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM; + swqe = (struct rdma_sq_send_wqe_1st *)wqe; + swqe->wqe_size = 2; + swqe2 = qed_chain_produce(&qp->sq.pbl); + + swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data); + length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2, + wr, bad_wr); + swqe->length = cpu_to_le32(length); + qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size; + qp->prev_wqe_size = swqe->wqe_size; + qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length; + break; + case IB_WR_SEND: 
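+ /* Same flow as IB_WR_SEND_WITH_IMM above, minus the immediate data. */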
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND; + swqe = (struct rdma_sq_send_wqe_1st *)wqe; + + swqe->wqe_size = 2; + swqe2 = qed_chain_produce(&qp->sq.pbl); + length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2, + wr, bad_wr); + swqe->length = cpu_to_le32(length); + qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size; + qp->prev_wqe_size = swqe->wqe_size; + qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length; + break; + case IB_WR_SEND_WITH_INV: + wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE; + swqe = (struct rdma_sq_send_wqe_1st *)wqe; + swqe2 = qed_chain_produce(&qp->sq.pbl); + swqe->wqe_size = 2; + swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey); + length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2, + wr, bad_wr); + swqe->length = cpu_to_le32(length); + qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size; + qp->prev_wqe_size = swqe->wqe_size; + qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length; + break; + + case IB_WR_RDMA_WRITE_WITH_IMM: + wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM; + rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; + + rwqe->wqe_size = 2; + rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data)); + rwqe2 = qed_chain_produce(&qp->sq.pbl); + length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2, + wr, bad_wr); + rwqe->length = cpu_to_le32(length); + qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size; + qp->prev_wqe_size = rwqe->wqe_size; + qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length; + break; + case IB_WR_RDMA_WRITE: + wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR; + rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; + + rwqe->wqe_size = 2; + rwqe2 = qed_chain_produce(&qp->sq.pbl); + length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2, + wr, bad_wr); + rwqe->length = cpu_to_le32(length); + qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size; + qp->prev_wqe_size = rwqe->wqe_size; + qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length; + break; + case IB_WR_RDMA_READ_WITH_INV: + DP_ERR(dev, + "RDMA READ WITH INVALIDATE not supported\n"); + *bad_wr = wr; + rc = -EINVAL; + break; + + case IB_WR_RDMA_READ: + wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD; + rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; + + rwqe->wqe_size = 2; + rwqe2 = qed_chain_produce(&qp->sq.pbl); + length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2, + wr, bad_wr); + rwqe->length = cpu_to_le32(length); + qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size; + qp->prev_wqe_size = rwqe->wqe_size; + qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length; + break; + + case IB_WR_ATOMIC_CMP_AND_SWP: + case IB_WR_ATOMIC_FETCH_AND_ADD: + awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe; + awqe1->wqe_size = 4; + + awqe2 = qed_chain_produce(&qp->sq.pbl); + DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr); + awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey); + + awqe3 = qed_chain_produce(&qp->sq.pbl); + + if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { + wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD; + DMA_REGPAIR_LE(awqe3->swap_data, + atomic_wr(wr)->compare_add); + } else { + wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP; + DMA_REGPAIR_LE(awqe3->swap_data, + atomic_wr(wr)->swap); + DMA_REGPAIR_LE(awqe3->cmp_data, + atomic_wr(wr)->compare_add); + } + + qedr_prepare_sq_sges(qp, NULL, wr); + + qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size; + qp->prev_wqe_size = awqe1->wqe_size; + break; + + case IB_WR_LOCAL_INV: + iwqe = (struct rdma_sq_local_inv_wqe *)wqe; + iwqe->wqe_size = 1; + + iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE; + iwqe->inv_l_key = 
wr->ex.invalidate_rkey; + qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size; + qp->prev_wqe_size = iwqe->wqe_size; + break; + case IB_WR_REG_MR: + DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n"); + wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR; + fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe; + fwqe1->wqe_size = 2; + + rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr)); + if (rc) { + DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc); + *bad_wr = wr; + break; + } + + qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size; + qp->prev_wqe_size = fwqe1->wqe_size; + break; + default: + DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode); + rc = -EINVAL; + *bad_wr = wr; + break; + } + + if (*bad_wr) { + u16 value; + + /* Restore prod to its position before + * this WR was processed + */ + value = le16_to_cpu(qp->sq.db_data.data.value); + qed_chain_set_prod(&qp->sq.pbl, value, wqe); + + /* Restore prev_wqe_size */ + qp->prev_wqe_size = wqe->prev_wqe_size; + rc = -EINVAL; + DP_ERR(dev, "POST SEND FAILED\n"); + } + + return rc; +} + +int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + struct qedr_dev *dev = get_qedr_dev(ibqp->device); + struct qedr_qp *qp = get_qedr_qp(ibqp); + unsigned long flags; + int rc = 0; + + *bad_wr = NULL; + + if (qp->qp_type == IB_QPT_GSI) + return qedr_gsi_post_send(ibqp, wr, bad_wr); + + spin_lock_irqsave(&qp->q_lock, flags); + + if ((qp->state == QED_ROCE_QP_STATE_RESET) || + (qp->state == QED_ROCE_QP_STATE_ERR)) { + spin_unlock_irqrestore(&qp->q_lock, flags); + *bad_wr = wr; + DP_DEBUG(dev, QEDR_MSG_CQ, + "QP in wrong state! QP icid=0x%x state %d\n", + qp->icid, qp->state); + return -EINVAL; + } + + if (!wr) { + DP_ERR(dev, "Got an empty post send.\n"); + /* don't leak the lock taken above */ + spin_unlock_irqrestore(&qp->q_lock, flags); + return -EINVAL; + } + + while (wr) { + rc = __qedr_post_send(ibqp, wr, bad_wr); + if (rc) + break; + + qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id; + + qedr_inc_sw_prod(&qp->sq); + + qp->sq.db_data.data.value++; + + wr = wr->next; + } + + /* Trigger doorbell + * If there was a failure in the first WR then it will be triggered in + * vain. However this is not harmful (as long as the producer value is + * unchanged). For performance reasons we avoid checking for this + * redundant doorbell. + */ + wmb(); + writel(qp->sq.db_data.raw, qp->sq.db); + + /* Make sure write sticks */ + mmiowb(); + + spin_unlock_irqrestore(&qp->q_lock, flags); + + return rc; +}
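+ +/* Illustrative only, not part of the patch: a ULP reaches qedr_post_send() + * through ib_post_send() with a WR chain, and pays for a single doorbell no + * matter how many WQEs were linked. All names here (qp, dma_addr, len, lkey) + * are placeholders: + * + *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey }; + *	struct ib_send_wr second = { .opcode = IB_WR_SEND, .sg_list = &sge, + *				     .num_sge = 1, .send_flags = IB_SEND_SIGNALED }; + *	struct ib_send_wr first = second; + *	struct ib_send_wr *bad_wr; + * + *	first.next = &second; + *	if (ib_post_send(qp, &first, &bad_wr)) + *		... bad_wr now points at the WR that failed ... + */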
+ +int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct qedr_qp *qp = get_qedr_qp(ibqp); + struct qedr_dev *dev = qp->dev; + unsigned long flags; + int status = 0; + + if (qp->qp_type == IB_QPT_GSI) + return qedr_gsi_post_recv(ibqp, wr, bad_wr); + + spin_lock_irqsave(&qp->q_lock, flags); + + if ((qp->state == QED_ROCE_QP_STATE_RESET) || + (qp->state == QED_ROCE_QP_STATE_ERR)) { + spin_unlock_irqrestore(&qp->q_lock, flags); + *bad_wr = wr; + return -EINVAL; + } + + while (wr) { + int i; + + if (qed_chain_get_elem_left_u32(&qp->rq.pbl) < + QEDR_MAX_RQE_ELEMENTS_PER_RQE || + wr->num_sge > qp->rq.max_sges) { + DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n", + qed_chain_get_elem_left_u32(&qp->rq.pbl), + QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge, + qp->rq.max_sges); + status = -ENOMEM; + *bad_wr = wr; + break; + } + for (i = 0; i < wr->num_sge; i++) { + u32 flags = 0; + struct rdma_rq_sge *rqe = + qed_chain_produce(&qp->rq.pbl); + + /* First one must include the number + * of SGE in the list + */ + if (!i) + SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, + wr->num_sge); + + SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, + wr->sg_list[i].lkey); + + RQ_SGE_SET(rqe, wr->sg_list[i].addr, + wr->sg_list[i].length, flags); + } + + /* Special case of no SGEs. FW requires between 1-4 SGEs; in + * this case we need to post one SGE with length zero. This is + * because an RDMA write with immediate consumes an RQ entry. + */ + if (!wr->num_sge) { + u32 flags = 0; + struct rdma_rq_sge *rqe = + qed_chain_produce(&qp->rq.pbl); + + /* First one must include the number + * of SGE in the list + */ + SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0); + SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1); + + RQ_SGE_SET(rqe, 0, 0, flags); + i = 1; + } + + qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id; + qp->rqe_wr_id[qp->rq.prod].wqe_size = i; + + qedr_inc_sw_prod(&qp->rq); + + /* Flush all the writes before signalling doorbell */ + wmb(); + + qp->rq.db_data.data.value++; + + writel(qp->rq.db_data.raw, qp->rq.db); + + /* Make sure write sticks */ + mmiowb(); + + wr = wr->next; + } + + spin_unlock_irqrestore(&qp->q_lock, flags); + + return status; +} + +static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe) +{ + struct rdma_cqe_requester *resp_cqe = &cqe->req; + + return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) == + cq->pbl_toggle; +} + +static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe) +{ + struct rdma_cqe_requester *resp_cqe = &cqe->req; + struct qedr_qp *qp; + + qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi, + resp_cqe->qp_handle.lo, + u64); + return qp; +} + +static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe) +{ + struct rdma_cqe_requester *resp_cqe = &cqe->req; + + return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE); +} + +/* Return latest CQE (needs processing) */ +static union rdma_cqe *get_cqe(struct qedr_cq *cq) +{ + return cq->latest_cqe; +}
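+ +/* A note on is_valid_cqe() above: CQE ownership is tracked with a toggle bit + * rather than by zeroing consumed entries. The consumer flips its phase bit + * (cq->pbl_toggle) each time the CQ ring wraps, so an entry is new exactly + * when its TOGGLE_BIT matches the current phase. + */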
+ +/* In FMR we need to increment the completed counter that the FMR algorithm + * uses to determine whether a PBL can be freed. This must be done whether or + * not the work request was signaled. For this purpose we call this function + * from the condition that checks if a WR should be skipped, to make sure we + * don't miss it (possibly this FMR operation was not signaled). + */ +static inline void qedr_chk_if_fmr(struct qedr_qp *qp) +{ + if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR) + qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; +} + +static int process_req(struct qedr_dev *dev, struct qedr_qp *qp, + struct qedr_cq *cq, int num_entries, + struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status, + int force) +{ + u16 cnt = 0; + + while (num_entries && qp->sq.wqe_cons != hw_cons) { + if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) { + qedr_chk_if_fmr(qp); + /* skip WC */ + goto next_cqe; + } + + /* fill WC */ + wc->status = status; + wc->wc_flags = 0; + wc->src_qp = qp->id; + wc->qp = &qp->ibqp; + + wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id; + wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode; + + switch (wc->opcode) { + case IB_WC_RDMA_WRITE: + wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len; + break; + case IB_WC_COMP_SWAP: + case IB_WC_FETCH_ADD: + wc->byte_len = 8; + break; + case IB_WC_REG_MR: + qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; + break; + default: + break; + } + + num_entries--; + wc++; + cnt++; +next_cqe: + while (qp->wqe_wr_id[qp->sq.cons].wqe_size--) + qed_chain_consume(&qp->sq.pbl); + qedr_inc_sw_cons(&qp->sq); + } + + return cnt; +} + +static int qedr_poll_cq_req(struct qedr_dev *dev, + struct qedr_qp *qp, struct qedr_cq *cq, + int num_entries, struct ib_wc *wc, + struct rdma_cqe_requester *req) +{ + int cnt = 0; + + switch (req->status) { + case RDMA_CQE_REQ_STS_OK: + cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons, + IB_WC_SUCCESS, 0); + break; + case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons, + IB_WC_WR_FLUSH_ERR, 0); + break; + default: + /* process all WQEs before the consumer */ + qp->state = QED_ROCE_QP_STATE_ERR; + cnt = process_req(dev, qp, cq, num_entries, wc, + req->sq_cons - 1, IB_WC_SUCCESS, 0); + wc += cnt; + /* if we have extra WC fill it with actual error info */ + if (cnt < num_entries) { + enum ib_wc_status wc_status; + + switch (req->status) { + case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_BAD_RESP_ERR; + break; + case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_LOC_LEN_ERR; + break; + case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_LOC_QP_OP_ERR; + break; + case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_LOC_PROT_ERR; + break; + case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR.
CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_MW_BIND_ERR; + break; + case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_REM_INV_REQ_ERR; + break; + case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_REM_ACCESS_ERR; + break; + case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_REM_OP_ERR; + break; + case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_RNR_RETRY_EXC_ERR; + break; + case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR: + DP_ERR(dev, + "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_RETRY_EXC_ERR; + break; + default: + DP_ERR(dev, + "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_GENERAL_ERR; + } + cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons, + wc_status, 1); + } + } + + return cnt; +} + +static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp, + struct qedr_cq *cq, struct ib_wc *wc, + struct rdma_cqe_responder *resp, u64 wr_id) +{ + enum ib_wc_status wc_status = IB_WC_SUCCESS; + u8 flags; + + wc->opcode = IB_WC_RECV; + wc->wc_flags = 0; + + switch (resp->status) { + case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR: + wc_status = IB_WC_LOC_ACCESS_ERR; + break; + case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR: + wc_status = IB_WC_LOC_LEN_ERR; + break; + case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR: + wc_status = IB_WC_LOC_QP_OP_ERR; + break; + case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR: + wc_status = IB_WC_LOC_PROT_ERR; + break; + case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR: + wc_status = IB_WC_MW_BIND_ERR; + break; + case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR: + wc_status = IB_WC_REM_INV_RD_REQ_ERR; + break; + case RDMA_CQE_RESP_STS_OK: + wc_status = IB_WC_SUCCESS; + wc->byte_len = le32_to_cpu(resp->length); + + flags = resp->flags & QEDR_RESP_RDMA_IMM; + + if (flags == QEDR_RESP_RDMA_IMM) + wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; + + if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) { + wc->ex.imm_data = + le32_to_cpu(resp->imm_data_or_inv_r_Key); + wc->wc_flags |= IB_WC_WITH_IMM; + } + break; + default: + wc->status = IB_WC_GENERAL_ERR; + DP_ERR(dev, "Invalid CQE status detected\n"); + } + + /* fill WC */ + wc->status = wc_status; + wc->src_qp = qp->id; + wc->qp = &qp->ibqp; + wc->wr_id = wr_id; +} + +static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp, + struct qedr_cq *cq, struct ib_wc *wc, + struct rdma_cqe_responder *resp) +{ + u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id; + + __process_resp_one(dev, qp, cq, wc, resp, wr_id); + + while (qp->rqe_wr_id[qp->rq.cons].wqe_size--) + qed_chain_consume(&qp->rq.pbl); + qedr_inc_sw_cons(&qp->rq); + + return 1; +} + +static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq, + int num_entries, struct ib_wc *wc, u16 hw_cons) +{ + u16 cnt = 0; + + while (num_entries && qp->rq.wqe_cons != hw_cons) { + /* fill WC */ + wc->status = 
IB_WC_WR_FLUSH_ERR; + wc->wc_flags = 0; + wc->src_qp = qp->id; + wc->byte_len = 0; + wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id; + wc->qp = &qp->ibqp; + num_entries--; + wc++; + cnt++; + while (qp->rqe_wr_id[qp->rq.cons].wqe_size--) + qed_chain_consume(&qp->rq.pbl); + qedr_inc_sw_cons(&qp->rq); + } + + return cnt; +} + +static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp, + struct rdma_cqe_responder *resp, int *update) +{ + if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) { + consume_cqe(cq); + *update |= 1; + } +} + +static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp, + struct qedr_cq *cq, int num_entries, + struct ib_wc *wc, struct rdma_cqe_responder *resp, + int *update) +{ + int cnt; + + if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) { + cnt = process_resp_flush(qp, cq, num_entries, wc, + resp->rq_cons); + try_consume_resp_cqe(cq, qp, resp, update); + } else { + cnt = process_resp_one(dev, qp, cq, wc, resp); + consume_cqe(cq); + *update |= 1; + } + + return cnt; +} + +static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp, + struct rdma_cqe_requester *req, int *update) +{ + if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) { + consume_cqe(cq); + *update |= 1; + } +} + +int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) +{ + struct qedr_dev *dev = get_qedr_dev(ibcq->device); + struct qedr_cq *cq = get_qedr_cq(ibcq); + union rdma_cqe *cqe = cq->latest_cqe; + u32 old_cons, new_cons; + unsigned long flags; + int update = 0; + int done = 0; + + if (cq->cq_type == QEDR_CQ_TYPE_GSI) + return qedr_gsi_poll_cq(ibcq, num_entries, wc); + + spin_lock_irqsave(&cq->cq_lock, flags); + old_cons = qed_chain_get_cons_idx_u32(&cq->pbl); + while (num_entries && is_valid_cqe(cq, cqe)) { + struct qedr_qp *qp; + int cnt = 0; + + /* prevent speculative reads of any field of CQE */ + rmb(); + + qp = cqe_get_qp(cqe); + if (!qp) { + WARN(1, "Error: CQE QP pointer is NULL. 
CQE=%p\n", cqe); + break; + } + + wc->qp = &qp->ibqp; + + switch (cqe_get_type(cqe)) { + case RDMA_CQE_TYPE_REQUESTER: + cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc, + &cqe->req); + try_consume_req_cqe(cq, qp, &cqe->req, &update); + break; + case RDMA_CQE_TYPE_RESPONDER_RQ: + cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc, + &cqe->resp, &update); + break; + case RDMA_CQE_TYPE_INVALID: + default: + DP_ERR(dev, "Error: invalid CQE type = %d\n", + cqe_get_type(cqe)); + } + num_entries -= cnt; + wc += cnt; + done += cnt; + + cqe = get_cqe(cq); + } + new_cons = qed_chain_get_cons_idx_u32(&cq->pbl); + + cq->cq_cons += new_cons - old_cons; + + if (update) + /* the doorbell notifies about the latest VALID entry, + * but the chain already points to the next INVALID one + */ + doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags); + + spin_unlock_irqrestore(&cq->cq_lock, flags); + return done; +} + +int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags, + u8 port_num, + const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *mad_hdr, + size_t in_mad_size, struct ib_mad_hdr *out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index) +{ + struct qedr_dev *dev = get_qedr_dev(ibdev); + + DP_DEBUG(dev, QEDR_MSG_GSI, + "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n", + mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod, + mad_hdr->class_specific, mad_hdr->class_version, + mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status); + return IB_MAD_RESULT_SUCCESS; +} + +int qedr_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = qedr_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE | + RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; + immutable->max_mad_size = IB_MGMT_MAD_SIZE; + + return 0; +} diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h new file mode 100644 index 000000000000..a9b5e67bb81e --- /dev/null +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -0,0 +1,101 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __QEDR_VERBS_H__ +#define __QEDR_VERBS_H__ + +int qedr_query_device(struct ib_device *ibdev, + struct ib_device_attr *attr, struct ib_udata *udata); +int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props); +int qedr_modify_port(struct ib_device *, u8 port, int mask, + struct ib_port_modify *props); + +int qedr_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid); + +int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey); + +struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *); +int qedr_dealloc_ucontext(struct ib_ucontext *); + +int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma); +int qedr_del_gid(struct ib_device *device, u8 port_num, + unsigned int index, void **context); +int qedr_add_gid(struct ib_device *device, u8 port_num, + unsigned int index, const union ib_gid *gid, + const struct ib_gid_attr *attr, void **context); +struct ib_pd *qedr_alloc_pd(struct ib_device *, + struct ib_ucontext *, struct ib_udata *); +int qedr_dealloc_pd(struct ib_pd *pd); + +struct ib_cq *qedr_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *ib_ctx, + struct ib_udata *udata); +int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); +int qedr_destroy_cq(struct ib_cq *); +int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); +struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs, + struct ib_udata *); +int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); +int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr, + int qp_attr_mask, struct ib_qp_init_attr *); +int qedr_destroy_qp(struct ib_qp *ibqp); + +struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr); +int qedr_destroy_ah(struct ib_ah *ibah); + +int qedr_dereg_mr(struct ib_mr *); +struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc); + +struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length, + u64 virt, int acc, struct ib_udata *); + +int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, + int sg_nents, unsigned int *sg_offset); + +struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg); +int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc); +int qedr_post_send(struct ib_qp *, struct ib_send_wr *, + struct ib_send_wr **bad_wr); +int qedr_post_recv(struct ib_qp *, struct ib_recv_wr *, + struct ib_recv_wr **bad_wr); +int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags, + u8 port_num, const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *in_mad, + size_t in_mad_size, struct ib_mad_hdr *out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index); + +int qedr_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable); +#endif diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 936f07a4e35f..6d7de9bfed9a 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -103,6 +103,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = { 6-byte ALPS packet */ #define ALPS_STICK_BITS 0x100 /* separate stick button bits */ 
#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ +#define ALPS_DUALPOINT_WITH_PRESSURE 0x400 /* device can report trackpoint pressure */ static const struct alps_model_info alps_model_data[] = { { { 0x32, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, /* Toshiba Salellite Pro M10 */ @@ -1156,15 +1157,28 @@ static unsigned char alps_get_pkt_id_ss4_v2(unsigned char *byte) { unsigned char pkt_id = SS4_PACKET_ID_IDLE; - if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 && - (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 && byte[5] == 0x00) { - pkt_id = SS4_PACKET_ID_IDLE; - } else if (!(byte[3] & 0x10)) { - pkt_id = SS4_PACKET_ID_ONE; - } else if (!(byte[3] & 0x20)) { + switch (byte[3] & 0x30) { + case 0x00: + if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 && + (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 && + byte[5] == 0x00) { + pkt_id = SS4_PACKET_ID_IDLE; + } else { + pkt_id = SS4_PACKET_ID_ONE; + } + break; + case 0x10: + /* two-finger finger positions */ pkt_id = SS4_PACKET_ID_TWO; - } else { + break; + case 0x20: + /* stick pointer */ + pkt_id = SS4_PACKET_ID_STICK; + break; + case 0x30: + /* third and fourth finger positions */ pkt_id = SS4_PACKET_ID_MULTI; + break; } return pkt_id; @@ -1185,7 +1199,13 @@ static int alps_decode_ss4_v2(struct alps_fields *f, f->mt[0].x = SS4_1F_X_V2(p); f->mt[0].y = SS4_1F_Y_V2(p); f->pressure = ((SS4_1F_Z_V2(p)) * 2) & 0x7f; - f->fingers = 1; + /* + * When a button is held the device will give us events + * with x, y, and pressure of 0. This causes annoying jumps + * if a touch is released while the button is held. + * Handle this by claiming zero contacts. + */ + f->fingers = f->pressure > 0 ? 1 : 0; f->first_mp = 0; f->is_mp = 0; break; @@ -1246,16 +1266,40 @@ static int alps_decode_ss4_v2(struct alps_fields *f, } break; + case SS4_PACKET_ID_STICK: + if (!(priv->flags & ALPS_DUALPOINT)) { + psmouse_warn(psmouse, + "Rejected trackstick packet from non DualPoint device"); + } else { + int x = (s8)(((p[0] & 1) << 7) | (p[1] & 0x7f)); + int y = (s8)(((p[3] & 1) << 7) | (p[2] & 0x7f)); + int pressure = (s8)(p[4] & 0x7f); + + input_report_rel(priv->dev2, REL_X, x); + input_report_rel(priv->dev2, REL_Y, -y); + input_report_abs(priv->dev2, ABS_PRESSURE, pressure); + } + break; + case SS4_PACKET_ID_IDLE: default: memset(f, 0, sizeof(struct alps_fields)); break; } - f->left = !!(SS4_BTN_V2(p) & 0x01); - if (!(priv->flags & ALPS_BUTTONPAD)) { - f->right = !!(SS4_BTN_V2(p) & 0x02); - f->middle = !!(SS4_BTN_V2(p) & 0x04); + /* handle buttons */ + if (pkt_id == SS4_PACKET_ID_STICK) { + f->ts_left = !!(SS4_BTN_V2(p) & 0x01); + if (!(priv->flags & ALPS_BUTTONPAD)) { + f->ts_right = !!(SS4_BTN_V2(p) & 0x02); + f->ts_middle = !!(SS4_BTN_V2(p) & 0x04); + } + } else { + f->left = !!(SS4_BTN_V2(p) & 0x01); + if (!(priv->flags & ALPS_BUTTONPAD)) { + f->right = !!(SS4_BTN_V2(p) & 0x02); + f->middle = !!(SS4_BTN_V2(p) & 0x04); + } } return 0; @@ -1266,6 +1310,7 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse) struct alps_data *priv = psmouse->private; unsigned char *packet = psmouse->packet; struct input_dev *dev = psmouse->dev; + struct input_dev *dev2 = priv->dev2; struct alps_fields *f = &priv->f; memset(f, 0, sizeof(struct alps_fields)); @@ -1311,6 +1356,13 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse) input_report_abs(dev, ABS_PRESSURE, f->pressure); input_sync(dev); + + if (priv->flags & ALPS_DUALPOINT) { + input_report_key(dev2, BTN_LEFT, f->ts_left); + 
input_report_key(dev2, BTN_RIGHT, f->ts_right); + input_report_key(dev2, BTN_MIDDLE, f->ts_middle); + input_sync(dev2); + } } static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse) @@ -2695,6 +2747,10 @@ static int alps_set_protocol(struct psmouse *psmouse, if (alps_set_defaults_ss4_v2(psmouse, priv)) return -EIO; + if (priv->fw_ver[1] == 0x1) + priv->flags |= ALPS_DUALPOINT | + ALPS_DUALPOINT_WITH_PRESSURE; + break; } @@ -2767,6 +2823,9 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) } else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x14 && ec[1] == 0x02) { protocol = &alps_v8_protocol_data; + } else if (e7[0] == 0x73 && e7[1] == 0x03 && + e7[2] == 0x28 && ec[1] == 0x01) { + protocol = &alps_v8_protocol_data; } else { psmouse_dbg(psmouse, "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec); @@ -2949,6 +3008,10 @@ int alps_init(struct psmouse *psmouse) input_set_capability(dev2, EV_REL, REL_X); input_set_capability(dev2, EV_REL, REL_Y); + if (priv->flags & ALPS_DUALPOINT_WITH_PRESSURE) { + input_set_capability(dev2, EV_ABS, ABS_PRESSURE); + input_set_abs_params(dev2, ABS_PRESSURE, 0, 127, 0, 0); + } input_set_capability(dev2, EV_KEY, BTN_LEFT); input_set_capability(dev2, EV_KEY, BTN_RIGHT); input_set_capability(dev2, EV_KEY, BTN_MIDDLE); diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index d37f814dc447..b9417e2d7ad3 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -37,12 +37,14 @@ * or there's button activities. * SS4_PACKET_ID_TWO: There's two or more fingers on touchpad * SS4_PACKET_ID_MULTI: There's three or more fingers on touchpad + * SS4_PACKET_ID_STICK: A stick pointer packet */ enum SS4_PACKET_ID { SS4_PACKET_ID_IDLE = 0, SS4_PACKET_ID_ONE, SS4_PACKET_ID_TWO, SS4_PACKET_ID_MULTI, + SS4_PACKET_ID_STICK, }; #define SS4_COUNT_PER_ELECTRODE 256 diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 08e252a42480..db7d1d666ac1 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1134,7 +1134,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, * System76 Pangolin 0x250f01 ? 
2 hw buttons * (*) + 3 trackpoint buttons * (**) + 0 trackpoint buttons - * Note: Lenovo L430 and Lenovo L430 have the same fw_version/caps + * Note: Lenovo L430 and Lenovo L530 have the same fw_version/caps */ static void elantech_set_buttonpad_prop(struct psmouse *psmouse) { @@ -1159,6 +1159,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"), }, }, + { + /* Fujitsu H760 also has a middle button */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), + }, + }, #endif { } }; @@ -1503,10 +1510,10 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { }, }, { - /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */ + /* Fujitsu H760 does not work with crc_enabled == 0 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"), + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), }, }, { @@ -1517,6 +1524,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { }, }, { + /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"), + }, + }, + { + /* Fujitsu LIFEBOOK E556 does not work with crc_enabled == 0 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"), + }, + }, + { /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), diff --git a/drivers/input/rmi4/rmi_i2c.c b/drivers/input/rmi4/rmi_i2c.c index 6f2e0e4f0296..1ebc2c1debae 100644 --- a/drivers/input/rmi4/rmi_i2c.c +++ b/drivers/input/rmi4/rmi_i2c.c @@ -221,6 +221,21 @@ static const struct of_device_id rmi_i2c_of_match[] = { MODULE_DEVICE_TABLE(of, rmi_i2c_of_match); #endif +static void rmi_i2c_regulator_bulk_disable(void *data) +{ + struct rmi_i2c_xport *rmi_i2c = data; + + regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies), + rmi_i2c->supplies); +} + +static void rmi_i2c_unregister_transport(void *data) +{ + struct rmi_i2c_xport *rmi_i2c = data; + + rmi_unregister_transport_device(&rmi_i2c->xport); +} + static int rmi_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -264,6 +279,12 @@ static int rmi_i2c_probe(struct i2c_client *client, if (retval < 0) return retval; + retval = devm_add_action_or_reset(&client->dev, + rmi_i2c_regulator_bulk_disable, + rmi_i2c); + if (retval) + return retval; + of_property_read_u32(client->dev.of_node, "syna,startup-delay-ms", &rmi_i2c->startup_delay); @@ -294,6 +315,11 @@ static int rmi_i2c_probe(struct i2c_client *client, client->addr); return retval; } + retval = devm_add_action_or_reset(&client->dev, + rmi_i2c_unregister_transport, + rmi_i2c); + if (retval) + return retval; retval = rmi_i2c_init_irq(client); if (retval < 0) @@ -304,17 +330,6 @@ static int rmi_i2c_probe(struct i2c_client *client, return 0; } -static int rmi_i2c_remove(struct i2c_client *client) -{ - struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client); - - rmi_unregister_transport_device(&rmi_i2c->xport); - regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies), - rmi_i2c->supplies); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int rmi_i2c_suspend(struct device *dev) { @@ -431,7 +446,6 @@ static struct i2c_driver rmi_i2c_driver = { }, .id_table = rmi_id, .probe = rmi_i2c_probe, - .remove = rmi_i2c_remove, }; module_i2c_driver(rmi_i2c_driver); diff --git a/drivers/input/rmi4/rmi_spi.c 
b/drivers/input/rmi4/rmi_spi.c index 55bd1b34970c..4ebef607e214 100644 --- a/drivers/input/rmi4/rmi_spi.c +++ b/drivers/input/rmi4/rmi_spi.c @@ -396,6 +396,13 @@ static inline int rmi_spi_of_probe(struct spi_device *spi, } #endif +static void rmi_spi_unregister_transport(void *data) +{ + struct rmi_spi_xport *rmi_spi = data; + + rmi_unregister_transport_device(&rmi_spi->xport); +} + static int rmi_spi_probe(struct spi_device *spi) { struct rmi_spi_xport *rmi_spi; @@ -464,6 +471,11 @@ static int rmi_spi_probe(struct spi_device *spi) dev_err(&spi->dev, "failed to register transport.\n"); return retval; } + retval = devm_add_action_or_reset(&spi->dev, + rmi_spi_unregister_transport, + rmi_spi); + if (retval) + return retval; retval = rmi_spi_init_irq(spi); if (retval < 0) @@ -473,15 +485,6 @@ static int rmi_spi_probe(struct spi_device *spi) return 0; } -static int rmi_spi_remove(struct spi_device *spi) -{ - struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi); - - rmi_unregister_transport_device(&rmi_spi->xport); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int rmi_spi_suspend(struct device *dev) { @@ -577,7 +580,6 @@ static struct spi_driver rmi_spi_driver = { }, .id_table = rmi_id, .probe = rmi_spi_probe, - .remove = rmi_spi_remove, }; module_spi_driver(rmi_spi_driver); diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h index a5eed2ade53d..34da81c006b6 100644 --- a/drivers/input/serio/i8042-io.h +++ b/drivers/input/serio/i8042-io.h @@ -81,7 +81,7 @@ static inline int i8042_platform_init(void) return -EBUSY; #endif - i8042_reset = 1; + i8042_reset = I8042_RESET_ALWAYS; return 0; } diff --git a/drivers/input/serio/i8042-ip22io.h b/drivers/input/serio/i8042-ip22io.h index ee1ad27d6ed0..08a1c10a1448 100644 --- a/drivers/input/serio/i8042-ip22io.h +++ b/drivers/input/serio/i8042-ip22io.h @@ -61,7 +61,7 @@ static inline int i8042_platform_init(void) return -EBUSY; #endif - i8042_reset = 1; + i8042_reset = I8042_RESET_ALWAYS; return 0; } diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h index f708c75d16f1..1aabea43329e 100644 --- a/drivers/input/serio/i8042-ppcio.h +++ b/drivers/input/serio/i8042-ppcio.h @@ -44,7 +44,7 @@ static inline void i8042_write_command(int val) static inline int i8042_platform_init(void) { - i8042_reset = 1; + i8042_reset = I8042_RESET_ALWAYS; return 0; } diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h index afcd1c1a05b2..6231d63860ee 100644 --- a/drivers/input/serio/i8042-sparcio.h +++ b/drivers/input/serio/i8042-sparcio.h @@ -130,7 +130,7 @@ static int __init i8042_platform_init(void) } } - i8042_reset = 1; + i8042_reset = I8042_RESET_ALWAYS; return 0; } diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h index 73f5cc124a36..455747552f85 100644 --- a/drivers/input/serio/i8042-unicore32io.h +++ b/drivers/input/serio/i8042-unicore32io.h @@ -61,7 +61,7 @@ static inline int i8042_platform_init(void) if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042")) return -EBUSY; - i8042_reset = 1; + i8042_reset = I8042_RESET_ALWAYS; return 0; } diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 68f5f4a0f1e7..f4bfb4b2d50a 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -510,6 +510,90 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { { } }; +/* + * On some Asus laptops, just running self tests cause 
problems. + */ +static const struct dmi_system_id i8042_dmi_noselftest_table[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "A455LD"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "K401LB"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "K501LB"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "K501LX"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "R409L"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "V502LX"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X302LA"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X450LD"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X455LAB"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X455LDB"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X455LF"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"), + }, + }, + { } +}; static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { { /* MSI Wind U-100 */ @@ -1072,12 +1156,18 @@ static int __init i8042_platform_init(void) return retval; #if defined(__ia64__) - i8042_reset = true; + i8042_reset = I8042_RESET_ALWAYS; #endif #ifdef CONFIG_X86 - if (dmi_check_system(i8042_dmi_reset_table)) - i8042_reset = true; + /* Honor module parameter when value is not default */ + if (i8042_reset == I8042_RESET_DEFAULT) { + if (dmi_check_system(i8042_dmi_reset_table)) + i8042_reset = I8042_RESET_ALWAYS; + + if (dmi_check_system(i8042_dmi_noselftest_table)) + i8042_reset = I8042_RESET_NEVER; + } if (dmi_check_system(i8042_dmi_noloop_table)) i8042_noloop = true; diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 405252a884dd..89abfdb539ac 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -48,9 +48,39 @@ static bool i8042_unlock; module_param_named(unlock, i8042_unlock, bool, 0); MODULE_PARM_DESC(unlock, "Ignore keyboard lock."); -static bool i8042_reset; -module_param_named(reset, i8042_reset, bool, 0); -MODULE_PARM_DESC(reset, "Reset controller during init and cleanup."); +enum i8042_controller_reset_mode { + I8042_RESET_NEVER, + I8042_RESET_ALWAYS, + I8042_RESET_ON_S2RAM, +#define I8042_RESET_DEFAULT I8042_RESET_ON_S2RAM +}; +static enum i8042_controller_reset_mode i8042_reset = I8042_RESET_DEFAULT; +static int i8042_set_reset(const char *val, const struct kernel_param *kp) +{ + enum i8042_controller_reset_mode *arg = kp->arg; + int error; + bool reset; + + if (val) { + error = kstrtobool(val, &reset); + if (error) + return error; + } else { + reset = true; + } + + *arg = reset ? 
I8042_RESET_ALWAYS : I8042_RESET_NEVER; + return 0; +} + +static const struct kernel_param_ops param_ops_reset_param = { + .flags = KERNEL_PARAM_OPS_FL_NOARG, + .set = i8042_set_reset, +}; +#define param_check_reset_param(name, p) \ + __param_check(name, p, enum i8042_controller_reset_mode) +module_param_named(reset, i8042_reset, reset_param, 0); +MODULE_PARM_DESC(reset, "Reset controller on resume, cleanup or both"); static bool i8042_direct; module_param_named(direct, i8042_direct, bool, 0); @@ -1019,7 +1049,7 @@ static int i8042_controller_init(void) * Reset the controller and reset CRT to the original value set by BIOS. */ -static void i8042_controller_reset(bool force_reset) +static void i8042_controller_reset(bool s2r_wants_reset) { i8042_flush(); @@ -1044,8 +1074,10 @@ static void i8042_controller_reset(bool force_reset) * Reset the controller if requested. */ - if (i8042_reset || force_reset) + if (i8042_reset == I8042_RESET_ALWAYS || + (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) { i8042_controller_selftest(); + } /* * Restore the original control register setting. @@ -1110,7 +1142,7 @@ static void i8042_dritek_enable(void) * before suspending. */ -static int i8042_controller_resume(bool force_reset) +static int i8042_controller_resume(bool s2r_wants_reset) { int error; @@ -1118,7 +1150,8 @@ static int i8042_controller_resume(bool force_reset) if (error) return error; - if (i8042_reset || force_reset) { + if (i8042_reset == I8042_RESET_ALWAYS || + (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) { error = i8042_controller_selftest(); if (error) return error; @@ -1195,7 +1228,7 @@ static int i8042_pm_resume_noirq(struct device *dev) static int i8042_pm_resume(struct device *dev) { - bool force_reset; + bool want_reset; int i; for (i = 0; i < I8042_NUM_PORTS; i++) { @@ -1218,9 +1251,9 @@ static int i8042_pm_resume(struct device *dev) * off control to the platform firmware, otherwise we can simply restore * the mode. */ - force_reset = pm_resume_via_firmware(); + want_reset = pm_resume_via_firmware(); - return i8042_controller_resume(force_reset); + return i8042_controller_resume(want_reset); } static int i8042_pm_thaw(struct device *dev) @@ -1482,7 +1515,7 @@ static int __init i8042_probe(struct platform_device *dev) i8042_platform_device = dev; - if (i8042_reset) { + if (i8042_reset == I8042_RESET_ALWAYS) { error = i8042_controller_selftest(); if (error) return error; diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c index fb5fb9140ca9..552a3773f79d 100644 --- a/drivers/input/touchscreen/melfas_mip4.c +++ b/drivers/input/touchscreen/melfas_mip4.c @@ -157,6 +157,7 @@ struct mip4_ts { char phys[32]; char product_name[16]; + char ic_name[4]; unsigned int max_x; unsigned int max_y; @@ -263,6 +264,18 @@ static int mip4_query_device(struct mip4_ts *ts) dev_dbg(&ts->client->dev, "product name: %.*s\n", (int)sizeof(ts->product_name), ts->product_name); + /* IC name */ + cmd[0] = MIP4_R0_INFO; + cmd[1] = MIP4_R1_INFO_IC_NAME; + error = mip4_i2c_xfer(ts, cmd, sizeof(cmd), + ts->ic_name, sizeof(ts->ic_name)); + if (error) + dev_warn(&ts->client->dev, + "Failed to retrieve IC name: %d\n", error); + else + dev_dbg(&ts->client->dev, "IC name: %.*s\n", + (int)sizeof(ts->ic_name), ts->ic_name); + /* Firmware version */ error = mip4_get_fw_version(ts); if (error) @@ -1326,7 +1339,7 @@ static ssize_t mip4_sysfs_read_hw_version(struct device *dev, * paired with current firmware in the chip. 
*/ count = snprintf(buf, PAGE_SIZE, "%.*s\n", - (int)sizeof(ts->product_name), ts->product_name); + (int)sizeof(ts->product_name), ts->product_name); mutex_unlock(&ts->input->mutex); @@ -1335,9 +1348,30 @@ static ssize_t mip4_sysfs_read_hw_version(struct device *dev, static DEVICE_ATTR(hw_version, S_IRUGO, mip4_sysfs_read_hw_version, NULL); +static ssize_t mip4_sysfs_read_ic_name(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct mip4_ts *ts = i2c_get_clientdata(client); + size_t count; + + mutex_lock(&ts->input->mutex); + + count = snprintf(buf, PAGE_SIZE, "%.*s\n", + (int)sizeof(ts->ic_name), ts->ic_name); + + mutex_unlock(&ts->input->mutex); + + return count; +} + +static DEVICE_ATTR(ic_name, S_IRUGO, mip4_sysfs_read_ic_name, NULL); + static struct attribute *mip4_attrs[] = { &dev_attr_fw_version.attr, &dev_attr_hw_version.attr, + &dev_attr_ic_name.attr, &dev_attr_update_fw.attr, NULL, }; @@ -1538,6 +1572,6 @@ static struct i2c_driver mip4_driver = { module_i2c_driver(mip4_driver); MODULE_DESCRIPTION("MELFAS MIP4 Touchscreen"); -MODULE_VERSION("2016.03.12"); +MODULE_VERSION("2016.09.28"); MODULE_AUTHOR("Sangwon Jee <jeesw@melfas.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c index efbf0e4304b7..ebc2b0b15f67 100644 --- a/drivers/irqchip/irq-eznps.c +++ b/drivers/irqchip/irq-eznps.c @@ -85,7 +85,7 @@ static void nps400_irq_eoi_global(struct irq_data *irqd) nps_ack_gic(); } -static void nps400_irq_eoi(struct irq_data *irqd) +static void nps400_irq_ack(struct irq_data *irqd) { unsigned int __maybe_unused irq = irqd_to_hwirq(irqd); @@ -103,7 +103,7 @@ static struct irq_chip nps400_irq_chip_percpu = { .name = "NPS400 IC", .irq_mask = nps400_irq_mask, .irq_unmask = nps400_irq_unmask, - .irq_eoi = nps400_irq_eoi, + .irq_ack = nps400_irq_ack, }; static int nps400_irq_map(struct irq_domain *d, unsigned int virq, diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 9b81bd8b929c..19d642eae096 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -153,7 +153,7 @@ static void gic_enable_redist(bool enable) return; /* No PM support in this redistributor */ } - while (count--) { + while (--count) { val = readl_relaxed(rbase + GICR_WAKER); if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) break; diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c index 6b304eb39bd2..1aec12c6d9ac 100644 --- a/drivers/irqchip/irq-i8259.c +++ b/drivers/irqchip/irq-i8259.c @@ -38,6 +38,7 @@ static void disable_8259A_irq(struct irq_data *d); static void enable_8259A_irq(struct irq_data *d); static void mask_and_ack_8259A(struct irq_data *d); static void init_8259A(int auto_eoi); +static int (*i8259_poll)(void) = i8259_irq; static struct irq_chip i8259A_chip = { .name = "XT-PIC", @@ -51,6 +52,11 @@ static struct irq_chip i8259A_chip = { * 8259A PIC functions to handle ISA devices: */ +void i8259_set_poll(int (*poll)(void)) +{ + i8259_poll = poll; +} + /* * This contains the irq mask for both 8259A irq controllers, */ @@ -89,24 +95,6 @@ static void enable_8259A_irq(struct irq_data *d) raw_spin_unlock_irqrestore(&i8259A_lock, flags); } -int i8259A_irq_pending(unsigned int irq) -{ - unsigned int mask; - unsigned long flags; - int ret; - - irq -= I8259A_IRQ_BASE; - mask = 1 << irq; - raw_spin_lock_irqsave(&i8259A_lock, flags); - if (irq < 8) - ret = inb(PIC_MASTER_CMD) & mask; - else - ret = inb(PIC_SLAVE_CMD) & (mask >> 8); - 
raw_spin_unlock_irqrestore(&i8259A_lock, flags); - - return ret; -} - void make_8259A_irq(unsigned int irq) { disable_irq_nosync(irq); @@ -355,7 +343,7 @@ void __init init_i8259_irqs(void) static void i8259_irq_dispatch(struct irq_desc *desc) { struct irq_domain *domain = irq_desc_get_handler_data(desc); - int hwirq = i8259_irq(); + int hwirq = i8259_poll(); unsigned int irq; if (hwirq < 0) @@ -370,13 +358,15 @@ int __init i8259_of_init(struct device_node *node, struct device_node *parent) struct irq_domain *domain; unsigned int parent_irq; + domain = __init_i8259_irqs(node); + parent_irq = irq_of_parse_and_map(node, 0); if (!parent_irq) { pr_err("Failed to map i8259 parent IRQ\n"); + irq_domain_remove(domain); return -ENODEV; } - domain = __init_i8259_irqs(node); irq_set_chained_handler_and_data(parent_irq, i8259_irq_dispatch, domain); return 0; diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c index 84b01dec277d..033bccb41455 100644 --- a/drivers/irqchip/irq-jcore-aic.c +++ b/drivers/irqchip/irq-jcore-aic.c @@ -25,12 +25,30 @@ static struct irq_chip jcore_aic; +/* + * The J-Core AIC1 and AIC2 are cpu-local interrupt controllers and do + * not distinguish or use distinct irq number ranges for per-cpu event + * interrupts (timer, IPI). Since information to determine whether a + * particular irq number should be treated as per-cpu is not available + * at mapping time, we use a wrapper handler function which chooses + * the right handler at runtime based on whether IRQF_PERCPU was used + * when requesting the irq. + */ + +static void handle_jcore_irq(struct irq_desc *desc) +{ + if (irqd_is_per_cpu(irq_desc_get_irq_data(desc))) + handle_percpu_irq(desc); + else + handle_simple_irq(desc); +} + static int jcore_aic_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { struct irq_chip *aic = d->host_data; - irq_set_chip_and_handler(irq, aic, handle_simple_irq); + irq_set_chip_and_handler(irq, aic, handle_jcore_irq); return 0; } diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig index 29b3436d0910..367523a3c774 100644 --- a/drivers/media/v4l2-core/Kconfig +++ b/drivers/media/v4l2-core/Kconfig @@ -27,7 +27,7 @@ config VIDEO_FIXED_MINOR_RANGES config VIDEO_PCI_SKELETON tristate "Skeleton PCI V4L2 driver" - depends on PCI && BUILD_DOCSRC + depends on PCI depends on VIDEO_V4L2 && VIDEOBUF2_CORE depends on VIDEOBUF2_MEMOPS && VIDEOBUF2_DMA_CONTIG ---help--- diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 3f31ca32f52b..5fa36ebc0640 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -471,9 +471,9 @@ static int bond_check_dev_link(struct bonding *bond, /* Yes, the mii is overlaid on the ifreq.ifr_ifru */ strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ); mii = if_mii(&ifr); - if (IOCTL(slave_dev, &ifr, SIOCGMIIPHY) == 0) { + if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) { mii->reg_num = MII_BMSR; - if (IOCTL(slave_dev, &ifr, SIOCGMIIREG) == 0) + if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0) return mii->val_out & BMSR_LSTATUS; } } diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index bddb198c0b74..380a64115a98 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -693,7 +693,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct) while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) && !(reg_val & 
CN23XX_PKT_INPUT_CTL_QUIET) && - loop--) { + --loop) { reg_val = octeon_read_csr64( oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index e28d960997af..2d0cb609adc3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -207,6 +207,7 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr) int ret; char *mac_addr = (char *)addr; struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); + u8 port_num; assert(mac_cb); @@ -221,8 +222,11 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr) return ret; } - ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM, - mac_addr, true); + ret = hns_mac_get_inner_port_num(mac_cb, handle->vf_id, &port_num); + if (ret) + return ret; + + ret = hns_mac_set_multi(mac_cb, port_num, mac_addr, true); if (ret) dev_err(handle->owner_dev, "mac add mul_mac:%pM port%d fail, ret = %#x!\n", @@ -678,9 +682,6 @@ static int hns_ae_config_loopback(struct hnae_handle *handle, ret = -EINVAL; } - if (!ret) - hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en); - return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 22e141005cd9..ec8c738af726 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -141,9 +141,10 @@ void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) *@port_num:port number * */ -static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, - u8 vmid, u8 *port_num) +int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num) { + int q_num_per_vf, vf_num_per_port; + int vm_queue_id; u8 tmp_port; if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) { @@ -174,6 +175,12 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, return -EINVAL; } + q_num_per_vf = mac_cb->dsaf_dev->rcb_common[0]->max_q_per_vf; + vf_num_per_port = mac_cb->dsaf_dev->rcb_common[0]->max_vfn; + + vm_queue_id = vmid * q_num_per_vf + + vf_num_per_port * q_num_per_vf * mac_cb->mac_id; + switch (mac_cb->dsaf_dev->dsaf_mode) { case DSAF_MODE_ENABLE_FIX: tmp_port = 0; @@ -193,7 +200,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, case DSAF_MODE_DISABLE_6PORT_2VM: case DSAF_MODE_DISABLE_6PORT_4VM: case DSAF_MODE_DISABLE_6PORT_16VM: - tmp_port = vmid; + tmp_port = vm_queue_id; break; default: dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n", diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 4cbdf14f5c16..d3a1f72ece0e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -461,5 +461,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb); int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb, enum hnae_led_state status); void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en); +int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, + u8 vmid, u8 *port_num); #endif /* _HNS_DSAF_MAC_H */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 8e5b3f51b47b..8d70377f6624 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -760,16 +760,6 @@ void hns_dsaf_set_promisc_mode(struct 
dsaf_device *dsaf_dev, u32 en) DSAF_CFG_MIX_MODE_S, !!en); } -void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en) -{ - if (AE_IS_VER1(dsaf_dev->dsaf_ver) || - dsaf_dev->mac_cb[mac_id]->mac_type == HNAE_PORT_DEBUG) - return; - - dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id, - DSAFV2_SERDES_LBK_EN_B, !!en); -} - /** * hns_dsaf_tbl_stat_en - tbl * @dsaf_id: dsa fabric id diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 35df187e66f1..c494fc52be74 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -466,6 +466,5 @@ void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id, u32 *en); int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id, u32 en); -void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en); #endif /* __HNS_DSAF_MAIN_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index ef1107777c08..f0ed80d6ef9c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -543,6 +543,22 @@ int hns_rcb_set_coalesce_usecs( "error: coalesce_usecs setting supports 0~1023us\n"); return -EINVAL; } + + if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) { + if (timeout == 0) + /* set timeout to 0, Disable gap time */ + dsaf_set_reg_field(rcb_common->io_base, + RCB_INT_GAP_TIME_REG + port_idx * 4, + PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B, + 0); + else + /* set timeout non 0, restore gap time to 1 */ + dsaf_set_reg_field(rcb_common->io_base, + RCB_INT_GAP_TIME_REG + port_idx * 4, + PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B, + 1); + } + hns_rcb_set_port_timeout(rcb_common, port_idx, timeout); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 4b8b803822d1..878950a42e6c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -417,6 +417,7 @@ #define RCB_CFG_OVERTIME_REG 0x9300 #define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304 #define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308 +#define RCB_INT_GAP_TIME_REG 0x9400 #define RCB_PORT_CFG_OVERTIME_REG 0x9430 #define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000 @@ -898,6 +899,9 @@ #define PPE_CNT_CLR_CE_B 0 #define PPE_CNT_CLR_SNAP_EN_B 1 +#define PPE_INT_GAPTIME_B 0 +#define PPE_INT_GAPTIME_M 0x3ff + #define PPE_COMMON_CNT_CLR_CE_B 0 #define PPE_COMMON_CNT_CLR_SNAP_EN_B 1 #define RCB_COM_TSO_MODE_B 0 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 059aaeda46b1..dff7b60345d8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -574,7 +574,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, struct sk_buff *skb; struct hnae_desc *desc; struct hnae_desc_cb *desc_cb; - struct ethhdr *eh; unsigned char *va; int bnum, length, i; int pull_len; @@ -600,7 +599,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, ring->stats.sw_err_cnt++; return -ENOMEM; } - skb_reset_mac_header(skb); prefetchw(skb->data); length = le16_to_cpu(desc->rx.pkt_len); @@ -682,14 +680,6 @@ out_bnum_err: return -EFAULT; } - /* filter out multicast pkt with the same src mac as this port */ - eh = eth_hdr(skb); - if 
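The dsaf_set_reg_field() calls above write the PPE_INT_GAPTIME field (mask 0x3ff at bit 0) without disturbing the rest of the register. A userspace sketch of the same read-modify-write; set_field() is a hypothetical analogue of the MMIO accessor, which additionally reads and writes through the mapped register:

#include <stdio.h>

#define PPE_INT_GAPTIME_B	0
#define PPE_INT_GAPTIME_M	0x3ff

/* Clear the field, then or in the new value shifted into position. */
static unsigned int set_field(unsigned int reg, unsigned int mask,
			      unsigned int shift, unsigned int val)
{
	reg &= ~(mask << shift);
	reg |= (val & mask) << shift;
	return reg;
}

int main(void)
{
	unsigned int reg = 0xdead0001;

	reg = set_field(reg, PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B, 0);
	printf("gap time cleared: %#x\n", reg);	/* 0xdead0000 */
	return 0;
}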
(unlikely(is_multicast_ether_addr(eh->h_dest) && - ether_addr_equal(ndev->dev_addr, eh->h_source))) { - dev_kfree_skb_any(skb); - return -EFAULT; - } - ring->stats.rx_pkts++; ring->stats.rx_bytes += skb->len; @@ -747,25 +737,37 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data, ndev->last_rx = jiffies; } +static int hns_desc_unused(struct hnae_ring *ring) +{ + int ntc = ring->next_to_clean; + int ntu = ring->next_to_use; + + return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; +} + static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data, int budget, void *v) { struct hnae_ring *ring = ring_data->ring; struct sk_buff *skb; - int num, bnum, ex_num; + int num, bnum; #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 int recv_pkts, recv_bds, clean_count, err; + int unused_count = hns_desc_unused(ring); num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); rmb(); /* make sure num taken effect before the other data is touched */ recv_pkts = 0, recv_bds = 0, clean_count = 0; -recv: + num -= unused_count; + while (recv_pkts < budget && recv_bds < num) { /* reuse or realloc buffers */ - if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { - hns_nic_alloc_rx_buffers(ring_data, clean_count); + if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { + hns_nic_alloc_rx_buffers(ring_data, + clean_count + unused_count); clean_count = 0; + unused_count = hns_desc_unused(ring); } /* poll one pkt */ @@ -786,21 +788,11 @@ recv: recv_pkts++; } - /* make all data has been write before submit */ - if (recv_pkts < budget) { - ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); - - if (ex_num > clean_count) { - num += ex_num - clean_count; - rmb(); /*complete read rx ring bd number*/ - goto recv; - } - } - out: /* make all data has been write before submit */ - if (clean_count > 0) - hns_nic_alloc_rx_buffers(ring_data, clean_count); + if (clean_count + unused_count > 0) + hns_nic_alloc_rx_buffers(ring_data, + clean_count + unused_count); return recv_pkts; } @@ -810,6 +802,8 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data) struct hnae_ring *ring = ring_data->ring; int num = 0; + ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); + /* for hardware bug fixed */ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); @@ -821,6 +815,20 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data) } } +static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data) +{ + struct hnae_ring *ring = ring_data->ring; + int num = 0; + + num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); + + if (num == 0) + ring_data->ring->q->handle->dev->ops->toggle_ring_irq( + ring, 0); + else + napi_schedule(&ring_data->napi); +} + static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring, int *bytes, int *pkts) { @@ -922,7 +930,11 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; - int head = readl_relaxed(ring->io_base + RCB_REG_HEAD); + int head; + + ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); + + head = readl_relaxed(ring->io_base + RCB_REG_HEAD); if (head != ring->next_to_clean) { ring_data->ring->q->handle->dev->ops->toggle_ring_irq( @@ -932,6 +944,18 @@ static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data) } } +static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data) +{ + struct hnae_ring *ring = ring_data->ring; + int head = readl_relaxed(ring->io_base + 
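hns_desc_unused() counts how many descriptors sit between next_to_clean and next_to_use on the circular ring, adding desc_num once when the clean index has wrapped behind the use index. A quick check of the arithmetic outside the driver, on a hypothetical 8-entry ring:

#include <stdio.h>

/* Same formula as hns_desc_unused(). */
static int desc_unused(int ntc, int ntu, int desc_num)
{
	return ((ntc >= ntu) ? 0 : desc_num) + ntc - ntu;
}

int main(void)
{
	printf("%d\n", desc_unused(5, 2, 8));	/* no wrap: 5 - 2 = 3 */
	printf("%d\n", desc_unused(2, 5, 8));	/* wrapped: 8 + 2 - 5 = 5 */
	return 0;
}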
RCB_REG_HEAD); + + if (head == ring->next_to_clean) + ring_data->ring->q->handle->dev->ops->toggle_ring_irq( + ring, 0); + else + napi_schedule(&ring_data->napi); +} + static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; @@ -963,10 +987,7 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget) if (clean_complete >= 0 && clean_complete < budget) { napi_complete(napi); - ring_data->ring->q->handle->dev->ops->toggle_ring_irq( - ring_data->ring, 0); - if (ring_data->fini_process) - ring_data->fini_process(ring_data); + ring_data->fini_process(ring_data); return 0; } @@ -1562,6 +1583,21 @@ struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev, return stats; } +static u16 +hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct ethhdr *eth_hdr = (struct ethhdr *)skb->data; + struct hns_nic_priv *priv = netdev_priv(ndev); + + /* fix hardware broadcast/multicast packets queue loopback */ + if (!AE_IS_VER1(priv->enet_ver) && + is_multicast_ether_addr(eth_hdr->h_dest)) + return 0; + else + return fallback(ndev, skb); +} + static const struct net_device_ops hns_nic_netdev_ops = { .ndo_open = hns_nic_net_open, .ndo_stop = hns_nic_net_stop, @@ -1577,6 +1613,7 @@ static const struct net_device_ops hns_nic_netdev_ops = { .ndo_poll_controller = hns_nic_poll_controller, #endif .ndo_set_rx_mode = hns_nic_set_rx_mode, + .ndo_select_queue = hns_nic_select_queue, }; static void hns_nic_update_link_status(struct net_device *netdev) @@ -1738,7 +1775,8 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) rd->queue_index = i; rd->ring = &h->qs[i]->tx_ring; rd->poll_one = hns_nic_tx_poll_one; - rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL; + rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : + hns_nic_tx_fini_pro_v2; netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); @@ -1750,7 +1788,8 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) rd->ring = &h->qs[i - h->q_num]->rx_ring; rd->poll_one = hns_nic_rx_poll_one; rd->ex_process = hns_nic_rx_up_pro; - rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL; + rd->fini_process = is_ver1 ? 
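The v2 fini_process handlers close a race between napi_complete() and newly arrived hardware work: after completion they re-check the ring and either re-enable the interrupt (nothing pending) or reschedule NAPI (work arrived meanwhile). The control flow, as a hedged sketch; ring_empty() and irq_enable() are stand-ins, not driver API:

static void fini_process_v2(struct ring_data *rd)
{
	if (ring_empty(rd))
		irq_enable(rd);		  /* idle: next IRQ restarts polling */
	else
		napi_schedule(&rd->napi); /* raced with new work: poll again */
}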
hns_nic_rx_fini_pro : + hns_nic_rx_fini_pro_v2; netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 47e59bbfd061..87d5c94b2810 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -352,6 +352,13 @@ static int __lb_setup(struct net_device *ndev, break; } + if (!ret) { + if (loop == MAC_LOOP_NONE) + h->dev->ops->set_promisc_mode( + h, ndev->flags & IFF_PROMISC); + else + h->dev->ops->set_promisc_mode(h, 1); + } return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index d4585154151d..cc4fd61914d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -287,7 +287,7 @@ retry: goto retry; } - MLX5_SET64(manage_pages_in, in, pas[i], addr); + MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr); } MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES); @@ -344,7 +344,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, if (fwp->func_id != func_id) continue; - MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr); + MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr); i++; } diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 0df1391f9663..1e8339a67f6e 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -107,15 +107,4 @@ config QEDE ---help--- This enables the support for ... -config INFINIBAND_QEDR - tristate "QLogic qede RoCE sources [debug]" - depends on QEDE && 64BIT - select QED_LL2 - default n - ---help--- - This provides a temporary node that allows the compilation - and logical testing of the InfiniBand over Ethernet support - for QLogic QED. This would be replaced by the 'real' option - once the QEDR driver is added [+relocated]. 
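MLX5_ARRAY_SET64() takes the element index as a separate argument instead of encoding it in the field name, so the byte offset is simply the array base plus index times eight. A userspace approximation of placing a big-endian 64-bit element into a command buffer; set64_at() is illustrative, not the mlx5 macro, and htobe64() here is the glibc helper from <endian.h>:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <endian.h>

/* Device command layouts are big-endian, so each 64-bit element is
 * byte-swapped before being stored at base + 8 * i. */
static void set64_at(void *buf, size_t base, size_t i, uint64_t v)
{
	uint64_t be = htobe64(v);

	memcpy((char *)buf + base + i * 8, &be, sizeof(be));
}

int main(void)
{
	unsigned char cmd[64] = { 0 };

	set64_at(cmd, 16, 2, 0x1122334455667788ULL);
	printf("%02x %02x\n", cmd[32], cmd[39]);	/* 11 88 */
	return 0;
}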
- endif # NET_VENDOR_QLOGIC diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index a6db10717d5c..02a8be2faed7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1517,7 +1517,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev, static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) { struct qed_ll2_info ll2_info; - struct qed_ll2_buffer *buffer; + struct qed_ll2_buffer *buffer, *tmp_buffer; enum qed_ll2_conn_type conn_type; struct qed_ptt *p_ptt; int rc, i; @@ -1587,7 +1587,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) /* Post all Rx buffers to FW */ spin_lock_bh(&cdev->ll2->lock); - list_for_each_entry(buffer, &cdev->ll2->list, list) { + list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) { rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle, buffer->phys_addr, 0, buffer, 1); diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 23430059471c..76831a398bed 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -2947,7 +2947,7 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = { .roce_ll2_stats = &qed_roce_ll2_stats, }; -const struct qed_rdma_ops *qed_get_rdma_ops() +const struct qed_rdma_ops *qed_get_rdma_ops(void) { return &qed_rdma_ops_pass; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 4c8c60af7985..6c85b61aaa0b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -650,20 +650,27 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) if (IS_ERR(priv->clk_ptp_ref)) { priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk); priv->clk_ptp_ref = NULL; + netdev_dbg(priv->dev, "PTP uses main clock\n"); } else { clk_prepare_enable(priv->clk_ptp_ref); priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref); + netdev_dbg(priv->dev, "PTP rate %d\n", priv->clk_ptp_rate); } priv->adv_ts = 0; - if (priv->dma_cap.atime_stamp && priv->extend_desc) + /* Check if adv_ts can be enabled for dwmac 4.x core */ + if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp) + priv->adv_ts = 1; + /* Dwmac 3.x core with extend_desc can support adv_ts */ + else if (priv->extend_desc && priv->dma_cap.atime_stamp) priv->adv_ts = 1; - if (netif_msg_hw(priv) && priv->dma_cap.time_stamp) - pr_debug("IEEE 1588-2002 Time Stamp supported\n"); + if (priv->dma_cap.time_stamp) + netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); - if (netif_msg_hw(priv) && priv->adv_ts) - pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n"); + if (priv->adv_ts) + netdev_info(priv->dev, + "IEEE 1588-2008 Advanced Timestamp supported\n"); priv->hw->ptp = &stmmac_ptp; priv->hwts_tx_en = 0; @@ -1702,8 +1709,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) if (init_ptp) { ret = stmmac_init_ptp(priv); - if (ret && ret != -EOPNOTSUPP) - pr_warn("%s: failed PTP initialisation\n", __func__); + if (ret) + netdev_warn(priv->dev, "PTP support cannot init.\n"); } #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 6e3b82972ce8..289d52725a6c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -186,10 +186,12 @@ 
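qed_ll2_start() switches to list_for_each_entry_safe() because the loop body can unlink the current buffer; the plain iterator would then chase a freed next pointer. The hazard in miniature, with a toy singly linked list:

#include <stdlib.h>

struct node { struct node *next; };

/* Safe form: cache the successor before the body may free pos. */
#define for_each_safe(pos, n, head) \
	for ((pos) = (head); (pos) && ((n) = (pos)->next, 1); (pos) = (n))

static void free_all(struct node *head)
{
	struct node *pos, *n;

	for_each_safe(pos, n, head)
		free(pos);	/* fine: n was saved before the free */
}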
int stmmac_ptp_register(struct stmmac_priv *priv) priv->device); if (IS_ERR(priv->ptp_clock)) { priv->ptp_clock = NULL; - pr_err("ptp_clock_register() failed on %s\n", priv->dev->name); - } else if (priv->ptp_clock) - pr_debug("Added PTP HW clock successfully on %s\n", - priv->dev->name); + return PTR_ERR(priv->ptp_clock); + } + + spin_lock_init(&priv->ptp_lock); + + netdev_dbg(priv->dev, "Added PTP HW clock successfully\n"); return 0; } diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index ece0ea0f6b38..6c7ec1ddd475 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -610,8 +610,8 @@ err_out_regions: #ifdef CONFIG_PCI if (pdev) pci_release_regions(pdev); -#endif err_out: +#endif if (pdev) pci_disable_device(pdev); return rc; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 35f9f9742a48..c688d68c39aa 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -431,8 +431,7 @@ static void axienet_setoptions(struct net_device *ndev, u32 options) lp->options |= options; } -static void __axienet_device_reset(struct axienet_local *lp, - struct device *dev, off_t offset) +static void __axienet_device_reset(struct axienet_local *lp, off_t offset) { u32 timeout; /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset @@ -468,8 +467,8 @@ static void axienet_device_reset(struct net_device *ndev) u32 axienet_status; struct axienet_local *lp = netdev_priv(ndev); - __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET); - __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET); + __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET); + __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET); lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE; lp->options |= XAE_OPTION_VLAN; @@ -1338,8 +1337,8 @@ static void axienet_dma_err_handler(unsigned long data) axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg & ~XAE_MDIO_MC_MDIOEN_MASK)); - __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET); - __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET); + __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET); + __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET); axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg); axienet_mdio_wait_until_ready(lp); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 52eeb2f67276..f0919bd3a563 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -442,8 +442,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) } net_trans_info = get_net_transport_info(skb, &hdr_offset); - if (net_trans_info == TRANSPORT_INFO_NOT_IP) - goto do_send; /* * Setup the sendside checksum offload only if this is not a @@ -478,56 +476,29 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) } lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset; lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size; - goto do_send; - } - - if ((skb->ip_summed == CHECKSUM_NONE) || - (skb->ip_summed == CHECKSUM_UNNECESSARY)) - goto do_send; - - rndis_msg_size += NDIS_CSUM_PPI_SIZE; - ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE, - TCPIP_CHKSUM_PKTINFO); - - csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi + - ppi->ppi_offset); - - if (net_trans_info & (INFO_IPV4 << 16)) - csum_info->transmit.is_ipv4 = 1; - else - csum_info->transmit.is_ipv6 = 1; - - if (net_trans_info & INFO_TCP) { - 
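The tlan change moves the err_out label inside the CONFIG_PCI block, presumably because every goto targeting it is compiled only when PCI is enabled; with PCI=n the label was defined but unreachable, which draws -Wunused-label. The resulting shape, sketched:

#ifdef CONFIG_PCI
	if (pdev)
		pci_release_regions(pdev);
err_out:			/* label compiled only when its gotos are */
#endif
	if (pdev)
		pci_disable_device(pdev);
	return rc;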
csum_info->transmit.tcp_checksum = 1; - csum_info->transmit.tcp_header_offset = hdr_offset; - } else if (net_trans_info & INFO_UDP) { - /* UDP checksum offload is not supported on ws2008r2. - * Furthermore, on ws2012 and ws2012r2, there are some - * issues with udp checksum offload from Linux guests. - * (these are host issues). - * For now compute the checksum here. - */ - struct udphdr *uh; - u16 udp_len; - - ret = skb_cow_head(skb, 0); - if (ret) - goto no_memory; - - uh = udp_hdr(skb); - udp_len = ntohs(uh->len); - uh->check = 0; - uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr, - ip_hdr(skb)->daddr, - udp_len, IPPROTO_UDP, - csum_partial(uh, udp_len, 0)); - if (uh->check == 0) - uh->check = CSUM_MANGLED_0; - - csum_info->transmit.udp_checksum = 0; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (net_trans_info & INFO_TCP) { + rndis_msg_size += NDIS_CSUM_PPI_SIZE; + ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE, + TCPIP_CHKSUM_PKTINFO); + + csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi + + ppi->ppi_offset); + + if (net_trans_info & (INFO_IPV4 << 16)) + csum_info->transmit.is_ipv4 = 1; + else + csum_info->transmit.is_ipv6 = 1; + + csum_info->transmit.tcp_checksum = 1; + csum_info->transmit.tcp_header_offset = hdr_offset; + } else { + /* UDP checksum (and other) offload is not supported. */ + if (skb_checksum_help(skb)) + goto drop; + } } -do_send: /* Start filling in the page buffers with the rndis hdr */ rndis_msg->msg_len += rndis_msg_size; packet->total_data_buflen = rndis_msg->msg_len; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index c6f66832a1a6..f424b867f73e 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -608,6 +608,21 @@ void phy_start_machine(struct phy_device *phydev) } /** + * phy_trigger_machine - trigger the state machine to run + * + * @phydev: the phy_device struct + * + * Description: There has been a change in state which requires that the + * state machine runs. 
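The removed netvsc branch computed the UDP checksum in software with csum_tcpudp_magic() and substituted CSUM_MANGLED_0 when the result folded to zero, because a zero UDP checksum on the wire means "not computed" (RFC 768). The fold-and-substitute step as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit one's-complement sum to 16 bits and invert. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint16_t check = csum_fold(0xffff);	/* folds and inverts to 0 */

	if (check == 0)
		check = 0xffff;	/* CSUM_MANGLED_0: 0 would mean "none" */
	printf("%#x\n", check);
	return 0;
}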
+ */ + +static void phy_trigger_machine(struct phy_device *phydev) +{ + cancel_delayed_work_sync(&phydev->state_queue); + queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0); +} + +/** * phy_stop_machine - stop the PHY state machine tracking * @phydev: target phy_device struct * @@ -639,6 +654,8 @@ static void phy_error(struct phy_device *phydev) mutex_lock(&phydev->lock); phydev->state = PHY_HALTED; mutex_unlock(&phydev->lock); + + phy_trigger_machine(phydev); } /** @@ -800,8 +817,7 @@ void phy_change(struct work_struct *work) } /* reschedule state queue work to run as soon as possible */ - cancel_delayed_work_sync(&phydev->state_queue); - queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0); + phy_trigger_machine(phydev); return; ignore: @@ -890,6 +906,8 @@ void phy_start(struct phy_device *phydev) /* if phy was suspended, bring the physical link up again */ if (do_resume) phy_resume(phydev); + + phy_trigger_machine(phydev); } EXPORT_SYMBOL(phy_start); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 9d1fce8a6e84..3ff76c6db4f6 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -59,6 +59,10 @@ enum qmi_wwan_flags { QMI_WWAN_FLAG_RAWIP = 1 << 0, }; +enum qmi_wwan_quirks { + QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */ +}; + static void qmi_wwan_netdev_setup(struct net_device *net) { struct usbnet *dev = netdev_priv(net); @@ -411,9 +415,14 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) * clearing out state the clients might need. * * MDM9x30 is the first QMI chipset with USB3 support. Abuse - * this fact to enable the quirk. + * this fact to enable the quirk for all USB3 devices. + * + * There are also chipsets with the same "set DTR" requirement + * but without USB3 support. Devices based on these chips + * need a quirk flag in the device ID table. */ - if (le16_to_cpu(dev->udev->descriptor.bcdUSB) >= 0x0201) { + if (dev->driver_info->data & QMI_WWAN_QUIRK_DTR || + le16_to_cpu(dev->udev->descriptor.bcdUSB) >= 0x0201) { qmi_wwan_manage_power(dev, 1); qmi_wwan_change_dtr(dev, true); } @@ -526,6 +535,16 @@ static const struct driver_info qmi_wwan_info = { .rx_fixup = qmi_wwan_rx_fixup, }; +static const struct driver_info qmi_wwan_info_quirk_dtr = { + .description = "WWAN/QMI device", + .flags = FLAG_WWAN, + .bind = qmi_wwan_bind, + .unbind = qmi_wwan_unbind, + .manage_power = qmi_wwan_manage_power, + .rx_fixup = qmi_wwan_rx_fixup, + .data = QMI_WWAN_QUIRK_DTR, +}; + #define HUAWEI_VENDOR_ID 0x12D1 /* map QMI/wwan function by a fixed interface number */ @@ -533,6 +552,11 @@ static const struct driver_info qmi_wwan_info = { USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \ .driver_info = (unsigned long)&qmi_wwan_info +/* devices requiring "set DTR" quirk */ +#define QMI_QUIRK_SET_DTR(vend, prod, num) \ + USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \ + .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr + /* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */ #define QMI_GOBI1K_DEVICE(vend, prod) \ QMI_FIXED_INTF(vend, prod, 3) @@ -895,6 +919,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ /* 4. 
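The qmi_wwan quirk plumbing stores flag bits in the driver_info .data field referenced by each device-table entry, so bind-time code can test per-device quirks without matching IDs again. A reduced sketch with simplified struct names:

#include <stdio.h>

struct drv_info { const char *desc; unsigned long data; };

#define QUIRK_DTR (1UL << 0)

static const struct drv_info info_plain = { "WWAN/QMI device", 0 };
static const struct drv_info info_dtr   = { "WWAN/QMI device", QUIRK_DTR };

/* At bind: quirk flag set, or any USB3-era device (bcdUSB >= 2.01). */
static int needs_dtr(const struct drv_info *info, unsigned int bcd_usb)
{
	return (info->data & QUIRK_DTR) || bcd_usb >= 0x0201;
}

int main(void)
{
	printf("%d %d\n", needs_dtr(&info_plain, 0x0200),
	       needs_dtr(&info_dtr, 0x0200));	/* 0 1 */
	return 0;
}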
Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index cf68149cbb55..3ce1f7da8647 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -407,4 +407,8 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len, void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb); +#ifdef CONFIG_DEBUG_FS +void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m); +#endif + #endif /* __XEN_NETBACK__COMMON_H__ */ diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c index 613bac057650..e8c5dddc54ba 100644 --- a/drivers/net/xen-netback/hash.c +++ b/drivers/net/xen-netback/hash.c @@ -360,6 +360,74 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len, return XEN_NETIF_CTRL_STATUS_SUCCESS; } +#ifdef CONFIG_DEBUG_FS +void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m) +{ + unsigned int i; + + switch (vif->hash.alg) { + case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ: + seq_puts(m, "Hash Algorithm: TOEPLITZ\n"); + break; + + case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE: + seq_puts(m, "Hash Algorithm: NONE\n"); + /* FALLTHRU */ + default: + return; + } + + if (vif->hash.flags) { + seq_puts(m, "\nHash Flags:\n"); + + if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) + seq_puts(m, "- IPv4\n"); + if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP) + seq_puts(m, "- IPv4 + TCP\n"); + if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) + seq_puts(m, "- IPv6\n"); + if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP) + seq_puts(m, "- IPv6 + TCP\n"); + } + + seq_puts(m, "\nHash Key:\n"); + + for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) { + unsigned int j, n; + + n = 8; + if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE) + n = XEN_NETBK_MAX_HASH_KEY_SIZE - i; + + seq_printf(m, "[%2u - %2u]: ", i, i + n - 1); + + for (j = 0; j < n; j++, i++) + seq_printf(m, "%02x ", vif->hash.key[i]); + + seq_puts(m, "\n"); + } + + if (vif->hash.size != 0) { + seq_puts(m, "\nHash Mapping:\n"); + + for (i = 0; i < vif->hash.size; ) { + unsigned int j, n; + + n = 8; + if (i + n >= vif->hash.size) + n = vif->hash.size - i; + + seq_printf(m, "[%4u - %4u]: ", i, i + n - 1); + + for (j = 0; j < n; j++, i++) + seq_printf(m, "%4u ", vif->hash.mapping[i]); + + seq_puts(m, "\n"); + } + } +} +#endif /* CONFIG_DEBUG_FS */ + void xenvif_init_hash(struct xenvif *vif) { if (xenvif_hash_cache_size == 0) diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c index 8e9ade6ccf18..b1cf7c6f407a 100644 --- a/drivers/net/xen-netback/rx.c +++ b/drivers/net/xen-netback/rx.c @@ -337,9 +337,9 @@ static void xenvif_rx_next_chunk(struct xenvif_queue *queue, frag_data += pkt->frag_offset; frag_len -= pkt->frag_offset; - chunk_len = min(frag_len, XEN_PAGE_SIZE - offset); - chunk_len = min(chunk_len, - XEN_PAGE_SIZE - xen_offset_in_page(frag_data)); + chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset); + chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE - + xen_offset_in_page(frag_data)); pkt->frag_offset += chunk_len; @@ -425,6 +425,8 @@ void xenvif_rx_skb(struct xenvif_queue *queue) xenvif_rx_next_skb(queue, &pkt); + queue->last_rx_time = jiffies; + do { struct xen_netif_rx_request *req; struct xen_netif_rx_response *rsp; diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 7056404e3cb8..8674e188b697 100644 --- a/drivers/net/xen-netback/xenbus.c +++ 
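xenvif_dump_hash_info() prints the hash key and mapping eight entries per row under a "[lo - hi]:" range label, with the inner loop advancing the outer index. The same loop shape, runnable on an ordinary buffer:

#include <stdio.h>

int main(void)
{
	unsigned char key[13] = "xen-hash-key";
	unsigned int i, j, n, len = sizeof(key);

	for (i = 0; i < len; ) {
		n = (len - i < 8) ? len - i : 8;
		printf("[%2u - %2u]: ", i, i + n - 1);
		for (j = 0; j < n; j++, i++)
			printf("%02x ", key[i]);
		printf("\n");
	}
	return 0;
}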
b/drivers/net/xen-netback/xenbus.c @@ -165,7 +165,7 @@ xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count, return count; } -static int xenvif_dump_open(struct inode *inode, struct file *filp) +static int xenvif_io_ring_open(struct inode *inode, struct file *filp) { int ret; void *queue = NULL; @@ -179,13 +179,35 @@ static int xenvif_dump_open(struct inode *inode, struct file *filp) static const struct file_operations xenvif_dbg_io_ring_ops_fops = { .owner = THIS_MODULE, - .open = xenvif_dump_open, + .open = xenvif_io_ring_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = xenvif_write_io_ring, }; +static int xenvif_read_ctrl(struct seq_file *m, void *v) +{ + struct xenvif *vif = m->private; + + xenvif_dump_hash_info(vif, m); + + return 0; +} + +static int xenvif_ctrl_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, xenvif_read_ctrl, inode->i_private); +} + +static const struct file_operations xenvif_dbg_ctrl_ops_fops = { + .owner = THIS_MODULE, + .open = xenvif_ctrl_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static void xenvif_debugfs_addif(struct xenvif *vif) { struct dentry *pfile; @@ -210,6 +232,17 @@ static void xenvif_debugfs_addif(struct xenvif *vif) pr_warn("Creation of io_ring file returned %ld!\n", PTR_ERR(pfile)); } + + if (vif->ctrl_irq) { + pfile = debugfs_create_file("ctrl", + S_IRUSR, + vif->xenvif_dbg_root, + vif, + &xenvif_dbg_ctrl_ops_fops); + if (IS_ERR_OR_NULL(pfile)) + pr_warn("Creation of ctrl file returned %ld!\n", + PTR_ERR(pfile)); + } } else netdev_warn(vif->dev, "Creation of vif debugfs dir returned %ld!\n", diff --git a/drivers/of/platform.c b/drivers/of/platform.c index f811d2796437..e4bf07d20f9b 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -29,6 +29,7 @@ const struct of_device_id of_default_bus_match_table[] = { { .compatible = "simple-bus", }, { .compatible = "simple-mfd", }, + { .compatible = "isa", }, #ifdef CONFIG_ARM_AMBA { .compatible = "arm,amba-bus", }, #endif /* CONFIG_ARM_AMBA */ diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index e4a5b7ee90cf..4fce494271cc 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c @@ -230,20 +230,20 @@ static int advk_pcie_link_up(struct advk_pcie *pcie) static int advk_pcie_wait_for_link(struct advk_pcie *pcie) { + struct device *dev = &pcie->pdev->dev; int retries; /* check if the link is up or not */ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { if (advk_pcie_link_up(pcie)) { - dev_info(&pcie->pdev->dev, "link up\n"); + dev_info(dev, "link up\n"); return 0; } usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); } - dev_err(&pcie->pdev->dev, "link never came up\n"); - + dev_err(dev, "link never came up\n"); return -ETIMEDOUT; } @@ -376,6 +376,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) static void advk_pcie_check_pio_status(struct advk_pcie *pcie) { + struct device *dev = &pcie->pdev->dev; u32 reg; unsigned int status; char *strcomp_status, *str_posted; @@ -407,12 +408,13 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie) else str_posted = "Posted"; - dev_err(&pcie->pdev->dev, "%s PIO Response Status: %s, %#x @ %#x\n", + dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n", str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS)); } static int advk_pcie_wait_pio(struct advk_pcie *pcie) { + struct device *dev = &pcie->pdev->dev; unsigned long timeout; 
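Several hunks here lean on the kernel's pointer-encoded errors: PTR_ERR() in stmmac_ptp_register(), IS_ERR_OR_NULL() around debugfs_create_file(). The encoding itself fits in a few lines (minus the kernel's type checking): small negative errnos alias the top, never-mapped page of the address space, so one return value carries either a pointer or an error.

#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *p = ERR_PTR(-12);	/* -ENOMEM */

	if (IS_ERR(p))
		printf("error %ld\n", PTR_ERR(p));
	return 0;
}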
timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS); @@ -426,7 +428,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie) return 0; } - dev_err(&pcie->pdev->dev, "config read/write timed out\n"); + dev_err(dev, "config read/write timed out\n"); return -ETIMEDOUT; } @@ -560,10 +562,11 @@ static int advk_pcie_alloc_msi(struct advk_pcie *pcie) static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq) { + struct device *dev = &pcie->pdev->dev; + mutex_lock(&pcie->msi_used_lock); if (!test_bit(hwirq, pcie->msi_irq_in_use)) - dev_err(&pcie->pdev->dev, "trying to free unused MSI#%d\n", - hwirq); + dev_err(dev, "trying to free unused MSI#%d\n", hwirq); else clear_bit(hwirq, pcie->msi_irq_in_use); mutex_unlock(&pcie->msi_used_lock); @@ -910,6 +913,7 @@ out_release_res: static int advk_pcie_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct advk_pcie *pcie; struct resource *res; struct pci_bus *bus, *child; @@ -917,31 +921,29 @@ static int advk_pcie_probe(struct platform_device *pdev) struct device_node *msi_node; int ret, irq; - pcie = devm_kzalloc(&pdev->dev, sizeof(struct advk_pcie), - GFP_KERNEL); + pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; pcie->pdev = pdev; - platform_set_drvdata(pdev, pcie); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pcie->base = devm_ioremap_resource(&pdev->dev, res); + pcie->base = devm_ioremap_resource(dev, res); if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); irq = platform_get_irq(pdev, 0); - ret = devm_request_irq(&pdev->dev, irq, advk_pcie_irq_handler, + ret = devm_request_irq(dev, irq, advk_pcie_irq_handler, IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie", pcie); if (ret) { - dev_err(&pdev->dev, "Failed to register interrupt\n"); + dev_err(dev, "Failed to register interrupt\n"); return ret; } ret = advk_pcie_parse_request_of_pci_ranges(pcie); if (ret) { - dev_err(&pdev->dev, "Failed to parse resources\n"); + dev_err(dev, "Failed to parse resources\n"); return ret; } @@ -949,24 +951,24 @@ static int advk_pcie_probe(struct platform_device *pdev) ret = advk_pcie_init_irq_domain(pcie); if (ret) { - dev_err(&pdev->dev, "Failed to initialize irq\n"); + dev_err(dev, "Failed to initialize irq\n"); return ret; } ret = advk_pcie_init_msi_irq_domain(pcie); if (ret) { - dev_err(&pdev->dev, "Failed to initialize irq\n"); + dev_err(dev, "Failed to initialize irq\n"); advk_pcie_remove_irq_domain(pcie); return ret; } - msi_node = of_parse_phandle(pdev->dev.of_node, "msi-parent", 0); + msi_node = of_parse_phandle(dev->of_node, "msi-parent", 0); if (msi_node) msi = of_pci_find_msi_chip_by_node(msi_node); else msi = NULL; - bus = pci_scan_root_bus_msi(&pdev->dev, 0, &advk_pcie_ops, + bus = pci_scan_root_bus_msi(dev, 0, &advk_pcie_ops, pcie, &pcie->resources, &pcie->msi); if (!bus) { advk_pcie_remove_msi_irq_domain(pcie); @@ -980,7 +982,6 @@ static int advk_pcie_probe(struct platform_device *pdev) pcie_bus_configure_settings(child); pci_bus_add_devices(bus); - return 0; } diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c index 19223ed2e619..9595fad63f6f 100644 --- a/drivers/pci/host/pci-dra7xx.c +++ b/drivers/pci/host/pci-dra7xx.c @@ -64,11 +64,10 @@ #define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF struct dra7xx_pcie { - void __iomem *base; - struct phy **phy; - int phy_count; - struct device *dev; struct pcie_port pp; + void __iomem *base; /* DT ti_conf */ + int phy_count; /* DT phy-names count */ + struct phy **phy; }; #define to_dra7xx_pcie(x) container_of((x), 
struct dra7xx_pcie, pp) @@ -84,17 +83,6 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset, writel(value, pcie->base + offset); } -static inline u32 dra7xx_pcie_readl_rc(struct pcie_port *pp, u32 offset) -{ - return readl(pp->dbi_base + offset); -} - -static inline void dra7xx_pcie_writel_rc(struct pcie_port *pp, u32 offset, - u32 value) -{ - writel(value, pp->dbi_base + offset); -} - static int dra7xx_pcie_link_up(struct pcie_port *pp) { struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); @@ -103,13 +91,14 @@ static int dra7xx_pcie_link_up(struct pcie_port *pp) return !!(reg & LINK_UP); } -static int dra7xx_pcie_establish_link(struct pcie_port *pp) +static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx) { - struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); + struct pcie_port *pp = &dra7xx->pp; + struct device *dev = pp->dev; u32 reg; if (dw_pcie_link_up(pp)) { - dev_err(pp->dev, "link is already up\n"); + dev_err(dev, "link is already up\n"); return 0; } @@ -120,10 +109,8 @@ static int dra7xx_pcie_establish_link(struct pcie_port *pp) return dw_pcie_wait_for_link(pp); } -static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp) +static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx) { - struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); - dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, ~INTERRUPTS); dra7xx_pcie_writel(dra7xx, @@ -142,6 +129,8 @@ static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp) static void dra7xx_pcie_host_init(struct pcie_port *pp) { + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); + pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR; pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR; pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR; @@ -149,10 +138,10 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp) dw_pcie_setup_rc(pp); - dra7xx_pcie_establish_link(pp); + dra7xx_pcie_establish_link(dra7xx); if (IS_ENABLED(CONFIG_PCI_MSI)) dw_pcie_msi_init(pp); - dra7xx_pcie_enable_interrupts(pp); + dra7xx_pcie_enable_interrupts(dra7xx); } static struct pcie_host_ops dra7xx_pcie_host_ops = { @@ -196,8 +185,8 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg) { - struct pcie_port *pp = arg; - struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); + struct dra7xx_pcie *dra7xx = arg; + struct pcie_port *pp = &dra7xx->pp; u32 reg; reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI); @@ -223,51 +212,51 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg) static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) { struct dra7xx_pcie *dra7xx = arg; + struct device *dev = dra7xx->pp.dev; u32 reg; reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN); if (reg & ERR_SYS) - dev_dbg(dra7xx->dev, "System Error\n"); + dev_dbg(dev, "System Error\n"); if (reg & ERR_FATAL) - dev_dbg(dra7xx->dev, "Fatal Error\n"); + dev_dbg(dev, "Fatal Error\n"); if (reg & ERR_NONFATAL) - dev_dbg(dra7xx->dev, "Non Fatal Error\n"); + dev_dbg(dev, "Non Fatal Error\n"); if (reg & ERR_COR) - dev_dbg(dra7xx->dev, "Correctable Error\n"); + dev_dbg(dev, "Correctable Error\n"); if (reg & ERR_AXI) - dev_dbg(dra7xx->dev, "AXI tag lookup fatal Error\n"); + dev_dbg(dev, "AXI tag lookup fatal Error\n"); if (reg & ERR_ECRC) - dev_dbg(dra7xx->dev, "ECRC Error\n"); + dev_dbg(dev, "ECRC Error\n"); if (reg & PME_TURN_OFF) - dev_dbg(dra7xx->dev, + dev_dbg(dev, "Power Management Event Turn-Off message received\n"); if (reg & PME_TO_ACK) - 
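Both the dra7xx conversion here and the exynos one that follows rely on embedding struct pcie_port inside the driver structure and recovering the outer object with container_of(), as to_dra7xx_pcie() does. The mechanism is just pointer arithmetic over offsetof(); this minimal userspace version omits the kernel's type check:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pcie_port { int irq; };
struct dra7xx_pcie { struct pcie_port pp; int phy_count; };

#define to_dra7xx_pcie(x) container_of((x), struct dra7xx_pcie, pp)

int main(void)
{
	struct dra7xx_pcie d = { .phy_count = 2 };
	struct pcie_port *pp = &d.pp;

	printf("%d\n", to_dra7xx_pcie(pp)->phy_count);	/* 2 */
	return 0;
}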
dev_dbg(dra7xx->dev, + dev_dbg(dev, "Power Management Turn-Off Ack message received\n"); if (reg & PM_PME) - dev_dbg(dra7xx->dev, - "PM Power Management Event message received\n"); + dev_dbg(dev, "PM Power Management Event message received\n"); if (reg & LINK_REQ_RST) - dev_dbg(dra7xx->dev, "Link Request Reset\n"); + dev_dbg(dev, "Link Request Reset\n"); if (reg & LINK_UP_EVT) - dev_dbg(dra7xx->dev, "Link-up state change\n"); + dev_dbg(dev, "Link-up state change\n"); if (reg & CFG_BME_EVT) - dev_dbg(dra7xx->dev, "CFG 'Bus Master Enable' change\n"); + dev_dbg(dev, "CFG 'Bus Master Enable' change\n"); if (reg & CFG_MSE_EVT) - dev_dbg(dra7xx->dev, "CFG 'Memory Space Enable' change\n"); + dev_dbg(dev, "CFG 'Memory Space Enable' change\n"); dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg); @@ -278,13 +267,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, struct platform_device *pdev) { int ret; - struct pcie_port *pp; + struct pcie_port *pp = &dra7xx->pp; + struct device *dev = pp->dev; struct resource *res; - struct device *dev = &pdev->dev; - - pp = &dra7xx->pp; - pp->dev = dev; - pp->ops = &dra7xx_pcie_host_ops; pp->irq = platform_get_irq(pdev, 1); if (pp->irq < 0) { @@ -292,12 +277,11 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, return -EINVAL; } - ret = devm_request_irq(&pdev->dev, pp->irq, - dra7xx_pcie_msi_irq_handler, + ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler, IRQF_SHARED | IRQF_NO_THREAD, - "dra7-pcie-msi", pp); + "dra7-pcie-msi", dra7xx); if (ret) { - dev_err(&pdev->dev, "failed to request irq\n"); + dev_err(dev, "failed to request irq\n"); return ret; } @@ -314,7 +298,7 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, ret = dw_pcie_host_init(pp); if (ret) { - dev_err(dra7xx->dev, "failed to initialize host\n"); + dev_err(dev, "failed to initialize host\n"); return ret; } @@ -332,6 +316,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) void __iomem *base; struct resource *res; struct dra7xx_pcie *dra7xx; + struct pcie_port *pp; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; char name[10]; @@ -343,6 +328,10 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) if (!dra7xx) return -ENOMEM; + pp = &dra7xx->pp; + pp->dev = dev; + pp->ops = &dra7xx_pcie_host_ops; + irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(dev, "missing IRQ resource\n"); @@ -390,7 +379,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) dra7xx->base = base; dra7xx->phy = phy; - dra7xx->dev = dev; dra7xx->phy_count = phy_count; pm_runtime_enable(dev); @@ -407,7 +395,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) ret = devm_gpio_request_one(dev, gpio_sel, gpio_flags, "pcie_reset"); if (ret) { - dev_err(&pdev->dev, "gpio%d request failed, ret %d\n", + dev_err(dev, "gpio%d request failed, ret %d\n", gpio_sel, ret); goto err_gpio; } @@ -420,12 +408,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) reg &= ~LTSSM_EN; dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); - platform_set_drvdata(pdev, dra7xx); - ret = dra7xx_add_pcie_port(dra7xx, pdev); if (ret < 0) goto err_gpio; + platform_set_drvdata(pdev, dra7xx); return 0; err_gpio: @@ -451,9 +438,9 @@ static int dra7xx_pcie_suspend(struct device *dev) u32 val; /* clear MSE */ - val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND); + val = dw_pcie_readl_rc(pp, PCI_COMMAND); val &= ~PCI_COMMAND_MEMORY; - 
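The dra7xx main-IRQ handler decodes one status word with a run of bit tests, each paired with a message. Where such lists grow, a table keeps mask and text together; a hypothetical condensed form (the bit values below are invented, and this is not how the driver is written):

#include <stdio.h>

struct irq_msg { unsigned int bit; const char *msg; };

static const struct irq_msg msgs[] = {
	{ 1u << 0, "System Error" },
	{ 1u << 1, "Fatal Error" },
	{ 1u << 2, "Non Fatal Error" },
};

static void decode(unsigned int reg)
{
	for (size_t i = 0; i < sizeof(msgs) / sizeof(msgs[0]); i++)
		if (reg & msgs[i].bit)
			printf("%s\n", msgs[i].msg);
}

int main(void) { decode(5); return 0; }	/* System + Non Fatal */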
dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val); + dw_pcie_writel_rc(pp, PCI_COMMAND, val); return 0; } @@ -465,9 +452,9 @@ static int dra7xx_pcie_resume(struct device *dev) u32 val; /* set MSE */ - val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND); + val = dw_pcie_readl_rc(pp, PCI_COMMAND); val |= PCI_COMMAND_MEMORY; - dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val); + dw_pcie_writel_rc(pp, PCI_COMMAND, val); return 0; } diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c index 2e2d7f00b9e8..f1c544bb8b68 100644 --- a/drivers/pci/host/pci-exynos.c +++ b/drivers/pci/host/pci-exynos.c @@ -29,13 +29,13 @@ #define to_exynos_pcie(x) container_of(x, struct exynos_pcie, pp) struct exynos_pcie { - void __iomem *elbi_base; - void __iomem *phy_base; - void __iomem *block_base; + struct pcie_port pp; + void __iomem *elbi_base; /* DT 0th resource */ + void __iomem *phy_base; /* DT 1st resource */ + void __iomem *block_base; /* DT 2nd resource */ int reset_gpio; struct clk *clk; struct clk *bus_clk; - struct pcie_port pp; }; /* PCIe ELBI registers */ @@ -102,40 +102,40 @@ struct exynos_pcie { #define PCIE_PHY_TRSV3_PD_TSV (0x1 << 7) #define PCIE_PHY_TRSV3_LVCC 0x31c -static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg) +static void exynos_elb_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg) { - writel(val, pcie->elbi_base + reg); + writel(val, exynos_pcie->elbi_base + reg); } -static inline u32 exynos_elb_readl(struct exynos_pcie *pcie, u32 reg) +static u32 exynos_elb_readl(struct exynos_pcie *exynos_pcie, u32 reg) { - return readl(pcie->elbi_base + reg); + return readl(exynos_pcie->elbi_base + reg); } -static inline void exynos_phy_writel(struct exynos_pcie *pcie, u32 val, u32 reg) +static void exynos_phy_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg) { - writel(val, pcie->phy_base + reg); + writel(val, exynos_pcie->phy_base + reg); } -static inline u32 exynos_phy_readl(struct exynos_pcie *pcie, u32 reg) +static u32 exynos_phy_readl(struct exynos_pcie *exynos_pcie, u32 reg) { - return readl(pcie->phy_base + reg); + return readl(exynos_pcie->phy_base + reg); } -static inline void exynos_blk_writel(struct exynos_pcie *pcie, u32 val, u32 reg) +static void exynos_blk_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg) { - writel(val, pcie->block_base + reg); + writel(val, exynos_pcie->block_base + reg); } -static inline u32 exynos_blk_readl(struct exynos_pcie *pcie, u32 reg) +static u32 exynos_blk_readl(struct exynos_pcie *exynos_pcie, u32 reg) { - return readl(pcie->block_base + reg); + return readl(exynos_pcie->block_base + reg); } -static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on) +static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *exynos_pcie, + bool on) { u32 val; - struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); if (on) { val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC); @@ -148,10 +148,10 @@ static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on) } } -static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on) +static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *exynos_pcie, + bool on) { u32 val; - struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); if (on) { val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC); @@ -164,10 +164,9 @@ static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on) } } -static void exynos_pcie_assert_core_reset(struct pcie_port *pp) +static void exynos_pcie_assert_core_reset(struct 
exynos_pcie *exynos_pcie) { u32 val; - struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET); val &= ~PCIE_CORE_RESET_ENABLE; @@ -177,10 +176,9 @@ static void exynos_pcie_assert_core_reset(struct pcie_port *pp) exynos_elb_writel(exynos_pcie, 0, PCIE_NONSTICKY_RESET); } -static void exynos_pcie_deassert_core_reset(struct pcie_port *pp) +static void exynos_pcie_deassert_core_reset(struct exynos_pcie *exynos_pcie) { u32 val; - struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET); val |= PCIE_CORE_RESET_ENABLE; @@ -193,18 +191,14 @@ static void exynos_pcie_deassert_core_reset(struct pcie_port *pp) exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_MAC_RESET); } -static void exynos_pcie_assert_phy_reset(struct pcie_port *pp) +static void exynos_pcie_assert_phy_reset(struct exynos_pcie *exynos_pcie) { - struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); - exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_MAC_RESET); exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_GLOBAL_RESET); } -static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp) +static void exynos_pcie_deassert_phy_reset(struct exynos_pcie *exynos_pcie) { - struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); - exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_GLOBAL_RESET); exynos_elb_writel(exynos_pcie, 1, PCIE_PWR_RESET); exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET); @@ -213,10 +207,9 @@ static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp) exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET); } -static void exynos_pcie_power_on_phy(struct pcie_port *pp) +static void exynos_pcie_power_on_phy(struct exynos_pcie *exynos_pcie) { u32 val; - struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER); val &= ~PCIE_PHY_COMMON_PD_CMN; @@ -239,10 +232,9 @@ static void exynos_pcie_power_on_phy(struct pcie_port *pp) exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER); } -static void exynos_pcie_power_off_phy(struct pcie_port *pp) +static void exynos_pcie_power_off_phy(struct exynos_pcie *exynos_pcie) { u32 val; - struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER); val |= PCIE_PHY_COMMON_PD_CMN; @@ -265,10 +257,8 @@ static void exynos_pcie_power_off_phy(struct pcie_port *pp) exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER); } -static void exynos_pcie_init_phy(struct pcie_port *pp) +static void exynos_pcie_init_phy(struct exynos_pcie *exynos_pcie) { - struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); - /* DCC feedback control off */ exynos_phy_writel(exynos_pcie, 0x29, PCIE_PHY_DCC_FEEDBACK); @@ -305,51 +295,41 @@ static void exynos_pcie_init_phy(struct pcie_port *pp) exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV3_LVCC); } -static void exynos_pcie_assert_reset(struct pcie_port *pp) +static void exynos_pcie_assert_reset(struct exynos_pcie *exynos_pcie) { - struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + struct pcie_port *pp = &exynos_pcie->pp; + struct device *dev = pp->dev; if (exynos_pcie->reset_gpio >= 0) - devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio, + devm_gpio_request_one(dev, exynos_pcie->reset_gpio, GPIOF_OUT_INIT_HIGH, "RESET"); } -static int exynos_pcie_establish_link(struct pcie_port *pp) +static int exynos_pcie_establish_link(struct exynos_pcie *exynos_pcie) { - struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + struct pcie_port *pp = &exynos_pcie->pp; + 
struct device *dev = pp->dev; u32 val; if (dw_pcie_link_up(pp)) { - dev_err(pp->dev, "Link already up\n"); + dev_err(dev, "Link already up\n"); return 0; } - /* assert reset signals */ - exynos_pcie_assert_core_reset(pp); - exynos_pcie_assert_phy_reset(pp); - - /* de-assert phy reset */ - exynos_pcie_deassert_phy_reset(pp); - - /* power on phy */ - exynos_pcie_power_on_phy(pp); - - /* initialize phy */ - exynos_pcie_init_phy(pp); + exynos_pcie_assert_core_reset(exynos_pcie); + exynos_pcie_assert_phy_reset(exynos_pcie); + exynos_pcie_deassert_phy_reset(exynos_pcie); + exynos_pcie_power_on_phy(exynos_pcie); + exynos_pcie_init_phy(exynos_pcie); /* pulse for common reset */ exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_COMMON_RESET); udelay(500); exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET); - /* de-assert core reset */ - exynos_pcie_deassert_core_reset(pp); - - /* setup root complex */ + exynos_pcie_deassert_core_reset(exynos_pcie); dw_pcie_setup_rc(pp); - - /* assert reset signal */ - exynos_pcie_assert_reset(pp); + exynos_pcie_assert_reset(exynos_pcie); /* assert LTSSM enable */ exynos_elb_writel(exynos_pcie, PCIE_ELBI_LTSSM_ENABLE, @@ -361,27 +341,23 @@ static int exynos_pcie_establish_link(struct pcie_port *pp) while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) { val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED); - dev_info(pp->dev, "PLL Locked: 0x%x\n", val); + dev_info(dev, "PLL Locked: 0x%x\n", val); } - /* power off phy */ - exynos_pcie_power_off_phy(pp); - + exynos_pcie_power_off_phy(exynos_pcie); return -ETIMEDOUT; } -static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp) +static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *exynos_pcie) { u32 val; - struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_PULSE); exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_PULSE); } -static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp) +static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *exynos_pcie) { u32 val; - struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); /* enable INTX interrupt */ val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT | @@ -391,23 +367,24 @@ static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp) static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg) { - struct pcie_port *pp = arg; + struct exynos_pcie *exynos_pcie = arg; - exynos_pcie_clear_irq_pulse(pp); + exynos_pcie_clear_irq_pulse(exynos_pcie); return IRQ_HANDLED; } static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg) { - struct pcie_port *pp = arg; + struct exynos_pcie *exynos_pcie = arg; + struct pcie_port *pp = &exynos_pcie->pp; return dw_handle_msi_irq(pp); } -static void exynos_pcie_msi_init(struct pcie_port *pp) +static void exynos_pcie_msi_init(struct exynos_pcie *exynos_pcie) { + struct pcie_port *pp = &exynos_pcie->pp; u32 val; - struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); dw_pcie_msi_init(pp); @@ -417,60 +394,64 @@ static void exynos_pcie_msi_init(struct pcie_port *pp) exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL); } -static void exynos_pcie_enable_interrupts(struct pcie_port *pp) +static void exynos_pcie_enable_interrupts(struct exynos_pcie *exynos_pcie) { - exynos_pcie_enable_irq_pulse(pp); + exynos_pcie_enable_irq_pulse(exynos_pcie); if (IS_ENABLED(CONFIG_PCI_MSI)) - exynos_pcie_msi_init(pp); + exynos_pcie_msi_init(exynos_pcie); } -static inline u32 exynos_pcie_readl_rc(struct pcie_port *pp, - void __iomem *dbi_base) +static u32 exynos_pcie_readl_rc(struct pcie_port 
*pp, u32 reg) { + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); u32 val; - exynos_pcie_sideband_dbi_r_mode(pp, true); - val = readl(dbi_base); - exynos_pcie_sideband_dbi_r_mode(pp, false); + exynos_pcie_sideband_dbi_r_mode(exynos_pcie, true); + val = readl(pp->dbi_base + reg); + exynos_pcie_sideband_dbi_r_mode(exynos_pcie, false); return val; } -static inline void exynos_pcie_writel_rc(struct pcie_port *pp, - u32 val, void __iomem *dbi_base) +static void exynos_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val) { - exynos_pcie_sideband_dbi_w_mode(pp, true); - writel(val, dbi_base); - exynos_pcie_sideband_dbi_w_mode(pp, false); + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + + exynos_pcie_sideband_dbi_w_mode(exynos_pcie, true); + writel(val, pp->dbi_base + reg); + exynos_pcie_sideband_dbi_w_mode(exynos_pcie, false); } static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val) { + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); int ret; - exynos_pcie_sideband_dbi_r_mode(pp, true); + exynos_pcie_sideband_dbi_r_mode(exynos_pcie, true); ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val); - exynos_pcie_sideband_dbi_r_mode(pp, false); + exynos_pcie_sideband_dbi_r_mode(exynos_pcie, false); return ret; } static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val) { + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); int ret; - exynos_pcie_sideband_dbi_w_mode(pp, true); + exynos_pcie_sideband_dbi_w_mode(exynos_pcie, true); ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val); - exynos_pcie_sideband_dbi_w_mode(pp, false); + exynos_pcie_sideband_dbi_w_mode(exynos_pcie, false); return ret; } static int exynos_pcie_link_up(struct pcie_port *pp) { struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); - u32 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP); + u32 val; + val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP); if (val == PCIE_ELBI_LTSSM_ENABLE) return 1; @@ -479,8 +460,10 @@ static int exynos_pcie_link_up(struct pcie_port *pp) static void exynos_pcie_host_init(struct pcie_port *pp) { - exynos_pcie_establish_link(pp); - exynos_pcie_enable_interrupts(pp); + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + + exynos_pcie_establish_link(exynos_pcie); + exynos_pcie_enable_interrupts(exynos_pcie); } static struct pcie_host_ops exynos_pcie_host_ops = { @@ -492,36 +475,38 @@ static struct pcie_host_ops exynos_pcie_host_ops = { .host_init = exynos_pcie_host_init, }; -static int __init exynos_add_pcie_port(struct pcie_port *pp, +static int __init exynos_add_pcie_port(struct exynos_pcie *exynos_pcie, struct platform_device *pdev) { + struct pcie_port *pp = &exynos_pcie->pp; + struct device *dev = pp->dev; int ret; pp->irq = platform_get_irq(pdev, 1); if (!pp->irq) { - dev_err(&pdev->dev, "failed to get irq\n"); + dev_err(dev, "failed to get irq\n"); return -ENODEV; } - ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler, - IRQF_SHARED, "exynos-pcie", pp); + ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler, + IRQF_SHARED, "exynos-pcie", exynos_pcie); if (ret) { - dev_err(&pdev->dev, "failed to request irq\n"); + dev_err(dev, "failed to request irq\n"); return ret; } if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq(pdev, 0); if (!pp->msi_irq) { - dev_err(&pdev->dev, "failed to get msi irq\n"); + dev_err(dev, "failed to get msi irq\n"); return -ENODEV; } - ret = devm_request_irq(&pdev->dev, pp->msi_irq, + ret = devm_request_irq(dev, pp->msi_irq, 
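exynos_pcie_readl_rc()/_writel_rc() bracket every DBI access between sideband mode on and off, so callers never observe the mode left enabled. The bracket pattern, sketched with mode_set() standing in for the ELBI helper:

static u32 bracketed_read(void __iomem *base, u32 reg)
{
	u32 val;

	mode_set(true);		/* route reads through the sideband */
	val = readl(base + reg);
	mode_set(false);	/* always restore normal decoding */
	return val;
}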
exynos_pcie_msi_irq_handler, IRQF_SHARED | IRQF_NO_THREAD, - "exynos-pcie", pp); + "exynos-pcie", exynos_pcie); if (ret) { - dev_err(&pdev->dev, "failed to request msi irq\n"); + dev_err(dev, "failed to request msi irq\n"); return ret; } } @@ -531,7 +516,7 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp, ret = dw_pcie_host_init(pp); if (ret) { - dev_err(&pdev->dev, "failed to initialize host\n"); + dev_err(dev, "failed to initialize host\n"); return ret; } @@ -540,37 +525,36 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp, static int __init exynos_pcie_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct exynos_pcie *exynos_pcie; struct pcie_port *pp; - struct device_node *np = pdev->dev.of_node; + struct device_node *np = dev->of_node; struct resource *elbi_base; struct resource *phy_base; struct resource *block_base; int ret; - exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie), - GFP_KERNEL); + exynos_pcie = devm_kzalloc(dev, sizeof(*exynos_pcie), GFP_KERNEL); if (!exynos_pcie) return -ENOMEM; pp = &exynos_pcie->pp; - - pp->dev = &pdev->dev; + pp->dev = dev; exynos_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); - exynos_pcie->clk = devm_clk_get(&pdev->dev, "pcie"); + exynos_pcie->clk = devm_clk_get(dev, "pcie"); if (IS_ERR(exynos_pcie->clk)) { - dev_err(&pdev->dev, "Failed to get pcie rc clock\n"); + dev_err(dev, "Failed to get pcie rc clock\n"); return PTR_ERR(exynos_pcie->clk); } ret = clk_prepare_enable(exynos_pcie->clk); if (ret) return ret; - exynos_pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus"); + exynos_pcie->bus_clk = devm_clk_get(dev, "pcie_bus"); if (IS_ERR(exynos_pcie->bus_clk)) { - dev_err(&pdev->dev, "Failed to get pcie bus clock\n"); + dev_err(dev, "Failed to get pcie bus clock\n"); ret = PTR_ERR(exynos_pcie->bus_clk); goto fail_clk; } @@ -579,27 +563,27 @@ static int __init exynos_pcie_probe(struct platform_device *pdev) goto fail_clk; elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); - exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base); + exynos_pcie->elbi_base = devm_ioremap_resource(dev, elbi_base); if (IS_ERR(exynos_pcie->elbi_base)) { ret = PTR_ERR(exynos_pcie->elbi_base); goto fail_bus_clk; } phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1); - exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base); + exynos_pcie->phy_base = devm_ioremap_resource(dev, phy_base); if (IS_ERR(exynos_pcie->phy_base)) { ret = PTR_ERR(exynos_pcie->phy_base); goto fail_bus_clk; } block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2); - exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base); + exynos_pcie->block_base = devm_ioremap_resource(dev, block_base); if (IS_ERR(exynos_pcie->block_base)) { ret = PTR_ERR(exynos_pcie->block_base); goto fail_bus_clk; } - ret = exynos_add_pcie_port(pp, pdev); + ret = exynos_add_pcie_port(exynos_pcie, pdev); if (ret < 0) goto fail_bus_clk; diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c index ead4a5c3480b..c8cefb078218 100644 --- a/drivers/pci/host/pci-imx6.c +++ b/drivers/pci/host/pci-imx6.c @@ -39,16 +39,15 @@ enum imx6_pcie_variants { }; struct imx6_pcie { + struct pcie_port pp; /* pp.dbi_base is DT 0th resource */ int reset_gpio; bool gpio_active_high; struct clk *pcie_bus; struct clk *pcie_phy; struct clk *pcie_inbound_axi; struct clk *pcie; - struct pcie_port pp; struct regmap *iomuxc_gpr; enum imx6_pcie_variants variant; - void __iomem *mem_base; u32 tx_deemph_gen1; u32 
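The exynos probe path uses devm_* acquisitions throughout (devm_kzalloc(), devm_clk_get(), devm_ioremap_resource()), so its error paths only undo what devm does not track, namely the clk_prepare_enable() calls. An outline of why the unwind labels stop at the clocks; struct priv is a hypothetical container:

/* devm_* resources are owned by the device and released automatically
 * on probe failure or unbind; enabled clocks are not. */
static int probe(struct platform_device *pdev)
{
	struct priv *p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;		/* nothing to undo */

	p->clk = devm_clk_get(&pdev->dev, "pcie");
	if (IS_ERR(p->clk))
		return PTR_ERR(p->clk);	/* devm frees p for us */

	return clk_prepare_enable(p->clk); /* this one needs undoing later */
}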
tx_deemph_gen2_3p5db; u32 tx_deemph_gen2_6db; @@ -96,14 +95,15 @@ struct imx6_pcie { #define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) #define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) -static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val) +static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val) { + struct pcie_port *pp = &imx6_pcie->pp; u32 val; u32 max_iterations = 10; u32 wait_counter = 0; do { - val = readl(dbi_base + PCIE_PHY_STAT); + val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT); val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1; wait_counter++; @@ -116,123 +116,126 @@ static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val) return -ETIMEDOUT; } -static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr) +static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr) { + struct pcie_port *pp = &imx6_pcie->pp; u32 val; int ret; val = addr << PCIE_PHY_CTRL_DATA_LOC; - writel(val, dbi_base + PCIE_PHY_CTRL); + dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val); val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC); - writel(val, dbi_base + PCIE_PHY_CTRL); + dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val); - ret = pcie_phy_poll_ack(dbi_base, 1); + ret = pcie_phy_poll_ack(imx6_pcie, 1); if (ret) return ret; val = addr << PCIE_PHY_CTRL_DATA_LOC; - writel(val, dbi_base + PCIE_PHY_CTRL); + dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val); - return pcie_phy_poll_ack(dbi_base, 0); + return pcie_phy_poll_ack(imx6_pcie, 0); } /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ -static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data) +static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data) { + struct pcie_port *pp = &imx6_pcie->pp; u32 val, phy_ctl; int ret; - ret = pcie_phy_wait_ack(dbi_base, addr); + ret = pcie_phy_wait_ack(imx6_pcie, addr); if (ret) return ret; /* assert Read signal */ phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC; - writel(phy_ctl, dbi_base + PCIE_PHY_CTRL); + dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, phy_ctl); - ret = pcie_phy_poll_ack(dbi_base, 1); + ret = pcie_phy_poll_ack(imx6_pcie, 1); if (ret) return ret; - val = readl(dbi_base + PCIE_PHY_STAT); + val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT); *data = val & 0xffff; /* deassert Read signal */ - writel(0x00, dbi_base + PCIE_PHY_CTRL); + dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x00); - return pcie_phy_poll_ack(dbi_base, 0); + return pcie_phy_poll_ack(imx6_pcie, 0); } -static int pcie_phy_write(void __iomem *dbi_base, int addr, int data) +static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data) { + struct pcie_port *pp = &imx6_pcie->pp; u32 var; int ret; /* write addr */ /* cap addr */ - ret = pcie_phy_wait_ack(dbi_base, addr); + ret = pcie_phy_wait_ack(imx6_pcie, addr); if (ret) return ret; var = data << PCIE_PHY_CTRL_DATA_LOC; - writel(var, dbi_base + PCIE_PHY_CTRL); + dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var); /* capture data */ var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC); - writel(var, dbi_base + PCIE_PHY_CTRL); + dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var); - ret = pcie_phy_poll_ack(dbi_base, 1); + ret = pcie_phy_poll_ack(imx6_pcie, 1); if (ret) return ret; /* deassert cap data */ var = data << PCIE_PHY_CTRL_DATA_LOC; - writel(var, dbi_base + PCIE_PHY_CTRL); + dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var); /* wait for ack de-assertion */ - ret = pcie_phy_poll_ack(dbi_base, 0); + ret = pcie_phy_poll_ack(imx6_pcie, 0); if (ret) return ret; /* assert wr signal */ var = 0x1 << PCIE_PHY_CTRL_WR_LOC; - writel(var, dbi_base + PCIE_PHY_CTRL); + dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var); /* 
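wr asserted above; now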
wait for ack */ - ret = pcie_phy_poll_ack(dbi_base, 1); + ret = pcie_phy_poll_ack(imx6_pcie, 1); if (ret) return ret; /* deassert wr signal */ var = data << PCIE_PHY_CTRL_DATA_LOC; - writel(var, dbi_base + PCIE_PHY_CTRL); + dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var); /* wait for ack de-assertion */ - ret = pcie_phy_poll_ack(dbi_base, 0); + ret = pcie_phy_poll_ack(imx6_pcie, 0); if (ret) return ret; - writel(0x0, dbi_base + PCIE_PHY_CTRL); + dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x0); return 0; } -static void imx6_pcie_reset_phy(struct pcie_port *pp) +static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) { u32 tmp; - pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp); + pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); - pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp); + pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); usleep_range(2000, 3000); - pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp); + pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); - pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp); + pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); } /* Added for PCI abort handling */ @@ -242,9 +245,9 @@ static int imx6q_pcie_abort_handler(unsigned long addr, return 0; } -static int imx6_pcie_assert_core_reset(struct pcie_port *pp) +static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) { - struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); + struct pcie_port *pp = &imx6_pcie->pp; u32 val, gpr1, gpr12; switch (imx6_pcie->variant) { @@ -281,10 +284,10 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp) if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) && (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) { - val = readl(pp->dbi_base + PCIE_PL_PFLR); + val = dw_pcie_readl_rc(pp, PCIE_PL_PFLR); val &= ~PCIE_PL_PFLR_LINK_STATE_MASK; val |= PCIE_PL_PFLR_FORCE_LINK; - writel(val, pp->dbi_base + PCIE_PL_PFLR); + dw_pcie_writel_rc(pp, PCIE_PL_PFLR, val); regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); @@ -296,20 +299,19 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp) IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); break; } - - return 0; } static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) { struct pcie_port *pp = &imx6_pcie->pp; + struct device *dev = pp->dev; int ret = 0; switch (imx6_pcie->variant) { case IMX6SX: ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi); if (ret) { - dev_err(pp->dev, "unable to enable pcie_axi clock\n"); + dev_err(dev, "unable to enable pcie_axi clock\n"); break; } @@ -336,32 +338,33 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) return ret; } -static int imx6_pcie_deassert_core_reset(struct pcie_port *pp) +static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) { - struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); + struct pcie_port *pp = &imx6_pcie->pp; + struct device *dev = pp->dev; int ret; ret = clk_prepare_enable(imx6_pcie->pcie_phy); if (ret) { - dev_err(pp->dev, "unable to enable pcie_phy clock\n"); - goto err_pcie_phy; + dev_err(dev, "unable to enable pcie_phy clock\n"); + return; } ret = clk_prepare_enable(imx6_pcie->pcie_bus); if (ret) { - dev_err(pp->dev, "unable to enable pcie_bus clock\n"); + dev_err(dev, "unable to enable pcie_bus clock\n"); goto err_pcie_bus; } ret = clk_prepare_enable(imx6_pcie->pcie); if (ret) { - dev_err(pp->dev, "unable to enable pcie clock\n"); + dev_err(dev, "unable to enable 
pcie clock\n"); goto err_pcie; } ret = imx6_pcie_enable_ref_clk(imx6_pcie); if (ret) { - dev_err(pp->dev, "unable to enable pcie ref clock\n"); + dev_err(dev, "unable to enable pcie ref clock\n"); goto err_ref_clk; } @@ -392,7 +395,7 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp) break; } - return 0; + return; err_ref_clk: clk_disable_unprepare(imx6_pcie->pcie); @@ -400,14 +403,10 @@ err_pcie: clk_disable_unprepare(imx6_pcie->pcie_bus); err_pcie_bus: clk_disable_unprepare(imx6_pcie->pcie_phy); -err_pcie_phy: - return ret; } -static void imx6_pcie_init_phy(struct pcie_port *pp) +static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) { - struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); - if (imx6_pcie->variant == IMX6SX) regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_RX_EQ_MASK, @@ -439,45 +438,52 @@ static void imx6_pcie_init_phy(struct pcie_port *pp) imx6_pcie->tx_swing_low << 25); } -static int imx6_pcie_wait_for_link(struct pcie_port *pp) +static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie) { + struct pcie_port *pp = &imx6_pcie->pp; + struct device *dev = pp->dev; + /* check if the link is up or not */ if (!dw_pcie_wait_for_link(pp)) return 0; - dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", - readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), - readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); + dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", + dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0), + dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1)); return -ETIMEDOUT; } -static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp) +static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) { + struct pcie_port *pp = &imx6_pcie->pp; + struct device *dev = pp->dev; u32 tmp; unsigned int retries; for (retries = 0; retries < 200; retries++) { - tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); + tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL); /* Test if the speed change finished. */ if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) return 0; usleep_range(100, 1000); } - dev_err(pp->dev, "Speed change timeout\n"); + dev_err(dev, "Speed change timeout\n"); return -EINVAL; } static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg) { - struct pcie_port *pp = arg; + struct imx6_pcie *imx6_pcie = arg; + struct pcie_port *pp = &imx6_pcie->pp; return dw_handle_msi_irq(pp); } -static int imx6_pcie_establish_link(struct pcie_port *pp) +static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) { - struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); + struct pcie_port *pp = &imx6_pcie->pp; + struct device *dev = pp->dev; u32 tmp; int ret; @@ -486,76 +492,73 @@ static int imx6_pcie_establish_link(struct pcie_port *pp) * started in Gen2 mode, there is a possibility the devices on the * bus will not be detected at all. This happens with PCIe switches. */ - tmp = readl(pp->dbi_base + PCIE_RC_LCR); + tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR); tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1; - writel(tmp, pp->dbi_base + PCIE_RC_LCR); + dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp); /* Start LTSSM. */ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); - ret = imx6_pcie_wait_for_link(pp); + ret = imx6_pcie_wait_for_link(imx6_pcie); if (ret) { - dev_info(pp->dev, "Link never came up\n"); + dev_info(dev, "Link never came up\n"); goto err_reset_phy; } if (imx6_pcie->link_gen == 2) { /* Allow Gen2 mode after the link is up. 
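Raising the max-speed field in PCIE_RC_LCR alone does not retrain the link; the Directed Speed Change sequence below (setting PORT_LOGIC_SPEED_CHANGE in PCIE_LINK_WIDTH_SPEED_CONTROL) is what actually renegotiates the rate.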
*/ - tmp = readl(pp->dbi_base + PCIE_RC_LCR); + tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR); tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2; - writel(tmp, pp->dbi_base + PCIE_RC_LCR); + dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp); } else { - dev_info(pp->dev, "Link: Gen2 disabled\n"); + dev_info(dev, "Link: Gen2 disabled\n"); } /* * Start Directed Speed Change so the best possible speed both link * partners support can be negotiated. */ - tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); + tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL); tmp |= PORT_LOGIC_SPEED_CHANGE; - writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); + dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); - ret = imx6_pcie_wait_for_speed_change(pp); + ret = imx6_pcie_wait_for_speed_change(imx6_pcie); if (ret) { - dev_err(pp->dev, "Failed to bring link up!\n"); + dev_err(dev, "Failed to bring link up!\n"); goto err_reset_phy; } /* Make sure link training is finished as well! */ - ret = imx6_pcie_wait_for_link(pp); + ret = imx6_pcie_wait_for_link(imx6_pcie); if (ret) { - dev_err(pp->dev, "Failed to bring link up!\n"); + dev_err(dev, "Failed to bring link up!\n"); goto err_reset_phy; } - tmp = readl(pp->dbi_base + PCIE_RC_LCSR); - dev_info(pp->dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf); + tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCSR); + dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf); return 0; err_reset_phy: - dev_dbg(pp->dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", - readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), - readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); - imx6_pcie_reset_phy(pp); - + dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", + dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0), + dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1)); + imx6_pcie_reset_phy(imx6_pcie); return ret; } static void imx6_pcie_host_init(struct pcie_port *pp) { - imx6_pcie_assert_core_reset(pp); - - imx6_pcie_init_phy(pp); - - imx6_pcie_deassert_core_reset(pp); + struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); + imx6_pcie_assert_core_reset(imx6_pcie); + imx6_pcie_init_phy(imx6_pcie); + imx6_pcie_deassert_core_reset(imx6_pcie); dw_pcie_setup_rc(pp); - - imx6_pcie_establish_link(pp); + imx6_pcie_establish_link(imx6_pcie); if (IS_ENABLED(CONFIG_PCI_MSI)) dw_pcie_msi_init(pp); @@ -563,7 +566,7 @@ static void imx6_pcie_host_init(struct pcie_port *pp) static int imx6_pcie_link_up(struct pcie_port *pp) { - return readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & + return dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1) & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP; } @@ -572,24 +575,26 @@ static struct pcie_host_ops imx6_pcie_host_ops = { .host_init = imx6_pcie_host_init, }; -static int __init imx6_add_pcie_port(struct pcie_port *pp, - struct platform_device *pdev) +static int __init imx6_add_pcie_port(struct imx6_pcie *imx6_pcie, + struct platform_device *pdev) { + struct pcie_port *pp = &imx6_pcie->pp; + struct device *dev = pp->dev; int ret; if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq_byname(pdev, "msi"); if (pp->msi_irq <= 0) { - dev_err(&pdev->dev, "failed to get MSI irq\n"); + dev_err(dev, "failed to get MSI irq\n"); return -ENODEV; } - ret = devm_request_irq(&pdev->dev, pp->msi_irq, + ret = devm_request_irq(dev, pp->msi_irq, imx6_pcie_msi_handler, IRQF_SHARED | IRQF_NO_THREAD, - "mx6-pcie-msi", pp); + "mx6-pcie-msi", imx6_pcie); if (ret) { - dev_err(&pdev->dev, "failed to request MSI irq\n"); + dev_err(dev, "failed to request MSI irq\n"); return ret; } } @@ -599,7 +604,7 @@ static int __init 
imx6_add_pcie_port(struct pcie_port *pp, ret = dw_pcie_host_init(pp); if (ret) { - dev_err(&pdev->dev, "failed to initialize host\n"); + dev_err(dev, "failed to initialize host\n"); return ret; } @@ -608,75 +613,72 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp, static int __init imx6_pcie_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct imx6_pcie *imx6_pcie; struct pcie_port *pp; - struct device_node *np = pdev->dev.of_node; struct resource *dbi_base; - struct device_node *node = pdev->dev.of_node; + struct device_node *node = dev->of_node; int ret; - imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL); + imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL); if (!imx6_pcie) return -ENOMEM; pp = &imx6_pcie->pp; - pp->dev = &pdev->dev; + pp->dev = dev; imx6_pcie->variant = - (enum imx6_pcie_variants)of_device_get_match_data(&pdev->dev); + (enum imx6_pcie_variants)of_device_get_match_data(dev); /* Added for PCI abort handling */ hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0, "imprecise external abort"); dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base); + pp->dbi_base = devm_ioremap_resource(dev, dbi_base); if (IS_ERR(pp->dbi_base)) return PTR_ERR(pp->dbi_base); /* Fetch GPIOs */ - imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); - imx6_pcie->gpio_active_high = of_property_read_bool(np, + imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0); + imx6_pcie->gpio_active_high = of_property_read_bool(node, "reset-gpio-active-high"); if (gpio_is_valid(imx6_pcie->reset_gpio)) { - ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio, + ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio, imx6_pcie->gpio_active_high ? 
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, "PCIe reset"); if (ret) { - dev_err(&pdev->dev, "unable to get reset gpio\n"); + dev_err(dev, "unable to get reset gpio\n"); return ret; } } /* Fetch clocks */ - imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy"); + imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy"); if (IS_ERR(imx6_pcie->pcie_phy)) { - dev_err(&pdev->dev, - "pcie_phy clock source missing or invalid\n"); + dev_err(dev, "pcie_phy clock source missing or invalid\n"); return PTR_ERR(imx6_pcie->pcie_phy); } - imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus"); + imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus"); if (IS_ERR(imx6_pcie->pcie_bus)) { - dev_err(&pdev->dev, - "pcie_bus clock source missing or invalid\n"); + dev_err(dev, "pcie_bus clock source missing or invalid\n"); return PTR_ERR(imx6_pcie->pcie_bus); } - imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie"); + imx6_pcie->pcie = devm_clk_get(dev, "pcie"); if (IS_ERR(imx6_pcie->pcie)) { - dev_err(&pdev->dev, - "pcie clock source missing or invalid\n"); + dev_err(dev, "pcie clock source missing or invalid\n"); return PTR_ERR(imx6_pcie->pcie); } if (imx6_pcie->variant == IMX6SX) { - imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev, + imx6_pcie->pcie_inbound_axi = devm_clk_get(dev, "pcie_inbound_axi"); if (IS_ERR(imx6_pcie->pcie_inbound_axi)) { - dev_err(&pdev->dev, + dev_err(dev, "pcie_inbound_axi clock missing or invalid\n"); return PTR_ERR(imx6_pcie->pcie_inbound_axi); } @@ -686,7 +688,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) imx6_pcie->iomuxc_gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); if (IS_ERR(imx6_pcie->iomuxc_gpr)) { - dev_err(&pdev->dev, "unable to find iomuxc registers\n"); + dev_err(dev, "unable to find iomuxc registers\n"); return PTR_ERR(imx6_pcie->iomuxc_gpr); } @@ -712,12 +714,12 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) imx6_pcie->tx_swing_low = 127; /* Limit link speed */ - ret = of_property_read_u32(pp->dev->of_node, "fsl,max-link-speed", + ret = of_property_read_u32(node, "fsl,max-link-speed", &imx6_pcie->link_gen); if (ret) imx6_pcie->link_gen = 1; - ret = imx6_add_pcie_port(pp, pdev); + ret = imx6_add_pcie_port(imx6_pcie, pdev); if (ret < 0) return ret; @@ -730,7 +732,7 @@ static void imx6_pcie_shutdown(struct platform_device *pdev) struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev); /* bring down link, so bootloader gets clean state in case of reboot */ - imx6_pcie_assert_core_reset(&imx6_pcie->pp); + imx6_pcie_assert_core_reset(imx6_pcie); } static const struct of_device_id imx6_pcie_of_match[] = { diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c index 41515092eb0d..9397c4667106 100644 --- a/drivers/pci/host/pci-keystone-dw.c +++ b/drivers/pci/host/pci-keystone-dw.c @@ -88,13 +88,24 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp) return ks_pcie->app.start + MSI_IRQ; } +static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset) +{ + return readl(ks_pcie->va_app_base + offset); +} + +static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val) +{ + writel(val, ks_pcie->va_app_base + offset); +} + void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset) { struct pcie_port *pp = &ks_pcie->pp; + struct device *dev = pp->dev; u32 pending, vector; int src, virq; - pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4)); + pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4)); /* * MSI0 
status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit @@ -104,7 +115,7 @@ void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset) if (BIT(src) & pending) { vector = offset + (src << 3); virq = irq_linear_revmap(pp->irq_domain, vector); - dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n", + dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", src, vector, virq); generic_handle_irq(virq); } @@ -124,9 +135,9 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d) offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos); - writel(BIT(bit_pos), - ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4)); - writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI); + ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4), + BIT(bit_pos)); + ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET); } void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) @@ -135,8 +146,8 @@ void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos); - writel(BIT(bit_pos), - ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4)); + ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4), + BIT(bit_pos)); } void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) @@ -145,8 +156,8 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos); - writel(BIT(bit_pos), - ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4)); + ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4), + BIT(bit_pos)); } static void ks_dw_pcie_msi_irq_mask(struct irq_data *d) @@ -215,6 +226,7 @@ static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = { int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip) { struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); + struct device *dev = pp->dev; int i; pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np, @@ -222,7 +234,7 @@ int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip) &ks_dw_pcie_msi_domain_ops, chip); if (!pp->irq_domain) { - dev_err(pp->dev, "irq domain init failed\n"); + dev_err(dev, "irq domain init failed\n"); return -ENXIO; } @@ -237,47 +249,47 @@ void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie) int i; for (i = 0; i < MAX_LEGACY_IRQS; i++) - writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4)); + ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1); } void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset) { struct pcie_port *pp = &ks_pcie->pp; + struct device *dev = pp->dev; u32 pending; int virq; - pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4)); + pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4)); if (BIT(0) & pending) { virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset); - dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset, - virq); + dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq); generic_handle_irq(virq); } /* EOI the INTx interrupt */ - writel(offset, ks_pcie->va_app_base + IRQ_EOI); + ks_dw_app_writel(ks_pcie, IRQ_EOI, offset); } -void ks_dw_pcie_enable_error_irq(void __iomem *reg_base) +void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie) { - writel(ERR_IRQ_ALL, reg_base + ERR_IRQ_ENABLE_SET); + ks_dw_app_writel(ks_pcie, 
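/* note the argument order: unlike writel(val, addr), the wrapper takes (ks_pcie, offset, value) */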
ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL); } -irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev, - void __iomem *reg_base) +irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie) { u32 status; - status = readl(reg_base + ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL; + status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL; if (!status) return IRQ_NONE; if (status & ERR_FATAL_IRQ) - dev_err(dev, "fatal error (status %#010x)\n", status); + dev_err(ks_pcie->pp.dev, "fatal error (status %#010x)\n", + status); /* Ack the IRQ; status bits are RW1C */ - writel(status, reg_base + ERR_IRQ_STATUS); + ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status); return IRQ_HANDLED; } @@ -322,15 +334,15 @@ static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = { * Since modification of dbi_cs2 involves different clock domain, read the * status back to ensure the transition is complete. */ -static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt) +static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie) { u32 val; - writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS), - reg_virt + CMD_STATUS); + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val); do { - val = readl(reg_virt + CMD_STATUS); + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); } while (!(val & DBI_CS2_EN_VAL)); } @@ -340,15 +352,15 @@ static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt) * Since modification of dbi_cs2 involves different clock domain, read the * status back to ensure the transition is complete. */ -static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt) +static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie) { u32 val; - writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS), - reg_virt + CMD_STATUS); + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val); do { - val = readl(reg_virt + CMD_STATUS); + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); } while (val & DBI_CS2_EN_VAL); } @@ -357,28 +369,29 @@ void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) struct pcie_port *pp = &ks_pcie->pp; u32 start = pp->mem->start, end = pp->mem->end; int i, tr_size; + u32 val; /* Disable BARs for inbound access */ - ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base); - writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0); - writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1); - ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base); + ks_dw_pcie_set_dbi_mode(ks_pcie); + dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0); + dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0); + ks_dw_pcie_clear_dbi_mode(ks_pcie); /* Set outbound translation size per window division */ - writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE); + ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7); tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M; /* Using Direct 1:1 mapping of RC <-> PCI memory space */ for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) { - writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i)); - writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i)); + ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1); + ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0); start += tr_size; } /* Enable OB translation */ - writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS), - ks_pcie->va_app_base + CMD_STATUS); + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val); } /** @@ -418,7 +431,7 @@ static void __iomem 
*ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus, if (bus != 1) regval |= BIT(24); - writel(regval, ks_pcie->va_app_base + CFG_SETUP); + ks_dw_app_writel(ks_pcie, CFG_SETUP, regval); return pp->va_cfg0_base; } @@ -456,19 +469,19 @@ void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp) struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); /* Configure and set up BAR0 */ - ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base); + ks_dw_pcie_set_dbi_mode(ks_pcie); /* Enable BAR0 */ - writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0); - writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0); + dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 1); + dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, SZ_4K - 1); - ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base); + ks_dw_pcie_clear_dbi_mode(ks_pcie); /* * For BAR0, just setting bus address for inbound writes (MSI) should * be sufficient. Use physical address to avoid any conflicts. */ - writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0); + dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, ks_pcie->app.start); } /** @@ -476,8 +489,9 @@ void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp) */ int ks_dw_pcie_link_up(struct pcie_port *pp) { - u32 val = readl(pp->dbi_base + DEBUG0); + u32 val; + val = dw_pcie_readl_rc(pp, DEBUG0); return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0; } @@ -486,13 +500,13 @@ void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie) u32 val; /* Disable Link training */ - val = readl(ks_pcie->va_app_base + CMD_STATUS); + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); val &= ~LTSSM_EN_VAL; - writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS); + ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); /* Initiate Link Training */ - val = readl(ks_pcie->va_app_base + CMD_STATUS); - writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS); + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); } /** @@ -506,12 +520,13 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, struct device_node *msi_intc_np) { struct pcie_port *pp = &ks_pcie->pp; - struct platform_device *pdev = to_platform_device(pp->dev); + struct device *dev = pp->dev; + struct platform_device *pdev = to_platform_device(dev); struct resource *res; /* Index 0 is the config reg. space address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pp->dbi_base = devm_ioremap_resource(pp->dev, res); + pp->dbi_base = devm_ioremap_resource(dev, res); if (IS_ERR(pp->dbi_base)) return PTR_ERR(pp->dbi_base); @@ -524,7 +539,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, /* Index 1 is the application reg. 
space address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res); + ks_pcie->va_app_base = devm_ioremap_resource(dev, res); if (IS_ERR(ks_pcie->va_app_base)) return PTR_ERR(ks_pcie->va_app_base); @@ -537,7 +552,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, &ks_dw_pcie_legacy_irq_domain_ops, NULL); if (!ks_pcie->legacy_irq_domain) { - dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n"); + dev_err(dev, "Failed to add irq domain for legacy irqs\n"); return -EINVAL; } diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c index 82b461b5b08a..043c19a05da1 100644 --- a/drivers/pci/host/pci-keystone.c +++ b/drivers/pci/host/pci-keystone.c @@ -89,12 +89,13 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs); static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) { struct pcie_port *pp = &ks_pcie->pp; + struct device *dev = pp->dev; unsigned int retries; dw_pcie_setup_rc(pp); if (dw_pcie_link_up(pp)) { - dev_err(pp->dev, "Link already up\n"); + dev_err(dev, "Link already up\n"); return 0; } @@ -105,7 +106,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) return 0; } - dev_err(pp->dev, "phy link never came up\n"); + dev_err(dev, "phy link never came up\n"); return -ETIMEDOUT; } @@ -115,9 +116,10 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc) struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); u32 offset = irq - ks_pcie->msi_host_irqs[0]; struct pcie_port *pp = &ks_pcie->pp; + struct device *dev = pp->dev; struct irq_chip *chip = irq_desc_get_chip(desc); - dev_dbg(pp->dev, "%s, irq %d\n", __func__, irq); + dev_dbg(dev, "%s, irq %d\n", __func__, irq); /* * The chained irq handler installation would have replaced normal @@ -142,10 +144,11 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) unsigned int irq = irq_desc_get_irq(desc); struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); struct pcie_port *pp = &ks_pcie->pp; + struct device *dev = pp->dev; u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0]; struct irq_chip *chip = irq_desc_get_chip(desc); - dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq); + dev_dbg(dev, ": Handling legacy irq %d\n", irq); /* * The chained irq handler installation would have replaced normal @@ -234,7 +237,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) } if (ks_pcie->error_irq > 0) - ks_dw_pcie_enable_error_irq(ks_pcie->va_app_base); + ks_dw_pcie_enable_error_irq(ks_pcie); } /* @@ -302,14 +305,14 @@ static irqreturn_t pcie_err_irq_handler(int irq, void *priv) { struct keystone_pcie *ks_pcie = priv; - return ks_dw_pcie_handle_error_irq(ks_pcie->pp.dev, - ks_pcie->va_app_base); + return ks_dw_pcie_handle_error_irq(ks_pcie); } static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, struct platform_device *pdev) { struct pcie_port *pp = &ks_pcie->pp; + struct device *dev = pp->dev; int ret; ret = ks_pcie_get_irq_controller_info(ks_pcie, @@ -332,12 +335,12 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, */ ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0); if (ks_pcie->error_irq <= 0) - dev_info(&pdev->dev, "no error IRQ defined\n"); + dev_info(dev, "no error IRQ defined\n"); else { ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler, IRQF_SHARED, "pcie-error-irq", ks_pcie); if (ret < 0) { - dev_err(&pdev->dev, "failed to request error IRQ %d\n", + dev_err(dev, "failed to request 
error IRQ %d\n", ks_pcie->error_irq); return ret; } @@ -347,7 +350,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, pp->ops = &keystone_pcie_host_ops; ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np); if (ret) { - dev_err(&pdev->dev, "failed to initialize host\n"); + dev_err(dev, "failed to initialize host\n"); return ret; } @@ -381,12 +384,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev) struct phy *phy; int ret; - ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie), - GFP_KERNEL); + ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL); if (!ks_pcie) return -ENOMEM; pp = &ks_pcie->pp; + pp->dev = dev; /* initialize SerDes Phy if present */ phy = devm_phy_get(dev, "pcie-phy"); @@ -408,7 +411,6 @@ static int __init ks_pcie_probe(struct platform_device *pdev) devm_iounmap(dev, reg_p); devm_release_mem_region(dev, res->start, resource_size(res)); - pp->dev = dev; ks_pcie->np = dev->of_node; platform_set_drvdata(pdev, ks_pcie); ks_pcie->clk = devm_clk_get(dev, "pcie"); diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h index a5b0cb2ba4d7..bc54bafda068 100644 --- a/drivers/pci/host/pci-keystone.h +++ b/drivers/pci/host/pci-keystone.h @@ -17,8 +17,8 @@ #define MAX_LEGACY_HOST_IRQS 4 struct keystone_pcie { + struct pcie_port pp; /* pp.dbi_base is DT 0th res */ struct clk *clk; - struct pcie_port pp; /* PCI Device ID */ u32 device_id; int num_legacy_host_irqs; @@ -34,7 +34,7 @@ struct keystone_pcie { int error_irq; /* Application register space */ - void __iomem *va_app_base; + void __iomem *va_app_base; /* DT 1st resource */ struct resource app; }; @@ -45,9 +45,8 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp); /* Keystone specific PCI controller APIs */ void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie); void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset); -void ks_dw_pcie_enable_error_irq(void __iomem *reg_base); -irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev, - void __iomem *reg_base); +void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie); +irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie); int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, struct device_node *msi_intc_np); int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c index 114ba819277a..2cb7315e26d0 100644 --- a/drivers/pci/host/pci-layerscape.c +++ b/drivers/pci/host/pci-layerscape.c @@ -45,10 +45,9 @@ struct ls_pcie_drvdata { }; struct ls_pcie { - void __iomem *dbi; + struct pcie_port pp; /* pp.dbi_base is DT regs */ void __iomem *lut; struct regmap *scfg; - struct pcie_port pp; const struct ls_pcie_drvdata *drvdata; int index; }; @@ -59,7 +58,7 @@ static bool ls_pcie_is_bridge(struct ls_pcie *pcie) { u32 header_type; - header_type = ioread8(pcie->dbi + PCI_HEADER_TYPE); + header_type = ioread8(pcie->pp.dbi_base + PCI_HEADER_TYPE); header_type &= 0x7f; return header_type == PCI_HEADER_TYPE_BRIDGE; @@ -68,13 +67,13 @@ static bool ls_pcie_is_bridge(struct ls_pcie *pcie) /* Clear multi-function bit */ static void ls_pcie_clear_multifunction(struct ls_pcie *pcie) { - iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE); + iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->pp.dbi_base + PCI_HEADER_TYPE); } /* Fix class value */ static void ls_pcie_fix_class(struct ls_pcie *pcie) { - iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE); + 
iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->pp.dbi_base + PCI_CLASS_DEVICE); } /* Drop MSG TLP except for Vendor MSG */ @@ -82,9 +81,9 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) { u32 val; - val = ioread32(pcie->dbi + PCIE_STRFMR1); + val = ioread32(pcie->pp.dbi_base + PCIE_STRFMR1); val &= 0xDFFFFFFF; - iowrite32(val, pcie->dbi + PCIE_STRFMR1); + iowrite32(val, pcie->pp.dbi_base + PCIE_STRFMR1); } static int ls1021_pcie_link_up(struct pcie_port *pp) @@ -106,18 +105,19 @@ static int ls1021_pcie_link_up(struct pcie_port *pp) static void ls1021_pcie_host_init(struct pcie_port *pp) { + struct device *dev = pp->dev; struct ls_pcie *pcie = to_ls_pcie(pp); u32 index[2]; - pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node, + pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,pcie-scfg"); if (IS_ERR(pcie->scfg)) { - dev_err(pp->dev, "No syscfg phandle specified\n"); + dev_err(dev, "No syscfg phandle specified\n"); pcie->scfg = NULL; return; } - if (of_property_read_u32_array(pp->dev->of_node, + if (of_property_read_u32_array(dev->of_node, "fsl,pcie-scfg", index, 2)) { pcie->scfg = NULL; return; @@ -148,18 +148,19 @@ static void ls_pcie_host_init(struct pcie_port *pp) { struct ls_pcie *pcie = to_ls_pcie(pp); - iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN); + iowrite32(1, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN); ls_pcie_fix_class(pcie); ls_pcie_clear_multifunction(pcie); ls_pcie_drop_msg_tlp(pcie); - iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); + iowrite32(0, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN); } static int ls_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip) { + struct device *dev = pp->dev; + struct device_node *np = dev->of_node; struct device_node *msi_node; - struct device_node *np = pp->dev->of_node; /* * The MSI domain is set by the generic of_msi_configure(). 
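(Here the driver only resolves the "msi-parent" phandle to confirm the devicetree provides one; if it is absent, probing fails with -EINVAL below.)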
This @@ -169,7 +170,7 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp, */ msi_node = of_parse_phandle(np, "msi-parent", 0); if (!msi_node) { - dev_err(pp->dev, "failed to find msi-parent\n"); + dev_err(dev, "failed to find msi-parent\n"); return -EINVAL; } @@ -212,19 +213,15 @@ static const struct of_device_id ls_pcie_of_match[] = { { }, }; -static int __init ls_add_pcie_port(struct pcie_port *pp, - struct platform_device *pdev) +static int __init ls_add_pcie_port(struct ls_pcie *pcie) { + struct pcie_port *pp = &pcie->pp; + struct device *dev = pp->dev; int ret; - struct ls_pcie *pcie = to_ls_pcie(pp); - - pp->dev = &pdev->dev; - pp->dbi_base = pcie->dbi; - pp->ops = pcie->drvdata->ops; ret = dw_pcie_host_init(pp); if (ret) { - dev_err(pp->dev, "failed to initialize host\n"); + dev_err(dev, "failed to initialize host\n"); return ret; } @@ -233,38 +230,42 @@ static int __init ls_add_pcie_port(struct pcie_port *pp, static int __init ls_pcie_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; const struct of_device_id *match; struct ls_pcie *pcie; + struct pcie_port *pp; struct resource *dbi_base; int ret; - match = of_match_device(ls_pcie_of_match, &pdev->dev); + match = of_match_device(ls_pcie_of_match, dev); if (!match) return -ENODEV; - pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; + pp = &pcie->pp; + pp->dev = dev; + pp->ops = pcie->drvdata->ops; + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); - pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base); - if (IS_ERR(pcie->dbi)) { - dev_err(&pdev->dev, "missing *regs* space\n"); - return PTR_ERR(pcie->dbi); + pcie->pp.dbi_base = devm_ioremap_resource(dev, dbi_base); + if (IS_ERR(pcie->pp.dbi_base)) { + dev_err(dev, "missing *regs* space\n"); + return PTR_ERR(pcie->pp.dbi_base); } pcie->drvdata = match->data; - pcie->lut = pcie->dbi + pcie->drvdata->lut_offset; + pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset; if (!ls_pcie_is_bridge(pcie)) return -ENODEV; - ret = ls_add_pcie_port(&pcie->pp, pdev); + ret = ls_add_pcie_port(pcie); if (ret < 0) return ret; - platform_set_drvdata(pdev, pcie); - return 0; } diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index 307f81d6b479..45a89d969700 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -1190,13 +1190,13 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port) static int mvebu_pcie_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct mvebu_pcie *pcie; - struct device_node *np = pdev->dev.of_node; + struct device_node *np = dev->of_node; struct device_node *child; int num, i, ret; - pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie), - GFP_KERNEL); + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; @@ -1206,7 +1206,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev) /* Get the PCIe memory and I/O aperture */ mvebu_mbus_get_pcie_mem_aperture(&pcie->mem); if (resource_size(&pcie->mem) == 0) { - dev_err(&pdev->dev, "invalid memory aperture size\n"); + dev_err(dev, "invalid memory aperture size\n"); return -EINVAL; } @@ -1224,20 +1224,18 @@ static int mvebu_pcie_probe(struct platform_device *pdev) /* Get the bus range */ ret = of_pci_parse_bus_range(np, &pcie->busn); if (ret) { - dev_err(&pdev->dev, "failed to parse bus-range property: %d\n", - ret); + dev_err(dev, "failed to parse bus-range property: 
%d\n", ret); return ret; } - num = of_get_available_child_count(pdev->dev.of_node); + num = of_get_available_child_count(np); - pcie->ports = devm_kcalloc(&pdev->dev, num, sizeof(*pcie->ports), - GFP_KERNEL); + pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL); if (!pcie->ports) return -ENOMEM; i = 0; - for_each_available_child_of_node(pdev->dev.of_node, child) { + for_each_available_child_of_node(np, child) { struct mvebu_pcie_port *port = &pcie->ports[i]; ret = mvebu_pcie_parse_port(pcie, port, child); @@ -1266,8 +1264,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev) port->base = mvebu_pcie_map_registers(pdev, child, port); if (IS_ERR(port->base)) { - dev_err(&pdev->dev, "%s: cannot map registers\n", - port->name); + dev_err(dev, "%s: cannot map registers\n", port->name); port->base = NULL; mvebu_pcie_powerdown(port); continue; diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c index 597566f96f5e..1eeefa4df64c 100644 --- a/drivers/pci/host/pci-rcar-gen2.c +++ b/drivers/pci/host/pci-rcar-gen2.c @@ -154,10 +154,11 @@ static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) static irqreturn_t rcar_pci_err_irq(int irq, void *pw) { struct rcar_pci_priv *priv = pw; + struct device *dev = priv->dev; u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG); if (status & RCAR_PCI_INT_ALLERRORS) { - dev_err(priv->dev, "error irq: status %08x\n", status); + dev_err(dev, "error irq: status %08x\n", status); /* clear the error(s) */ iowrite32(status & RCAR_PCI_INT_ALLERRORS, @@ -170,13 +171,14 @@ static irqreturn_t rcar_pci_err_irq(int irq, void *pw) static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { + struct device *dev = priv->dev; int ret; u32 val; - ret = devm_request_irq(priv->dev, priv->irq, rcar_pci_err_irq, + ret = devm_request_irq(dev, priv->irq, rcar_pci_err_irq, IRQF_SHARED, "error irq", priv); if (ret) { - dev_err(priv->dev, "cannot claim IRQ for error handling\n"); + dev_err(dev, "cannot claim IRQ for error handling\n"); return; } @@ -192,15 +194,16 @@ static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { } static int rcar_pci_setup(int nr, struct pci_sys_data *sys) { struct rcar_pci_priv *priv = sys->private_data; + struct device *dev = priv->dev; void __iomem *reg = priv->reg; u32 val; int ret; - pm_runtime_enable(priv->dev); - pm_runtime_get_sync(priv->dev); + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); val = ioread32(reg + RCAR_PCI_UNIT_REV_REG); - dev_info(priv->dev, "PCI: bus%u revision %x\n", sys->busnr, val); + dev_info(dev, "PCI: bus%u revision %x\n", sys->busnr, val); /* Disable Direct Power Down State and assert reset */ val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD; @@ -275,7 +278,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) /* Add PCI resources */ pci_add_resource(&sys->resources, &priv->mem_res); - ret = devm_request_pci_bus_resources(priv->dev, &sys->resources); + ret = devm_request_pci_bus_resources(dev, &sys->resources); if (ret < 0) return ret; @@ -311,6 +314,7 @@ static int pci_dma_range_parser_init(struct of_pci_range_parser *parser, static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci, struct device_node *np) { + struct device *dev = pci->dev; struct of_pci_range range; struct of_pci_range_parser parser; int index = 0; @@ -331,14 +335,14 @@ static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci, /* Catch HW limitations */ if (!(range.flags & IORESOURCE_PREFETCH)) { - dev_err(pci->dev, 
"window must be prefetchable\n"); + dev_err(dev, "window must be prefetchable\n"); return -EINVAL; } if (pci->window_addr) { u32 lowaddr = 1 << (ffs(pci->window_addr) - 1); if (lowaddr < pci->window_size) { - dev_err(pci->dev, "invalid window size/addr\n"); + dev_err(dev, "invalid window size/addr\n"); return -EINVAL; } } @@ -350,6 +354,7 @@ static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci, static int rcar_pci_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct resource *cfg_res, *mem_res; struct rcar_pci_priv *priv; void __iomem *reg; @@ -357,7 +362,7 @@ static int rcar_pci_probe(struct platform_device *pdev) void *hw_private[1]; cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - reg = devm_ioremap_resource(&pdev->dev, cfg_res); + reg = devm_ioremap_resource(dev, cfg_res); if (IS_ERR(reg)) return PTR_ERR(reg); @@ -368,8 +373,7 @@ static int rcar_pci_probe(struct platform_device *pdev) if (mem_res->start & 0xFFFF) return -EINVAL; - priv = devm_kzalloc(&pdev->dev, - sizeof(struct rcar_pci_priv), GFP_KERNEL); + priv = devm_kzalloc(dev, sizeof(struct rcar_pci_priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -378,10 +382,10 @@ static int rcar_pci_probe(struct platform_device *pdev) priv->irq = platform_get_irq(pdev, 0); priv->reg = reg; - priv->dev = &pdev->dev; + priv->dev = dev; if (priv->irq < 0) { - dev_err(&pdev->dev, "no valid irq found\n"); + dev_err(dev, "no valid irq found\n"); return priv->irq; } @@ -390,23 +394,23 @@ static int rcar_pci_probe(struct platform_device *pdev) priv->window_pci = 0x40000000; priv->window_size = SZ_1G; - if (pdev->dev.of_node) { + if (dev->of_node) { struct resource busnr; int ret; - ret = of_pci_parse_bus_range(pdev->dev.of_node, &busnr); + ret = of_pci_parse_bus_range(dev->of_node, &busnr); if (ret < 0) { - dev_err(&pdev->dev, "failed to parse bus-range\n"); + dev_err(dev, "failed to parse bus-range\n"); return ret; } priv->busnr = busnr.start; if (busnr.end != busnr.start) - dev_warn(&pdev->dev, "only one bus number supported\n"); + dev_warn(dev, "only one bus number supported\n"); - ret = rcar_pci_parse_map_dma_ranges(priv, pdev->dev.of_node); + ret = rcar_pci_parse_map_dma_ranges(priv, dev->of_node); if (ret < 0) { - dev_err(&pdev->dev, "failed to parse dma-range\n"); + dev_err(dev, "failed to parse dma-range\n"); return ret; } } else { @@ -421,7 +425,7 @@ static int rcar_pci_probe(struct platform_device *pdev) hw.map_irq = rcar_pci_map_irq; hw.ops = &rcar_pci_ops; hw.setup = rcar_pci_setup; - pci_common_init_dev(&pdev->dev, &hw); + pci_common_init_dev(dev, &hw); return 0; } diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index e2a8e4cab22e..8dfccf733241 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c @@ -384,6 +384,7 @@ static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where) static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie, unsigned int busnr) { + struct device *dev = pcie->dev; pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED); phys_addr_t cs = pcie->cs->start; @@ -413,8 +414,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie, err = ioremap_page_range(virt, virt + SZ_64K, phys, prot); if (err < 0) { - dev_err(pcie->dev, "ioremap_page_range() failed: %d\n", - err); + dev_err(dev, "ioremap_page_range() failed: %d\n", err); goto unmap; } } @@ -462,6 +462,7 @@ static void __iomem *tegra_pcie_map_bus(struct 
pci_bus *bus, int where) { struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata); + struct device *dev = pcie->dev; void __iomem *addr = NULL; if (bus->number == 0) { @@ -482,8 +483,7 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus, addr = (void __iomem *)b->area->addr; if (!addr) { - dev_err(pcie->dev, - "failed to map cfg. space for bus %u\n", + dev_err(dev, "failed to map cfg. space for bus %u\n", bus->number); return NULL; } @@ -584,12 +584,13 @@ static void tegra_pcie_port_disable(struct tegra_pcie_port *port) static void tegra_pcie_port_free(struct tegra_pcie_port *port) { struct tegra_pcie *pcie = port->pcie; + struct device *dev = pcie->dev; - devm_iounmap(pcie->dev, port->base); - devm_release_mem_region(pcie->dev, port->regs.start, + devm_iounmap(dev, port->base); + devm_release_mem_region(dev, port->regs.start, resource_size(&port->regs)); list_del(&port->list); - devm_kfree(pcie->dev, port); + devm_kfree(dev, port); } /* Tegra PCIE root complex wrongly reports device class */ @@ -612,12 +613,13 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable); static int tegra_pcie_setup(int nr, struct pci_sys_data *sys) { struct tegra_pcie *pcie = sys_to_pcie(sys); + struct device *dev = pcie->dev; int err; sys->mem_offset = pcie->offset.mem; sys->io_offset = pcie->offset.io; - err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->io); + err = devm_request_resource(dev, &iomem_resource, &pcie->io); if (err < 0) return err; @@ -631,7 +633,7 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys) sys->mem_offset); pci_add_resource(&sys->resources, &pcie->busn); - err = devm_request_pci_bus_resources(pcie->dev, &sys->resources); + err = devm_request_pci_bus_resources(dev, &sys->resources); if (err < 0) return err; @@ -672,6 +674,7 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg) "Peer2Peer error", }; struct tegra_pcie *pcie = arg; + struct device *dev = pcie->dev; u32 code, signature; code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK; @@ -689,11 +692,9 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg) * happen a lot during enumeration */ if (code == AFI_INTR_MASTER_ABORT) - dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code], - signature); + dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature); else - dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code], - signature); + dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature); if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_FPCI_DECODE_ERROR) { @@ -701,9 +702,9 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg) u64 address = (u64)fpci << 32 | (signature & 0xfffffffc); if (code == AFI_INTR_MASTER_ABORT) - dev_dbg(pcie->dev, " FPCI address: %10llx\n", address); + dev_dbg(dev, " FPCI address: %10llx\n", address); else - dev_err(pcie->dev, " FPCI address: %10llx\n", address); + dev_err(dev, " FPCI address: %10llx\n", address); } return IRQ_HANDLED; @@ -793,6 +794,7 @@ static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout) static int tegra_pcie_phy_enable(struct tegra_pcie *pcie) { + struct device *dev = pcie->dev; const struct tegra_pcie_soc *soc = pcie->soc; u32 value; int err; @@ -829,7 +831,7 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie) /* wait for the PLL to lock */ err = tegra_pcie_pll_wait(pcie, 500); if (err < 0) { - dev_err(pcie->dev, "PLL failed to lock: %d\n", err); + dev_err(dev, "PLL failed to lock: %d\n", err); return err; } @@ -859,7 +861,7 @@ 
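/* the hunk below fixes a latent argument-order bug: pads_writel() takes (pcie, value, offset), so the old pads_writel(pcie, PADS_CTL, value) wrote the register offset as the data */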
static int tegra_pcie_phy_disable(struct tegra_pcie *pcie) /* override IDDQ */ value = pads_readl(pcie, PADS_CTL); value |= PADS_CTL_IDDQ_1L; - pads_writel(pcie, PADS_CTL, value); + pads_writel(pcie, value, PADS_CTL); /* reset PLL */ value = pads_readl(pcie, soc->pads_pll_ctl); @@ -880,8 +882,7 @@ static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port) for (i = 0; i < port->lanes; i++) { err = phy_power_on(port->phys[i]); if (err < 0) { - dev_err(dev, "failed to power on PHY#%u: %d\n", i, - err); + dev_err(dev, "failed to power on PHY#%u: %d\n", i, err); return err; } } @@ -909,6 +910,7 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port) static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie) { + struct device *dev = pcie->dev; const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_pcie_port *port; int err; @@ -920,7 +922,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie) err = tegra_pcie_phy_enable(pcie); if (err < 0) - dev_err(pcie->dev, "failed to power on PHY: %d\n", err); + dev_err(dev, "failed to power on PHY: %d\n", err); return err; } @@ -928,7 +930,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie) list_for_each_entry(port, &pcie->ports, list) { err = tegra_pcie_port_phy_power_on(port); if (err < 0) { - dev_err(pcie->dev, + dev_err(dev, "failed to power on PCIe port %u PHY: %d\n", port->index, err); return err; @@ -946,6 +948,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie) static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie) { + struct device *dev = pcie->dev; struct tegra_pcie_port *port; int err; @@ -956,8 +959,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie) err = tegra_pcie_phy_disable(pcie); if (err < 0) - dev_err(pcie->dev, "failed to power off PHY: %d\n", - err); + dev_err(dev, "failed to power off PHY: %d\n", err); return err; } @@ -965,7 +967,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie) list_for_each_entry(port, &pcie->ports, list) { err = tegra_pcie_port_phy_power_off(port); if (err < 0) { - dev_err(pcie->dev, + dev_err(dev, "failed to power off PCIe port %u PHY: %d\n", port->index, err); return err; @@ -977,6 +979,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie) static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) { + struct device *dev = pcie->dev; const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_pcie_port *port; unsigned long value; @@ -1016,7 +1019,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) err = tegra_pcie_phy_power_on(pcie); if (err < 0) { - dev_err(pcie->dev, "failed to power on PHY(s): %d\n", err); + dev_err(dev, "failed to power on PHY(s): %d\n", err); return err; } @@ -1049,13 +1052,14 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) static void tegra_pcie_power_off(struct tegra_pcie *pcie) { + struct device *dev = pcie->dev; int err; /* TODO: disable and unprepare clocks? 
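(Presumably the inverse of tegra_pcie_power_on(): clk_disable_unprepare() on pll_e, the optional cml_clk and afi_clk; left unresolved upstream.)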
*/ err = tegra_pcie_phy_power_off(pcie); if (err < 0) - dev_err(pcie->dev, "failed to power off PHY(s): %d\n", err); + dev_err(dev, "failed to power off PHY(s): %d\n", err); reset_control_assert(pcie->pcie_xrst); reset_control_assert(pcie->afi_rst); @@ -1065,11 +1069,12 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie) err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies); if (err < 0) - dev_warn(pcie->dev, "failed to disable regulators: %d\n", err); + dev_warn(dev, "failed to disable regulators: %d\n", err); } static int tegra_pcie_power_on(struct tegra_pcie *pcie) { + struct device *dev = pcie->dev; const struct tegra_pcie_soc *soc = pcie->soc; int err; @@ -1082,13 +1087,13 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie) /* enable regulators */ err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies); if (err < 0) - dev_err(pcie->dev, "failed to enable regulators: %d\n", err); + dev_err(dev, "failed to enable regulators: %d\n", err); err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE, pcie->pex_clk, pcie->pex_rst); if (err) { - dev_err(pcie->dev, "powerup sequence failed: %d\n", err); + dev_err(dev, "powerup sequence failed: %d\n", err); return err; } @@ -1096,22 +1101,21 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie) err = clk_prepare_enable(pcie->afi_clk); if (err < 0) { - dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err); + dev_err(dev, "failed to enable AFI clock: %d\n", err); return err; } if (soc->has_cml_clk) { err = clk_prepare_enable(pcie->cml_clk); if (err < 0) { - dev_err(pcie->dev, "failed to enable CML clock: %d\n", - err); + dev_err(dev, "failed to enable CML clock: %d\n", err); return err; } } err = clk_prepare_enable(pcie->pll_e); if (err < 0) { - dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err); + dev_err(dev, "failed to enable PLLE clock: %d\n", err); return err; } @@ -1120,22 +1124,23 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie) static int tegra_pcie_clocks_get(struct tegra_pcie *pcie) { + struct device *dev = pcie->dev; const struct tegra_pcie_soc *soc = pcie->soc; - pcie->pex_clk = devm_clk_get(pcie->dev, "pex"); + pcie->pex_clk = devm_clk_get(dev, "pex"); if (IS_ERR(pcie->pex_clk)) return PTR_ERR(pcie->pex_clk); - pcie->afi_clk = devm_clk_get(pcie->dev, "afi"); + pcie->afi_clk = devm_clk_get(dev, "afi"); if (IS_ERR(pcie->afi_clk)) return PTR_ERR(pcie->afi_clk); - pcie->pll_e = devm_clk_get(pcie->dev, "pll_e"); + pcie->pll_e = devm_clk_get(dev, "pll_e"); if (IS_ERR(pcie->pll_e)) return PTR_ERR(pcie->pll_e); if (soc->has_cml_clk) { - pcie->cml_clk = devm_clk_get(pcie->dev, "cml"); + pcie->cml_clk = devm_clk_get(dev, "cml"); if (IS_ERR(pcie->cml_clk)) return PTR_ERR(pcie->cml_clk); } @@ -1145,15 +1150,17 @@ static int tegra_pcie_clocks_get(struct tegra_pcie *pcie) static int tegra_pcie_resets_get(struct tegra_pcie *pcie) { - pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex"); + struct device *dev = pcie->dev; + + pcie->pex_rst = devm_reset_control_get(dev, "pex"); if (IS_ERR(pcie->pex_rst)) return PTR_ERR(pcie->pex_rst); - pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi"); + pcie->afi_rst = devm_reset_control_get(dev, "afi"); if (IS_ERR(pcie->afi_rst)) return PTR_ERR(pcie->afi_rst); - pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x"); + pcie->pcie_xrst = devm_reset_control_get(dev, "pcie_x"); if (IS_ERR(pcie->pcie_xrst)) return PTR_ERR(pcie->pcie_xrst); @@ -1162,18 +1169,19 @@ static int tegra_pcie_resets_get(struct tegra_pcie *pcie) static int 
tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie) { + struct device *dev = pcie->dev; int err; - pcie->phy = devm_phy_optional_get(pcie->dev, "pcie"); + pcie->phy = devm_phy_optional_get(dev, "pcie"); if (IS_ERR(pcie->phy)) { err = PTR_ERR(pcie->phy); - dev_err(pcie->dev, "failed to get PHY: %d\n", err); + dev_err(dev, "failed to get PHY: %d\n", err); return err; } err = phy_init(pcie->phy); if (err < 0) { - dev_err(pcie->dev, "failed to initialize PHY: %d\n", err); + dev_err(dev, "failed to initialize PHY: %d\n", err); return err; } @@ -1256,43 +1264,44 @@ static int tegra_pcie_phys_get(struct tegra_pcie *pcie) static int tegra_pcie_get_resources(struct tegra_pcie *pcie) { - struct platform_device *pdev = to_platform_device(pcie->dev); + struct device *dev = pcie->dev; + struct platform_device *pdev = to_platform_device(dev); struct resource *pads, *afi, *res; int err; err = tegra_pcie_clocks_get(pcie); if (err) { - dev_err(&pdev->dev, "failed to get clocks: %d\n", err); + dev_err(dev, "failed to get clocks: %d\n", err); return err; } err = tegra_pcie_resets_get(pcie); if (err) { - dev_err(&pdev->dev, "failed to get resets: %d\n", err); + dev_err(dev, "failed to get resets: %d\n", err); return err; } err = tegra_pcie_phys_get(pcie); if (err < 0) { - dev_err(&pdev->dev, "failed to get PHYs: %d\n", err); + dev_err(dev, "failed to get PHYs: %d\n", err); return err; } err = tegra_pcie_power_on(pcie); if (err) { - dev_err(&pdev->dev, "failed to power up: %d\n", err); + dev_err(dev, "failed to power up: %d\n", err); return err; } pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads"); - pcie->pads = devm_ioremap_resource(&pdev->dev, pads); + pcie->pads = devm_ioremap_resource(dev, pads); if (IS_ERR(pcie->pads)) { err = PTR_ERR(pcie->pads); goto poweroff; } afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi"); - pcie->afi = devm_ioremap_resource(&pdev->dev, afi); + pcie->afi = devm_ioremap_resource(dev, afi); if (IS_ERR(pcie->afi)) { err = PTR_ERR(pcie->afi); goto poweroff; @@ -1305,7 +1314,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie) goto poweroff; } - pcie->cs = devm_request_mem_region(pcie->dev, res->start, + pcie->cs = devm_request_mem_region(dev, res->start, resource_size(res), res->name); if (!pcie->cs) { err = -EADDRNOTAVAIL; @@ -1315,7 +1324,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie) /* request interrupt */ err = platform_get_irq_byname(pdev, "intr"); if (err < 0) { - dev_err(&pdev->dev, "failed to get IRQ: %d\n", err); + dev_err(dev, "failed to get IRQ: %d\n", err); goto poweroff; } @@ -1323,7 +1332,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie) err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie); if (err) { - dev_err(&pdev->dev, "failed to register IRQ: %d\n", err); + dev_err(dev, "failed to register IRQ: %d\n", err); goto poweroff; } @@ -1336,6 +1345,7 @@ poweroff: static int tegra_pcie_put_resources(struct tegra_pcie *pcie) { + struct device *dev = pcie->dev; int err; if (pcie->irq > 0) @@ -1345,7 +1355,7 @@ static int tegra_pcie_put_resources(struct tegra_pcie *pcie) err = phy_exit(pcie->phy); if (err < 0) - dev_err(pcie->dev, "failed to teardown PHY: %d\n", err); + dev_err(dev, "failed to teardown PHY: %d\n", err); return 0; } @@ -1384,6 +1394,7 @@ static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq) static irqreturn_t tegra_pcie_msi_irq(int irq, void *data) { struct tegra_pcie *pcie = data; + struct device *dev = pcie->dev; struct tegra_msi *msi = &pcie->msi; 
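/*
 * (Annotation, not part of the patch: the recurring theme in these
 * tegra hunks is caching the device pointer once per function instead
 * of dereferencing pcie->dev at every logging call. Illustrative
 * shape, with do_thing() as a made-up callee:
 *
 *	struct device *dev = pcie->dev;
 *
 *	err = do_thing(pcie);
 *	if (err < 0)
 *		dev_err(dev, "do_thing failed: %d\n", err);
 *
 * which also lets previously wrapped dev_err() calls fit on one line.)
 */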
unsigned int i, processed = 0; @@ -1403,13 +1414,13 @@ static irqreturn_t tegra_pcie_msi_irq(int irq, void *data) if (test_bit(index, msi->used)) generic_handle_irq(irq); else - dev_info(pcie->dev, "unhandled MSI\n"); + dev_info(dev, "unhandled MSI\n"); } else { /* * that's weird who triggered this? * just clear it */ - dev_info(pcie->dev, "unexpected MSI\n"); + dev_info(dev, "unexpected MSI\n"); } /* see if there's any more pending in this vector */ @@ -1488,7 +1499,8 @@ static const struct irq_domain_ops msi_domain_ops = { static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) { - struct platform_device *pdev = to_platform_device(pcie->dev); + struct device *dev = pcie->dev; + struct platform_device *pdev = to_platform_device(dev); const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_msi *msi = &pcie->msi; unsigned long base; @@ -1497,20 +1509,20 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) mutex_init(&msi->lock); - msi->chip.dev = pcie->dev; + msi->chip.dev = dev; msi->chip.setup_irq = tegra_msi_setup_irq; msi->chip.teardown_irq = tegra_msi_teardown_irq; - msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR, + msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR, &msi_domain_ops, &msi->chip); if (!msi->domain) { - dev_err(&pdev->dev, "failed to create IRQ domain\n"); + dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; } err = platform_get_irq_byname(pdev, "msi"); if (err < 0) { - dev_err(&pdev->dev, "failed to get IRQ: %d\n", err); + dev_err(dev, "failed to get IRQ: %d\n", err); goto err; } @@ -1519,7 +1531,7 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD, tegra_msi_irq_chip.name, pcie); if (err < 0) { - dev_err(&pdev->dev, "failed to request IRQ: %d\n", err); + dev_err(dev, "failed to request IRQ: %d\n", err); goto err; } @@ -1594,46 +1606,47 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie) static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes, u32 *xbar) { - struct device_node *np = pcie->dev->of_node; + struct device *dev = pcie->dev; + struct device_node *np = dev->of_node; if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) { switch (lanes) { case 0x0000104: - dev_info(pcie->dev, "4x1, 1x1 configuration\n"); + dev_info(dev, "4x1, 1x1 configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1; return 0; case 0x0000102: - dev_info(pcie->dev, "2x1, 1x1 configuration\n"); + dev_info(dev, "2x1, 1x1 configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1; return 0; } } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { switch (lanes) { case 0x00000204: - dev_info(pcie->dev, "4x1, 2x1 configuration\n"); + dev_info(dev, "4x1, 2x1 configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420; return 0; case 0x00020202: - dev_info(pcie->dev, "2x3 configuration\n"); + dev_info(dev, "2x3 configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222; return 0; case 0x00010104: - dev_info(pcie->dev, "4x1, 1x2 configuration\n"); + dev_info(dev, "4x1, 1x2 configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411; return 0; } } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) { switch (lanes) { case 0x00000004: - dev_info(pcie->dev, "single-mode configuration\n"); + dev_info(dev, "single-mode configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE; return 0; case 0x00000202: - dev_info(pcie->dev, "dual-mode configuration\n"); + 
dev_info(dev, "dual-mode configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL; return 0; } @@ -1673,7 +1686,8 @@ static bool of_regulator_bulk_available(struct device_node *np, */ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie) { - struct device_node *np = pcie->dev->of_node; + struct device *dev = pcie->dev; + struct device_node *np = dev->of_node; if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) pcie->num_supplies = 3; @@ -1681,12 +1695,12 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie) pcie->num_supplies = 2; if (pcie->num_supplies == 0) { - dev_err(pcie->dev, "device %s not supported in legacy mode\n", + dev_err(dev, "device %s not supported in legacy mode\n", np->full_name); return -ENODEV; } - pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, + pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, sizeof(*pcie->supplies), GFP_KERNEL); if (!pcie->supplies) @@ -1698,8 +1712,7 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie) if (pcie->num_supplies > 2) pcie->supplies[2].supply = "avdd"; - return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies, - pcie->supplies); + return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies); } /* @@ -1713,13 +1726,14 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie) */ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) { - struct device_node *np = pcie->dev->of_node; + struct device *dev = pcie->dev; + struct device_node *np = dev->of_node; unsigned int i = 0; if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) { pcie->num_supplies = 7; - pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, + pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, sizeof(*pcie->supplies), GFP_KERNEL); if (!pcie->supplies) @@ -1746,7 +1760,7 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) pcie->num_supplies = 4 + (need_pexa ? 2 : 0) + (need_pexb ? 2 : 0); - pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, + pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, sizeof(*pcie->supplies), GFP_KERNEL); if (!pcie->supplies) @@ -1769,7 +1783,7 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) { pcie->num_supplies = 5; - pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, + pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, sizeof(*pcie->supplies), GFP_KERNEL); if (!pcie->supplies) @@ -1782,9 +1796,9 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) pcie->supplies[4].supply = "vddio-pex-clk"; } - if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies, + if (of_regulator_bulk_available(dev->of_node, pcie->supplies, pcie->num_supplies)) - return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies, + return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies); /* @@ -1792,9 +1806,9 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) * that the device tree complies with an older version of the device * tree binding. 
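* (Annotation, not part of the patch: in that fallback the just
* allocated supplies array is devm_kfree()d and num_supplies is reset
* to zero before tegra_pcie_get_legacy_regulators() redoes the lookup
* with the legacy supply names, so nothing from the failed new-binding
* attempt is carried over.)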
*/ - dev_info(pcie->dev, "using legacy DT binding for power supplies\n"); + dev_info(dev, "using legacy DT binding for power supplies\n"); - devm_kfree(pcie->dev, pcie->supplies); + devm_kfree(dev, pcie->supplies); pcie->num_supplies = 0; return tegra_pcie_get_legacy_regulators(pcie); @@ -1802,7 +1816,8 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) { - struct device_node *np = pcie->dev->of_node, *port; + struct device *dev = pcie->dev; + struct device_node *np = dev->of_node, *port; const struct tegra_pcie_soc *soc = pcie->soc; struct of_pci_range_parser parser; struct of_pci_range range; @@ -1812,7 +1827,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) int err; if (of_pci_range_parser_init(&parser, np)) { - dev_err(pcie->dev, "missing \"ranges\" property\n"); + dev_err(dev, "missing \"ranges\" property\n"); return -EINVAL; } @@ -1867,8 +1882,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) err = of_pci_parse_bus_range(np, &pcie->busn); if (err < 0) { - dev_err(pcie->dev, "failed to parse ranges property: %d\n", - err); + dev_err(dev, "failed to parse ranges property: %d\n", err); pcie->busn.name = np->name; pcie->busn.start = 0; pcie->busn.end = 0xff; @@ -1883,15 +1897,14 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) err = of_pci_get_devfn(port); if (err < 0) { - dev_err(pcie->dev, "failed to parse address: %d\n", - err); + dev_err(dev, "failed to parse address: %d\n", err); return err; } index = PCI_SLOT(err); if (index < 1 || index > soc->num_ports) { - dev_err(pcie->dev, "invalid port number: %d\n", index); + dev_err(dev, "invalid port number: %d\n", index); return -EINVAL; } @@ -1899,13 +1912,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) err = of_property_read_u32(port, "nvidia,num-lanes", &value); if (err < 0) { - dev_err(pcie->dev, "failed to parse # of lanes: %d\n", + dev_err(dev, "failed to parse # of lanes: %d\n", err); return err; } if (value > 16) { - dev_err(pcie->dev, "invalid # of lanes: %u\n", value); + dev_err(dev, "invalid # of lanes: %u\n", value); return -EINVAL; } @@ -1919,14 +1932,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) mask |= ((1 << value) - 1) << lane; lane += value; - rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL); + rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL); if (!rp) return -ENOMEM; err = of_address_to_resource(port, 0, &rp->regs); if (err < 0) { - dev_err(pcie->dev, "failed to parse address: %d\n", - err); + dev_err(dev, "failed to parse address: %d\n", err); return err; } @@ -1936,7 +1948,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) rp->pcie = pcie; rp->np = port; - rp->base = devm_ioremap_resource(pcie->dev, &rp->regs); + rp->base = devm_ioremap_resource(dev, &rp->regs); if (IS_ERR(rp->base)) return PTR_ERR(rp->base); @@ -1945,7 +1957,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config); if (err < 0) { - dev_err(pcie->dev, "invalid lane configuration\n"); + dev_err(dev, "invalid lane configuration\n"); return err; } @@ -1964,6 +1976,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) #define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port) { + struct device *dev = port->pcie->dev; unsigned int retries = 3; unsigned long value; @@ -1986,8 +1999,7 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port 
*port) } while (--timeout); if (!timeout) { - dev_err(port->pcie->dev, "link %u down, retrying\n", - port->index); + dev_err(dev, "link %u down, retrying\n", port->index); goto retry; } @@ -2011,11 +2023,12 @@ retry: static int tegra_pcie_enable(struct tegra_pcie *pcie) { + struct device *dev = pcie->dev; struct tegra_pcie_port *port, *tmp; struct hw_pci hw; list_for_each_entry_safe(port, tmp, &pcie->ports, list) { - dev_info(pcie->dev, "probing port %u, using %u lanes\n", + dev_info(dev, "probing port %u, using %u lanes\n", port->index, port->lanes); tegra_pcie_port_enable(port); @@ -2023,7 +2036,7 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie) if (tegra_pcie_port_check_link(port)) continue; - dev_info(pcie->dev, "link %u down, ignoring\n", port->index); + dev_info(dev, "link %u down, ignoring\n", port->index); tegra_pcie_port_disable(port); tegra_pcie_port_free(port); @@ -2041,8 +2054,7 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie) hw.map_irq = tegra_pcie_map_irq; hw.ops = &tegra_pcie_ops; - pci_common_init_dev(pcie->dev, &hw); - + pci_common_init_dev(dev, &hw); return 0; } @@ -2204,17 +2216,18 @@ remove: static int tegra_pcie_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct tegra_pcie *pcie; int err; - pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; - pcie->soc = of_device_get_match_data(&pdev->dev); + pcie->soc = of_device_get_match_data(dev); INIT_LIST_HEAD(&pcie->buses); INIT_LIST_HEAD(&pcie->ports); - pcie->dev = &pdev->dev; + pcie->dev = dev; err = tegra_pcie_parse_dt(pcie); if (err < 0) @@ -2222,7 +2235,7 @@ static int tegra_pcie_probe(struct platform_device *pdev) err = tegra_pcie_get_resources(pcie); if (err < 0) { - dev_err(&pdev->dev, "failed to request resources: %d\n", err); + dev_err(dev, "failed to request resources: %d\n", err); return err; } @@ -2236,27 +2249,23 @@ static int tegra_pcie_probe(struct platform_device *pdev) if (IS_ENABLED(CONFIG_PCI_MSI)) { err = tegra_pcie_enable_msi(pcie); if (err < 0) { - dev_err(&pdev->dev, - "failed to enable MSI support: %d\n", - err); + dev_err(dev, "failed to enable MSI support: %d\n", err); goto put_resources; } } err = tegra_pcie_enable(pcie); if (err < 0) { - dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err); + dev_err(dev, "failed to enable PCIe ports: %d\n", err); goto disable_msi; } if (IS_ENABLED(CONFIG_DEBUG_FS)) { err = tegra_pcie_debugfs_init(pcie); if (err < 0) - dev_err(&pdev->dev, "failed to setup debugfs: %d\n", - err); + dev_err(dev, "failed to setup debugfs: %d\n", err); } - platform_set_drvdata(pdev, pcie); return 0; disable_msi: diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index a81273c23341..1de23d74783f 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -76,6 +76,16 @@ struct xgene_pcie_port { u32 version; }; +static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg) +{ + return readl(port->csr_base + reg); +} + +static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val) +{ + writel(val, port->csr_base + reg); +} + static inline u32 pcie_bar_low_val(u32 addr, u32 flags) { return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags; @@ -112,9 +122,9 @@ static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn) if (!pci_is_root_bus(bus)) rtdid_val = (b << 8) | (d << 3) | f; - writel(rtdid_val, port->csr_base + RTDID); + xgene_pcie_writel(port, RTDID, rtdid_val); /* read the 
register back to ensure flush */ - readl(port->csr_base + RTDID); + xgene_pcie_readl(port, RTDID); } /* @@ -179,28 +189,28 @@ static struct pci_ops xgene_pcie_ops = { .write = pci_generic_config_write32, }; -static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr, +static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr, u32 flags, u64 size) { u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags; u32 val32 = 0; u32 val; - val32 = readl(csr_base + addr); + val32 = xgene_pcie_readl(port, addr); val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16); - writel(val, csr_base + addr); + xgene_pcie_writel(port, addr, val); - val32 = readl(csr_base + addr + 0x04); + val32 = xgene_pcie_readl(port, addr + 0x04); val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16); - writel(val, csr_base + addr + 0x04); + xgene_pcie_writel(port, addr + 0x04, val); - val32 = readl(csr_base + addr + 0x04); + val32 = xgene_pcie_readl(port, addr + 0x04); val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16); - writel(val, csr_base + addr + 0x04); + xgene_pcie_writel(port, addr + 0x04, val); - val32 = readl(csr_base + addr + 0x08); + val32 = xgene_pcie_readl(port, addr + 0x08); val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16); - writel(val, csr_base + addr + 0x08); + xgene_pcie_writel(port, addr + 0x08, val); return mask; } @@ -208,32 +218,32 @@ static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr, static void xgene_pcie_linkup(struct xgene_pcie_port *port, u32 *lanes, u32 *speed) { - void __iomem *csr_base = port->csr_base; u32 val32; port->link_up = false; - val32 = readl(csr_base + PCIECORE_CTLANDSTATUS); + val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS); if (val32 & LINK_UP_MASK) { port->link_up = true; *speed = PIPE_PHY_RATE_RD(val32); - val32 = readl(csr_base + BRIDGE_STATUS_0); + val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0); *lanes = val32 >> 26; } } static int xgene_pcie_init_port(struct xgene_pcie_port *port) { + struct device *dev = port->dev; int rc; - port->clk = clk_get(port->dev, NULL); + port->clk = clk_get(dev, NULL); if (IS_ERR(port->clk)) { - dev_err(port->dev, "clock not available\n"); + dev_err(dev, "clock not available\n"); return -ENODEV; } rc = clk_prepare_enable(port->clk); if (rc) { - dev_err(port->dev, "clock enable failed\n"); + dev_err(dev, "clock enable failed\n"); return rc; } @@ -243,15 +253,16 @@ static int xgene_pcie_init_port(struct xgene_pcie_port *port) static int xgene_pcie_map_reg(struct xgene_pcie_port *port, struct platform_device *pdev) { + struct device *dev = port->dev; struct resource *res; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); - port->csr_base = devm_ioremap_resource(port->dev, res); + port->csr_base = devm_ioremap_resource(dev, res); if (IS_ERR(port->csr_base)) return PTR_ERR(port->csr_base); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); - port->cfg_base = devm_ioremap_resource(port->dev, res); + port->cfg_base = devm_ioremap_resource(dev, res); if (IS_ERR(port->cfg_base)) return PTR_ERR(port->cfg_base); port->cfg_addr = res->start; @@ -263,7 +274,7 @@ static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port, struct resource *res, u32 offset, u64 cpu_addr, u64 pci_addr) { - void __iomem *base = port->csr_base + offset; + struct device *dev = port->dev; resource_size_t size = resource_size(res); u64 restype = resource_type(res); u64 mask = 0; @@ -280,22 +291,24 @@ static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port, if (size >= 
min_size) mask = ~(size - 1) | flag; else - dev_warn(port->dev, "res size 0x%llx less than minimum 0x%x\n", + dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n", (u64)size, min_size); - writel(lower_32_bits(cpu_addr), base); - writel(upper_32_bits(cpu_addr), base + 0x04); - writel(lower_32_bits(mask), base + 0x08); - writel(upper_32_bits(mask), base + 0x0c); - writel(lower_32_bits(pci_addr), base + 0x10); - writel(upper_32_bits(pci_addr), base + 0x14); + xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr)); + xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr)); + xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask)); + xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask)); + xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr)); + xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr)); } -static void xgene_pcie_setup_cfg_reg(void __iomem *csr_base, u64 addr) +static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port) { - writel(lower_32_bits(addr), csr_base + CFGBARL); - writel(upper_32_bits(addr), csr_base + CFGBARH); - writel(EN_REG, csr_base + CFGCTL); + u64 addr = port->cfg_addr; + + xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr)); + xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr)); + xgene_pcie_writel(port, CFGCTL, EN_REG); } static int xgene_pcie_map_ranges(struct xgene_pcie_port *port, @@ -310,7 +323,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port, struct resource *res = window->res; u64 restype = resource_type(res); - dev_dbg(port->dev, "%pR\n", res); + dev_dbg(dev, "%pR\n", res); switch (restype) { case IORESOURCE_IO: @@ -339,17 +352,18 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port, return -EINVAL; } } - xgene_pcie_setup_cfg_reg(port->csr_base, port->cfg_addr); - + xgene_pcie_setup_cfg_reg(port); return 0; } -static void xgene_pcie_setup_pims(void *addr, u64 pim, u64 size) +static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg, + u64 pim, u64 size) { - writel(lower_32_bits(pim), addr); - writel(upper_32_bits(pim) | EN_COHERENCY, addr + 0x04); - writel(lower_32_bits(size), addr + 0x10); - writel(upper_32_bits(size), addr + 0x14); + xgene_pcie_writel(port, pim_reg, lower_32_bits(pim)); + xgene_pcie_writel(port, pim_reg + 0x04, + upper_32_bits(pim) | EN_COHERENCY); + xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size)); + xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size)); } /* @@ -379,10 +393,10 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, struct of_pci_range *range, u8 *ib_reg_mask) { - void __iomem *csr_base = port->csr_base; void __iomem *cfg_base = port->cfg_base; + struct device *dev = port->dev; void *bar_addr; - void *pim_addr; + u32 pim_reg; u64 cpu_addr = range->cpu_addr; u64 pci_addr = range->pci_addr; u64 size = range->size; @@ -393,7 +407,7 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size); if (region < 0) { - dev_warn(port->dev, "invalid pcie dma-range config\n"); + dev_warn(dev, "invalid pcie dma-range config\n"); return; } @@ -403,29 +417,27 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, bar_low = pcie_bar_low_val((u32)cpu_addr, flags); switch (region) { case 0: - xgene_pcie_set_ib_mask(csr_base, BRIDGE_CFG_4, flags, size); + xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size); bar_addr = cfg_base + PCI_BASE_ADDRESS_0; 
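/*
 * (Annotation, not part of the patch: with xgene_pcie_readl()/
 * xgene_pcie_writel() taking a register offset, read-modify-write
 * sequences no longer touch port->csr_base directly. Illustrative
 * shape, SOME_BIT being a placeholder:
 *
 *	val = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS);
 *	val |= SOME_BIT;
 *	xgene_pcie_writel(port, PCIECORE_CTLANDSTATUS, val);
 *
 * The helpers hide readl()'s address-only and writel()'s value-first
 * conventions behind a uniform (port, reg[, val]) argument order.)
 */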
writel(bar_low, bar_addr); writel(upper_32_bits(cpu_addr), bar_addr + 0x4); - pim_addr = csr_base + PIM1_1L; + pim_reg = PIM1_1L; break; case 1: - bar_addr = csr_base + IBAR2; - writel(bar_low, bar_addr); - writel(lower_32_bits(mask), csr_base + IR2MSK); - pim_addr = csr_base + PIM2_1L; + xgene_pcie_writel(port, IBAR2, bar_low); + xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask)); + pim_reg = PIM2_1L; break; case 2: - bar_addr = csr_base + IBAR3L; - writel(bar_low, bar_addr); - writel(upper_32_bits(cpu_addr), bar_addr + 0x4); - writel(lower_32_bits(mask), csr_base + IR3MSKL); - writel(upper_32_bits(mask), csr_base + IR3MSKL + 0x4); - pim_addr = csr_base + PIM3_1L; + xgene_pcie_writel(port, IBAR3L, bar_low); + xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr)); + xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask)); + xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask)); + pim_reg = PIM3_1L; break; } - xgene_pcie_setup_pims(pim_addr, pci_addr, ~(size - 1)); + xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1)); } static int pci_dma_range_parser_init(struct of_pci_range_parser *parser, @@ -463,7 +475,7 @@ static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port) for_each_of_pci_range(&parser, &range) { u64 end = range.cpu_addr + range.size - 1; - dev_dbg(port->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", + dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", range.flags, range.cpu_addr, end, range.pci_addr); xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask); } @@ -476,13 +488,14 @@ static void xgene_pcie_clear_config(struct xgene_pcie_port *port) int i; for (i = PIM1_1L; i <= CFGCTL; i += 4) - writel(0x0, port->csr_base + i); + xgene_pcie_writel(port, i, 0); } static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res, resource_size_t io_base) { + struct device *dev = port->dev; u32 val, lanes = 0, speed = 0; int ret; @@ -490,7 +503,7 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port, /* setup the vendor and device IDs correctly */ val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID; - writel(val, port->csr_base + BRIDGE_CFG_0); + xgene_pcie_writel(port, BRIDGE_CFG_0, val); ret = xgene_pcie_map_ranges(port, res, io_base); if (ret) @@ -502,27 +515,28 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port, xgene_pcie_linkup(port, &lanes, &speed); if (!port->link_up) - dev_info(port->dev, "(rc) link down\n"); + dev_info(dev, "(rc) link down\n"); else - dev_info(port->dev, "(rc) x%d gen-%d link up\n", - lanes, speed + 1); + dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1); return 0; } static int xgene_pcie_probe_bridge(struct platform_device *pdev) { - struct device_node *dn = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct device_node *dn = dev->of_node; struct xgene_pcie_port *port; resource_size_t iobase = 0; struct pci_bus *bus; int ret; LIST_HEAD(res); - port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); + port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; - port->node = of_node_get(pdev->dev.of_node); - port->dev = &pdev->dev; + + port->node = of_node_get(dn); + port->dev = dev; port->version = XGENE_PCIE_IP_VER_UNKN; if (of_device_is_compatible(port->node, "apm,xgene-pcie")) @@ -540,7 +554,7 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) if (ret) return ret; - ret = devm_request_pci_bus_resources(&pdev->dev, &res); + ret = devm_request_pci_bus_resources(dev, &res); if (ret) goto error; @@ -548,8 +562,7 @@ 
static int xgene_pcie_probe_bridge(struct platform_device *pdev) if (ret) goto error; - bus = pci_create_root_bus(&pdev->dev, 0, - &xgene_pcie_ops, port, &res); + bus = pci_create_root_bus(dev, 0, &xgene_pcie_ops, port, &res); if (!bus) { ret = -ENOMEM; goto error; @@ -558,8 +571,6 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) pci_scan_child_bus(bus); pci_assign_unassigned_bus_resources(bus); pci_bus_add_devices(bus); - - platform_set_drvdata(pdev, port); return 0; error: diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c index c24e96559cbb..b0ac4dfafa0b 100644 --- a/drivers/pci/host/pcie-altera.c +++ b/drivers/pci/host/pcie-altera.c @@ -55,15 +55,19 @@ #define TLP_PAYLOAD_SIZE 0x01 #define TLP_READ_TAG 0x1d #define TLP_WRITE_TAG 0x10 -#define TLP_CFG_DW0(fmttype) (((fmttype) << 24) | TLP_PAYLOAD_SIZE) -#define TLP_CFG_DW1(reqid, tag, be) (((reqid) << 16) | (tag << 8) | (be)) +#define RP_DEVFN 0 +#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) +#define TLP_CFG_DW0(pcie, bus) \ + ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \ + : TLP_FMTTYPE_CFGRD1) << 24) | \ + TLP_PAYLOAD_SIZE) +#define TLP_CFG_DW1(pcie, tag, be) \ + (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) #define TLP_CFG_DW2(bus, devfn, offset) \ (((bus) << 24) | ((devfn) << 16) | (offset)) -#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) #define TLP_COMP_STATUS(s) (((s) >> 12) & 7) #define TLP_HDR_SIZE 3 #define TLP_LOOP 500 -#define RP_DEVFN 0 #define LINK_UP_TIMEOUT HZ #define LINK_RETRAIN_TIMEOUT HZ @@ -74,7 +78,7 @@ struct altera_pcie { struct platform_device *pdev; - void __iomem *cra_base; + void __iomem *cra_base; /* DT Cra */ int irq; u8 root_bus_nr; struct irq_domain *irq_domain; @@ -131,7 +135,7 @@ static void tlp_write_tx(struct altera_pcie *pcie, cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL); } -static bool altera_pcie_valid_config(struct altera_pcie *pcie, +static bool altera_pcie_valid_device(struct altera_pcie *pcie, struct pci_bus *bus, int dev) { /* If there is no link, then there is no device */ @@ -218,13 +222,8 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, { u32 headers[TLP_HDR_SIZE]; - if (bus == pcie->root_bus_nr) - headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD0); - else - headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1); - - headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN), - TLP_READ_TAG, byte_en); + headers[0] = TLP_CFG_DW0(pcie, bus); + headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en); headers[2] = TLP_CFG_DW2(bus, devfn, where); tlp_write_packet(pcie, headers, 0, false); @@ -238,13 +237,8 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, u32 headers[TLP_HDR_SIZE]; int ret; - if (bus == pcie->root_bus_nr) - headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR0); - else - headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1); - - headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN), - TLP_WRITE_TAG, byte_en); + headers[0] = TLP_CFG_DW0(pcie, bus); + headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en); headers[2] = TLP_CFG_DW2(bus, devfn, where); /* check alignment to Qword */ @@ -342,7 +336,7 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn, if (altera_pcie_hide_rc_bar(bus, devfn, where)) return PCIBIOS_BAD_REGISTER_NUMBER; - if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) { + if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) { *value = 0xffffffff; return PCIBIOS_DEVICE_NOT_FOUND; } @@ 
-359,7 +353,7 @@ static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn, if (altera_pcie_hide_rc_bar(bus, devfn, where)) return PCIBIOS_BAD_REGISTER_NUMBER; - if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) + if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) return PCIBIOS_DEVICE_NOT_FOUND; return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size, @@ -394,6 +388,7 @@ static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno, static void altera_wait_link_retrain(struct altera_pcie *pcie) { + struct device *dev = &pcie->pdev->dev; u16 reg16; unsigned long start_jiffies; @@ -406,7 +401,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie) break; if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) { - dev_err(&pcie->pdev->dev, "link retrain timeout\n"); + dev_err(dev, "link retrain timeout\n"); break; } udelay(100); @@ -419,7 +414,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie) break; if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) { - dev_err(&pcie->pdev->dev, "link up timeout\n"); + dev_err(dev, "link up timeout\n"); break; } udelay(100); @@ -460,7 +455,6 @@ static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq, { irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); irq_set_chip_data(irq, domain->host_data); - return 0; } @@ -472,12 +466,14 @@ static void altera_pcie_isr(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct altera_pcie *pcie; + struct device *dev; unsigned long status; u32 bit; u32 virq; chained_irq_enter(chip, desc); pcie = irq_desc_get_handler_data(desc); + dev = &pcie->pdev->dev; while ((status = cra_readl(pcie, P2A_INT_STATUS) & P2A_INT_STS_ALL) != 0) { @@ -489,8 +485,7 @@ static void altera_pcie_isr(struct irq_desc *desc) if (virq) generic_handle_irq(virq); else - dev_err(&pcie->pdev->dev, - "unexpected IRQ, INT%d\n", bit); + dev_err(dev, "unexpected IRQ, INT%d\n", bit); } } @@ -549,30 +544,25 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie) static int altera_pcie_parse_dt(struct altera_pcie *pcie) { - struct resource *cra; + struct device *dev = &pcie->pdev->dev; struct platform_device *pdev = pcie->pdev; + struct resource *cra; cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra"); - if (!cra) { - dev_err(&pdev->dev, "no Cra memory resource defined\n"); - return -ENODEV; - } - - pcie->cra_base = devm_ioremap_resource(&pdev->dev, cra); + pcie->cra_base = devm_ioremap_resource(dev, cra); if (IS_ERR(pcie->cra_base)) { - dev_err(&pdev->dev, "failed to map cra memory\n"); + dev_err(dev, "failed to map cra memory\n"); return PTR_ERR(pcie->cra_base); } /* setup IRQ */ pcie->irq = platform_get_irq(pdev, 0); if (pcie->irq <= 0) { - dev_err(&pdev->dev, "failed to get IRQ: %d\n", pcie->irq); + dev_err(dev, "failed to get IRQ: %d\n", pcie->irq); return -EINVAL; } irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie); - return 0; } @@ -583,12 +573,13 @@ static void altera_pcie_host_init(struct altera_pcie *pcie) static int altera_pcie_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct altera_pcie *pcie; struct pci_bus *bus; struct pci_bus *child; int ret; - pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; @@ -596,7 +587,7 @@ static int altera_pcie_probe(struct platform_device *pdev) ret = altera_pcie_parse_dt(pcie); if (ret) { - 
dev_err(&pdev->dev, "Parsing DT failed\n"); + dev_err(dev, "Parsing DT failed\n"); return ret; } @@ -604,13 +595,13 @@ static int altera_pcie_probe(struct platform_device *pdev) ret = altera_pcie_parse_request_of_pci_ranges(pcie); if (ret) { - dev_err(&pdev->dev, "Failed add resources\n"); + dev_err(dev, "Failed add resources\n"); return ret; } ret = altera_pcie_init_irq_domain(pcie); if (ret) { - dev_err(&pdev->dev, "Failed creating IRQ Domain\n"); + dev_err(dev, "Failed creating IRQ Domain\n"); return ret; } @@ -620,7 +611,7 @@ static int altera_pcie_probe(struct platform_device *pdev) cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); altera_pcie_host_init(pcie); - bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops, + bus = pci_scan_root_bus(dev, pcie->root_bus_nr, &altera_pcie_ops, pcie, &pcie->resources); if (!bus) return -ENOMEM; @@ -633,8 +624,6 @@ static int altera_pcie_probe(struct platform_device *pdev) pcie_bus_configure_settings(child); pci_bus_add_devices(bus); - - platform_set_drvdata(pdev, pcie); return ret; } diff --git a/drivers/pci/host/pcie-armada8k.c b/drivers/pci/host/pcie-armada8k.c index 0f4f570068e3..0ac0f18690f2 100644 --- a/drivers/pci/host/pcie-armada8k.c +++ b/drivers/pci/host/pcie-armada8k.c @@ -29,34 +29,33 @@ #include "pcie-designware.h" struct armada8k_pcie { - void __iomem *base; + struct pcie_port pp; /* pp.dbi_base is DT ctrl */ struct clk *clk; - struct pcie_port pp; }; #define PCIE_VENDOR_REGS_OFFSET 0x8000 -#define PCIE_GLOBAL_CONTROL_REG 0x0 +#define PCIE_GLOBAL_CONTROL_REG (PCIE_VENDOR_REGS_OFFSET + 0x0) #define PCIE_APP_LTSSM_EN BIT(2) #define PCIE_DEVICE_TYPE_SHIFT 4 #define PCIE_DEVICE_TYPE_MASK 0xF #define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */ -#define PCIE_GLOBAL_STATUS_REG 0x8 +#define PCIE_GLOBAL_STATUS_REG (PCIE_VENDOR_REGS_OFFSET + 0x8) #define PCIE_GLB_STS_RDLH_LINK_UP BIT(1) #define PCIE_GLB_STS_PHY_LINK_UP BIT(9) -#define PCIE_GLOBAL_INT_CAUSE1_REG 0x1C -#define PCIE_GLOBAL_INT_MASK1_REG 0x20 +#define PCIE_GLOBAL_INT_CAUSE1_REG (PCIE_VENDOR_REGS_OFFSET + 0x1C) +#define PCIE_GLOBAL_INT_MASK1_REG (PCIE_VENDOR_REGS_OFFSET + 0x20) #define PCIE_INT_A_ASSERT_MASK BIT(9) #define PCIE_INT_B_ASSERT_MASK BIT(10) #define PCIE_INT_C_ASSERT_MASK BIT(11) #define PCIE_INT_D_ASSERT_MASK BIT(12) -#define PCIE_ARCACHE_TRC_REG 0x50 -#define PCIE_AWCACHE_TRC_REG 0x54 -#define PCIE_ARUSER_REG 0x5C -#define PCIE_AWUSER_REG 0x60 +#define PCIE_ARCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x50) +#define PCIE_AWCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x54) +#define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C) +#define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60) /* * AR/AW Cache defauls: Normal memory, Write-Back, Read / Write * allocate @@ -72,11 +71,10 @@ struct armada8k_pcie { static int armada8k_pcie_link_up(struct pcie_port *pp) { - struct armada8k_pcie *pcie = to_armada8k_pcie(pp); u32 reg; u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP; - reg = readl(pcie->base + PCIE_GLOBAL_STATUS_REG); + reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_STATUS_REG); if ((reg & mask) == mask) return 1; @@ -85,51 +83,50 @@ static int armada8k_pcie_link_up(struct pcie_port *pp) return 0; } -static void armada8k_pcie_establish_link(struct pcie_port *pp) +static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie) { - struct armada8k_pcie *pcie = to_armada8k_pcie(pp); - void __iomem *base = pcie->base; + struct pcie_port *pp = &pcie->pp; u32 reg; if (!dw_pcie_link_up(pp)) { /* Disable LTSSM state machine to enable configuration 
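* (Annotation, not part of the patch: PCIE_APP_LTSSM_EN gates link
* training; it is cleared here so the port can be reconfigured, and
* the tail of this function sets it again once configuration is done.)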
*/ - reg = readl(base + PCIE_GLOBAL_CONTROL_REG); + reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG); reg &= ~(PCIE_APP_LTSSM_EN); - writel(reg, base + PCIE_GLOBAL_CONTROL_REG); + dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg); } /* Set the device to root complex mode */ - reg = readl(base + PCIE_GLOBAL_CONTROL_REG); + reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG); reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT); reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT; - writel(reg, base + PCIE_GLOBAL_CONTROL_REG); + dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg); /* Set the PCIe master AxCache attributes */ - writel(ARCACHE_DEFAULT_VALUE, base + PCIE_ARCACHE_TRC_REG); - writel(AWCACHE_DEFAULT_VALUE, base + PCIE_AWCACHE_TRC_REG); + dw_pcie_writel_rc(pp, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE); + dw_pcie_writel_rc(pp, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE); /* Set the PCIe master AxDomain attributes */ - reg = readl(base + PCIE_ARUSER_REG); + reg = dw_pcie_readl_rc(pp, PCIE_ARUSER_REG); reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; - writel(reg, base + PCIE_ARUSER_REG); + dw_pcie_writel_rc(pp, PCIE_ARUSER_REG, reg); - reg = readl(base + PCIE_AWUSER_REG); + reg = dw_pcie_readl_rc(pp, PCIE_AWUSER_REG); reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; - writel(reg, base + PCIE_AWUSER_REG); + dw_pcie_writel_rc(pp, PCIE_AWUSER_REG, reg); /* Enable INT A-D interrupts */ - reg = readl(base + PCIE_GLOBAL_INT_MASK1_REG); + reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_INT_MASK1_REG); reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK | PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK; - writel(reg, base + PCIE_GLOBAL_INT_MASK1_REG); + dw_pcie_writel_rc(pp, PCIE_GLOBAL_INT_MASK1_REG, reg); if (!dw_pcie_link_up(pp)) { /* Configuration done. Start LTSSM */ - reg = readl(base + PCIE_GLOBAL_CONTROL_REG); + reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG); reg |= PCIE_APP_LTSSM_EN; - writel(reg, base + PCIE_GLOBAL_CONTROL_REG); + dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg); } /* Wait until the link becomes active again */ @@ -139,15 +136,16 @@ static void armada8k_pcie_establish_link(struct pcie_port *pp) static void armada8k_pcie_host_init(struct pcie_port *pp) { + struct armada8k_pcie *pcie = to_armada8k_pcie(pp); + dw_pcie_setup_rc(pp); - armada8k_pcie_establish_link(pp); + armada8k_pcie_establish_link(pcie); } static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) { - struct pcie_port *pp = arg; - struct armada8k_pcie *pcie = to_armada8k_pcie(pp); - void __iomem *base = pcie->base; + struct armada8k_pcie *pcie = arg; + struct pcie_port *pp = &pcie->pp; u32 val; /* @@ -155,8 +153,8 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) * PCI device. However, they are also latched into the PCIe * controller, so we simply discard them. 
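* (Annotation, not part of the patch: the cause register is cleared by
* writing back the value just read, now via dw_pcie_readl_rc()/
* dw_pcie_writel_rc() with PCIE_VENDOR_REGS_OFFSET folded into the
* register macros, instead of going through a separate pcie->base
* pointer.)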
*/ - val = readl(base + PCIE_GLOBAL_INT_CAUSE1_REG); - writel(val, base + PCIE_GLOBAL_INT_CAUSE1_REG); + val = dw_pcie_readl_rc(pp, PCIE_GLOBAL_INT_CAUSE1_REG); + dw_pcie_writel_rc(pp, PCIE_GLOBAL_INT_CAUSE1_REG, val); return IRQ_HANDLED; } @@ -166,9 +164,10 @@ static struct pcie_host_ops armada8k_pcie_host_ops = { .host_init = armada8k_pcie_host_init, }; -static int armada8k_add_pcie_port(struct pcie_port *pp, +static int armada8k_add_pcie_port(struct armada8k_pcie *pcie, struct platform_device *pdev) { + struct pcie_port *pp = &pcie->pp; struct device *dev = &pdev->dev; int ret; @@ -182,7 +181,7 @@ static int armada8k_add_pcie_port(struct pcie_port *pp, } ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler, - IRQF_SHARED, "armada8k-pcie", pp); + IRQF_SHARED, "armada8k-pcie", pcie); if (ret) { dev_err(dev, "failed to request irq %d\n", pp->irq); return ret; @@ -217,7 +216,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev) pp = &pcie->pp; pp->dev = dev; - platform_set_drvdata(pdev, pcie); /* Get the dw-pcie unit configuration/control registers base. */ base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); @@ -228,9 +226,7 @@ static int armada8k_pcie_probe(struct platform_device *pdev) goto fail; } - pcie->base = pp->dbi_base + PCIE_VENDOR_REGS_OFFSET; - - ret = armada8k_add_pcie_port(pp, pdev); + ret = armada8k_add_pcie_port(pcie, pdev); if (ret) goto fail; diff --git a/drivers/pci/host/pcie-artpec6.c b/drivers/pci/host/pcie-artpec6.c index 39bf1a6df463..212786b27f1a 100644 --- a/drivers/pci/host/pcie-artpec6.c +++ b/drivers/pci/host/pcie-artpec6.c @@ -27,9 +27,9 @@ #define to_artpec6_pcie(x) container_of(x, struct artpec6_pcie, pp) struct artpec6_pcie { - struct pcie_port pp; - struct regmap *regmap; - void __iomem *phy_base; + struct pcie_port pp; /* pp.dbi_base is DT dbi */ + struct regmap *regmap; /* DT axis,syscon-pcie */ + void __iomem *phy_base; /* DT phy */ }; /* PCIe Port Logic registers (memory-mapped) */ @@ -65,18 +65,31 @@ struct artpec6_pcie { #define ARTPEC6_CPU_TO_BUS_ADDR 0x0fffffff -static int artpec6_pcie_establish_link(struct pcie_port *pp) +static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset) { - struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp); + u32 val; + + regmap_read(artpec6_pcie->regmap, offset, &val); + return val; +} + +static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val) +{ + regmap_write(artpec6_pcie->regmap, offset, val); +} + +static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie) +{ + struct pcie_port *pp = &artpec6_pcie->pp; u32 val; unsigned int retries; /* Hold DW core in reset */ - regmap_read(artpec6_pcie->regmap, PCIECFG, &val); + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); val |= PCIECFG_CORE_RESET_REQ; - regmap_write(artpec6_pcie->regmap, PCIECFG, val); + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); - regmap_read(artpec6_pcie->regmap, PCIECFG, &val); + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */ PCIECFG_MODE_TX_DRV_EN | PCIECFG_CISRREN | /* Reference clock term. 
100 Ohm */ @@ -84,27 +97,27 @@ static int artpec6_pcie_establish_link(struct pcie_port *pp) val |= PCIECFG_REFCLK_ENABLE; val &= ~PCIECFG_DBG_OEN; val &= ~PCIECFG_CLKREQ_B; - regmap_write(artpec6_pcie->regmap, PCIECFG, val); + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); usleep_range(5000, 6000); - regmap_read(artpec6_pcie->regmap, NOCCFG, &val); + val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); val |= NOCCFG_ENABLE_CLK_PCIE; - regmap_write(artpec6_pcie->regmap, NOCCFG, val); + artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); usleep_range(20, 30); - regmap_read(artpec6_pcie->regmap, PCIECFG, &val); + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE; - regmap_write(artpec6_pcie->regmap, PCIECFG, val); + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); usleep_range(6000, 7000); - regmap_read(artpec6_pcie->regmap, NOCCFG, &val); + val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); val &= ~NOCCFG_POWER_PCIE_IDLEREQ; - regmap_write(artpec6_pcie->regmap, NOCCFG, val); + artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); retries = 50; do { usleep_range(1000, 2000); - regmap_read(artpec6_pcie->regmap, NOCCFG, &val); + val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); retries--; } while (retries && (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE))); @@ -117,16 +130,16 @@ static int artpec6_pcie_establish_link(struct pcie_port *pp) } while (retries && !(val & PHY_COSPLLLOCK)); /* Take DW core out of reset */ - regmap_read(artpec6_pcie->regmap, PCIECFG, &val); + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); val &= ~PCIECFG_CORE_RESET_REQ; - regmap_write(artpec6_pcie->regmap, PCIECFG, val); + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); usleep_range(100, 200); /* * Enable writing to config regs. This is required as the Synopsys * driver changes the class code. That register needs DBI write enable. 
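* (Annotation, not part of the patch: the unlock is the single
* dw_pcie_writel_rc(pp, MISC_CONTROL_1_OFF, DBI_RO_WR_EN) call right
* below, which replaces the raw writel() on pp->dbi_base.)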
*/ - writel(DBI_RO_WR_EN, pp->dbi_base + MISC_CONTROL_1_OFF); + dw_pcie_writel_rc(pp, MISC_CONTROL_1_OFF, DBI_RO_WR_EN); pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR; pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR; @@ -137,78 +150,69 @@ static int artpec6_pcie_establish_link(struct pcie_port *pp) dw_pcie_setup_rc(pp); /* assert LTSSM enable */ - regmap_read(artpec6_pcie->regmap, PCIECFG, &val); + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); val |= PCIECFG_LTSSM_ENABLE; - regmap_write(artpec6_pcie->regmap, PCIECFG, val); + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); /* check if the link is up or not */ if (!dw_pcie_wait_for_link(pp)) return 0; dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", - readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), - readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); + dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0), + dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1)); return -ETIMEDOUT; } -static void artpec6_pcie_enable_interrupts(struct pcie_port *pp) +static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie) { + struct pcie_port *pp = &artpec6_pcie->pp; + if (IS_ENABLED(CONFIG_PCI_MSI)) dw_pcie_msi_init(pp); } static void artpec6_pcie_host_init(struct pcie_port *pp) { - artpec6_pcie_establish_link(pp); - artpec6_pcie_enable_interrupts(pp); -} - -static int artpec6_pcie_link_up(struct pcie_port *pp) -{ - u32 rc; - - /* - * Get status from Synopsys IP - * link is debug bit 36, debug register 1 starts at bit 32 - */ - rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32)); - if (rc) - return 1; + struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp); - return 0; + artpec6_pcie_establish_link(artpec6_pcie); + artpec6_pcie_enable_interrupts(artpec6_pcie); } static struct pcie_host_ops artpec6_pcie_host_ops = { - .link_up = artpec6_pcie_link_up, .host_init = artpec6_pcie_host_init, }; static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg) { - struct pcie_port *pp = arg; + struct artpec6_pcie *artpec6_pcie = arg; + struct pcie_port *pp = &artpec6_pcie->pp; return dw_handle_msi_irq(pp); } -static int artpec6_add_pcie_port(struct pcie_port *pp, +static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie, struct platform_device *pdev) { + struct pcie_port *pp = &artpec6_pcie->pp; + struct device *dev = pp->dev; int ret; if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq_byname(pdev, "msi"); if (pp->msi_irq <= 0) { - dev_err(&pdev->dev, "failed to get MSI irq\n"); + dev_err(dev, "failed to get MSI irq\n"); return -ENODEV; } - ret = devm_request_irq(&pdev->dev, pp->msi_irq, + ret = devm_request_irq(dev, pp->msi_irq, artpec6_pcie_msi_handler, IRQF_SHARED | IRQF_NO_THREAD, - "artpec6-pcie-msi", pp); + "artpec6-pcie-msi", artpec6_pcie); if (ret) { - dev_err(&pdev->dev, "failed to request MSI irq\n"); + dev_err(dev, "failed to request MSI irq\n"); return ret; } } @@ -218,7 +222,7 @@ static int artpec6_add_pcie_port(struct pcie_port *pp, ret = dw_pcie_host_init(pp); if (ret) { - dev_err(&pdev->dev, "failed to initialize host\n"); + dev_err(dev, "failed to initialize host\n"); return ret; } @@ -227,41 +231,40 @@ static int artpec6_add_pcie_port(struct pcie_port *pp, static int artpec6_pcie_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct artpec6_pcie *artpec6_pcie; struct pcie_port *pp; struct resource *dbi_base; struct resource *phy_base; int ret; - artpec6_pcie = devm_kzalloc(&pdev->dev, sizeof(*artpec6_pcie), - GFP_KERNEL); + artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL); if 
(!artpec6_pcie) return -ENOMEM; pp = &artpec6_pcie->pp; - pp->dev = &pdev->dev; + pp->dev = dev; dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base); + pp->dbi_base = devm_ioremap_resource(dev, dbi_base); if (IS_ERR(pp->dbi_base)) return PTR_ERR(pp->dbi_base); phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); - artpec6_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base); + artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base); if (IS_ERR(artpec6_pcie->phy_base)) return PTR_ERR(artpec6_pcie->phy_base); artpec6_pcie->regmap = - syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + syscon_regmap_lookup_by_phandle(dev->of_node, "axis,syscon-pcie"); if (IS_ERR(artpec6_pcie->regmap)) return PTR_ERR(artpec6_pcie->regmap); - ret = artpec6_add_pcie_port(pp, pdev); + ret = artpec6_add_pcie_port(artpec6_pcie, pdev); if (ret < 0) return ret; - platform_set_drvdata(pdev, artpec6_pcie); return 0; } diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c index 17da005497a5..537f58a664fa 100644 --- a/drivers/pci/host/pcie-designware-plat.c +++ b/drivers/pci/host/pcie-designware-plat.c @@ -25,8 +25,7 @@ #include "pcie-designware.h" struct dw_plat_pcie { - void __iomem *mem_base; - struct pcie_port pp; + struct pcie_port pp; /* pp.dbi_base is DT 0th resource */ }; static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg) @@ -52,6 +51,7 @@ static struct pcie_host_ops dw_plat_pcie_host_ops = { static int dw_plat_add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) { + struct device *dev = pp->dev; int ret; pp->irq = platform_get_irq(pdev, 1); @@ -63,11 +63,11 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp, if (pp->msi_irq < 0) return pp->msi_irq; - ret = devm_request_irq(&pdev->dev, pp->msi_irq, + ret = devm_request_irq(dev, pp->msi_irq, dw_plat_pcie_msi_irq_handler, IRQF_SHARED, "dw-plat-pcie-msi", pp); if (ret) { - dev_err(&pdev->dev, "failed to request MSI IRQ\n"); + dev_err(dev, "failed to request MSI IRQ\n"); return ret; } } @@ -77,7 +77,7 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp, ret = dw_pcie_host_init(pp); if (ret) { - dev_err(&pdev->dev, "failed to initialize host\n"); + dev_err(dev, "failed to initialize host\n"); return ret; } @@ -86,31 +86,28 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp, static int dw_plat_pcie_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct dw_plat_pcie *dw_plat_pcie; struct pcie_port *pp; struct resource *res; /* Resource from DT */ int ret; - dw_plat_pcie = devm_kzalloc(&pdev->dev, sizeof(*dw_plat_pcie), - GFP_KERNEL); + dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL); if (!dw_plat_pcie) return -ENOMEM; pp = &dw_plat_pcie->pp; - pp->dev = &pdev->dev; + pp->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dw_plat_pcie->mem_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dw_plat_pcie->mem_base)) - return PTR_ERR(dw_plat_pcie->mem_base); - - pp->dbi_base = dw_plat_pcie->mem_base; + pp->dbi_base = devm_ioremap_resource(dev, res); + if (IS_ERR(pp->dbi_base)) + return PTR_ERR(pp->dbi_base); ret = dw_plat_add_pcie_port(pp, pdev); if (ret < 0) return ret; - platform_set_drvdata(pdev, dw_plat_pcie); return 0; } diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 74da71ea544a..035f50c03281 100644 --- a/drivers/pci/host/pcie-designware.c +++ 
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 74da71ea544a..035f50c03281 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -141,41 +141,35 @@ int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static inline u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
+u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
 {
 	if (pp->ops->readl_rc)
-		return pp->ops->readl_rc(pp, pp->dbi_base + reg);
+		return pp->ops->readl_rc(pp, reg);
 
 	return readl(pp->dbi_base + reg);
 }
 
-static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
+void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
 {
 	if (pp->ops->writel_rc)
-		pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
+		pp->ops->writel_rc(pp, reg, val);
 	else
 		writel(val, pp->dbi_base + reg);
 }
 
-static inline u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
+static u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
 {
 	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
 
-	if (pp->ops->readl_rc)
-		return pp->ops->readl_rc(pp, pp->dbi_base + offset + reg);
-
-	return readl(pp->dbi_base + offset + reg);
+	return dw_pcie_readl_rc(pp, offset + reg);
 }
 
-static inline void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index,
-					 u32 val, u32 reg)
+static void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index, u32 reg,
+				  u32 val)
 {
 	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
 
-	if (pp->ops->writel_rc)
-		pp->ops->writel_rc(pp, val, pp->dbi_base + offset + reg);
-	else
-		writel(val, pp->dbi_base + offset + reg);
+	dw_pcie_writel_rc(pp, offset + reg, val);
 }
 
 static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
@@ -202,35 +196,35 @@ static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
 	u32 retries, val;
 
 	if (pp->iatu_unroll_enabled) {
-		dw_pcie_writel_unroll(pp, index,
-			lower_32_bits(cpu_addr), PCIE_ATU_UNR_LOWER_BASE);
-		dw_pcie_writel_unroll(pp, index,
-			upper_32_bits(cpu_addr), PCIE_ATU_UNR_UPPER_BASE);
-		dw_pcie_writel_unroll(pp, index,
-			lower_32_bits(cpu_addr + size - 1), PCIE_ATU_UNR_LIMIT);
-		dw_pcie_writel_unroll(pp, index,
-			lower_32_bits(pci_addr), PCIE_ATU_UNR_LOWER_TARGET);
-		dw_pcie_writel_unroll(pp, index,
-			upper_32_bits(pci_addr), PCIE_ATU_UNR_UPPER_TARGET);
-		dw_pcie_writel_unroll(pp, index,
-			type, PCIE_ATU_UNR_REGION_CTRL1);
-		dw_pcie_writel_unroll(pp, index,
-			PCIE_ATU_ENABLE, PCIE_ATU_UNR_REGION_CTRL2);
+		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_BASE,
+				      lower_32_bits(cpu_addr));
+		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_BASE,
+				      upper_32_bits(cpu_addr));
+		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LIMIT,
+				      lower_32_bits(cpu_addr + size - 1));
+		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_TARGET,
+				      lower_32_bits(pci_addr));
+		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_TARGET,
+				      upper_32_bits(pci_addr));
+		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL1,
+				      type);
+		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL2,
+				      PCIE_ATU_ENABLE);
 	} else {
-		dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
-				  PCIE_ATU_VIEWPORT);
-		dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr),
-				  PCIE_ATU_LOWER_BASE);
-		dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr),
-				  PCIE_ATU_UPPER_BASE);
-		dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
-				  PCIE_ATU_LIMIT);
-		dw_pcie_writel_rc(pp, lower_32_bits(pci_addr),
-				  PCIE_ATU_LOWER_TARGET);
-		dw_pcie_writel_rc(pp, upper_32_bits(pci_addr),
-				  PCIE_ATU_UPPER_TARGET);
-		dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
-		dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+		dw_pcie_writel_rc(pp, PCIE_ATU_VIEWPORT,
+				  PCIE_ATU_REGION_OUTBOUND | index);
+		dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_BASE,
+				  lower_32_bits(cpu_addr));
+		dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_BASE,
+				  upper_32_bits(cpu_addr));
+		dw_pcie_writel_rc(pp, PCIE_ATU_LIMIT,
+				  lower_32_bits(cpu_addr + size - 1));
+		dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_TARGET,
+				  lower_32_bits(pci_addr));
+		dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_TARGET,
+				  upper_32_bits(pci_addr));
+		dw_pcie_writel_rc(pp, PCIE_ATU_CR1, type);
+		dw_pcie_writel_rc(pp, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
 	}
 
 	/*
@@ -760,8 +754,8 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
 	return ret;
 }
 
-static int dw_pcie_valid_config(struct pcie_port *pp,
-				struct pci_bus *bus, int dev)
+static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
+				int dev)
 {
 	/* If there is no link, then there is no device */
 	if (bus->number != pp->root_bus_nr) {
@@ -781,7 +775,7 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
 {
 	struct pcie_port *pp = bus->sysdata;
 
-	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
+	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
 		*val = 0xffffffff;
 		return PCIBIOS_DEVICE_NOT_FOUND;
 	}
@@ -797,7 +791,7 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
 {
 	struct pcie_port *pp = bus->sysdata;
 
-	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
+	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
 	if (bus->number == pp->root_bus_nr)
@@ -835,7 +829,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 		dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
 		return;
 	}
-	dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
+	dw_pcie_writel_rc(pp, PCIE_PORT_LINK_CONTROL, val);
 
 	/* set link width speed control register */
 	val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
@@ -854,30 +848,30 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
 		break;
 	}
-	dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);
+	dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
 
 	/* setup RC BARs */
-	dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
-	dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
+	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0x00000004);
+	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0x00000000);
 
 	/* setup interrupt pins */
 	val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE);
 	val &= 0xffff00ff;
 	val |= 0x00000100;
-	dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);
+	dw_pcie_writel_rc(pp, PCI_INTERRUPT_LINE, val);
 
 	/* setup bus numbers */
 	val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
 	val &= 0xff000000;
 	val |= 0x00010100;
-	dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
+	dw_pcie_writel_rc(pp, PCI_PRIMARY_BUS, val);
 
 	/* setup command register */
 	val = dw_pcie_readl_rc(pp, PCI_COMMAND);
 	val &= 0xffff0000;
 	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
 		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
-	dw_pcie_writel_rc(pp, val, PCI_COMMAND);
+	dw_pcie_writel_rc(pp, PCI_COMMAND, val);
 
 	/*
 	 * If the platform provides ->rd_other_conf, it means the platform
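The hunks above swap dw_pcie_writel_rc() to take the register offset before the value, matching dw_pcie_readl_rc(), and the unroll helpers now delegate to those accessors instead of re-checking pp->ops themselves. A hypothetical caller under the new convention — example_atu_disable() is not in the patch:

	/* Offset first, value last; dw_pcie_writel_rc() applies pp->dbi_base
	 * or defers to pp->ops->writel_rc() on behalf of every caller. */
	static void example_atu_disable(struct pcie_port *pp, u32 index)
	{
		u32 off = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

		dw_pcie_writel_rc(pp, off + PCIE_ATU_UNR_REGION_CTRL2, 0);
	}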
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index c8e5bc647f49..a567ea288ee2 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -54,9 +54,8 @@ struct pcie_port {
 };
 
 struct pcie_host_ops {
-	u32 (*readl_rc)(struct pcie_port *pp, void __iomem *dbi_base);
-	void (*writel_rc)(struct pcie_port *pp,
-			u32 val, void __iomem *dbi_base);
+	u32 (*readl_rc)(struct pcie_port *pp, u32 reg);
+	void (*writel_rc)(struct pcie_port *pp, u32 reg, u32 val);
 	int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
 	int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
 	int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
@@ -73,6 +72,8 @@ struct pcie_host_ops {
 	int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip);
 };
 
+u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg);
+void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val);
 int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val);
 int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val);
 irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c
index 7ee9dfcc45fb..56154c25980c 100644
--- a/drivers/pci/host/pcie-hisi.c
+++ b/drivers/pci/host/pcie-hisi.c
@@ -22,51 +22,38 @@
 
 #include "pcie-designware.h"
 
-#define PCIE_LTSSM_LINKUP_STATE		0x11
-#define PCIE_LTSSM_STATE_MASK		0x3F
-#define PCIE_SUBCTRL_SYS_STATE4_REG	0x6818
-#define PCIE_SYS_STATE4			0x31c
-#define PCIE_HIP06_CTRL_OFF		0x1000
+#define PCIE_SUBCTRL_SYS_STATE4_REG	0x6818
+#define PCIE_HIP06_CTRL_OFF		0x1000
+#define PCIE_SYS_STATE4			(PCIE_HIP06_CTRL_OFF + 0x31c)
+#define PCIE_LTSSM_LINKUP_STATE		0x11
+#define PCIE_LTSSM_STATE_MASK		0x3F
 
 #define to_hisi_pcie(x)	container_of(x, struct hisi_pcie, pp)
 
 struct hisi_pcie;
 
 struct pcie_soc_ops {
-	int (*hisi_pcie_link_up)(struct hisi_pcie *pcie);
+	int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie);
 };
 
 struct hisi_pcie {
+	struct pcie_port pp;		/* pp.dbi_base is DT rc_dbi */
 	struct regmap *subctrl;
-	void __iomem *reg_base;
 	u32 port_id;
-	struct pcie_port pp;
 	struct pcie_soc_ops *soc_ops;
 };
 
-static inline void hisi_pcie_apb_writel(struct hisi_pcie *pcie,
-					u32 val, u32 reg)
-{
-	writel(val, pcie->reg_base + reg);
-}
-
-static inline u32 hisi_pcie_apb_readl(struct hisi_pcie *pcie, u32 reg)
-{
-	return readl(pcie->reg_base + reg);
-}
-
 /* HipXX PCIe host only supports 32-bit config access */
 static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
 			      u32 *val)
 {
 	u32 reg;
 	u32 reg_val;
-	struct hisi_pcie *pcie = to_hisi_pcie(pp);
 	void *walker = &reg_val;
 
 	walker += (where & 0x3);
 	reg = where & ~0x3;
-	reg_val = hisi_pcie_apb_readl(pcie, reg);
+	reg_val = dw_pcie_readl_rc(pp, reg);
 
 	if (size == 1)
 		*val = *(u8 __force *) walker;
@@ -86,21 +73,20 @@ static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size,
 {
 	u32 reg_val;
 	u32 reg;
-	struct hisi_pcie *pcie = to_hisi_pcie(pp);
 	void *walker = &reg_val;
 
 	walker += (where & 0x3);
 	reg = where & ~0x3;
 	if (size == 4)
-		hisi_pcie_apb_writel(pcie, val, reg);
+		dw_pcie_writel_rc(pp, reg, val);
 	else if (size == 2) {
-		reg_val = hisi_pcie_apb_readl(pcie, reg);
+		reg_val = dw_pcie_readl_rc(pp, reg);
 		*(u16 __force *) walker = val;
-		hisi_pcie_apb_writel(pcie, reg_val, reg);
+		dw_pcie_writel_rc(pp, reg, reg_val);
 	} else if (size == 1) {
-		reg_val = hisi_pcie_apb_readl(pcie, reg);
+		reg_val = dw_pcie_readl_rc(pp, reg);
 		*(u8 __force *) walker = val;
-		hisi_pcie_apb_writel(pcie, reg_val, reg);
+		dw_pcie_writel_rc(pp, reg, reg_val);
 	} else
 		return PCIBIOS_BAD_REGISTER_NUMBER;
 
@@ -119,10 +105,10 @@ static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie)
 
 static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie)
 {
+	struct pcie_port *pp = &hisi_pcie->pp;
 	u32 val;
 
-	val = hisi_pcie_apb_readl(hisi_pcie, PCIE_HIP06_CTRL_OFF +
-				  PCIE_SYS_STATE4);
+	val = dw_pcie_readl_rc(pp, PCIE_SYS_STATE4);
 
 	return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
 }
@@ -140,19 +126,20 @@ static struct pcie_host_ops hisi_pcie_host_ops = {
 	.link_up = hisi_pcie_link_up,
 };
 
-static int hisi_add_pcie_port(struct pcie_port *pp,
-			      struct platform_device *pdev)
+static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie,
+			      struct platform_device *pdev)
 {
+	struct pcie_port *pp = &hisi_pcie->pp;
+	struct device *dev = pp->dev;
 	int ret;
 	u32 port_id;
-	struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp);
 
-	if (of_property_read_u32(pdev->dev.of_node, "port-id", &port_id)) {
-		dev_err(&pdev->dev, "failed to read port-id\n");
+	if (of_property_read_u32(dev->of_node, "port-id", &port_id)) {
+		dev_err(dev, "failed to read port-id\n");
 		return -EINVAL;
 	}
 	if (port_id > 3) {
-		dev_err(&pdev->dev, "Invalid port-id: %d\n", port_id);
+		dev_err(dev, "Invalid port-id: %d\n", port_id);
 		return -EINVAL;
 	}
 	hisi_pcie->port_id = port_id;
@@ -161,7 +148,7 @@ static int hisi_add_pcie_port(struct pcie_port *pp,
 
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to initialize host\n");
+		dev_err(dev, "failed to initialize host\n");
 		return ret;
 	}
 
@@ -170,6 +157,7 @@ static int hisi_add_pcie_port(struct pcie_port *pp,
 
 static int hisi_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct hisi_pcie *hisi_pcie;
 	struct pcie_port *pp;
 	const struct of_device_id *match;
@@ -177,40 +165,36 @@ static int hisi_pcie_probe(struct platform_device *pdev)
 	struct device_driver *driver;
 	int ret;
 
-	hisi_pcie = devm_kzalloc(&pdev->dev, sizeof(*hisi_pcie), GFP_KERNEL);
+	hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL);
 	if (!hisi_pcie)
 		return -ENOMEM;
 
 	pp = &hisi_pcie->pp;
-	pp->dev = &pdev->dev;
-	driver = (pdev->dev).driver;
+	pp->dev = dev;
+	driver = dev->driver;
 
-	match = of_match_device(driver->of_match_table, &pdev->dev);
+	match = of_match_device(driver->of_match_table, dev);
 	hisi_pcie->soc_ops = (struct pcie_soc_ops *) match->data;
 
 	hisi_pcie->subctrl =
	syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl");
 	if (IS_ERR(hisi_pcie->subctrl)) {
-		dev_err(pp->dev, "cannot get subctrl base\n");
+		dev_err(dev, "cannot get subctrl base\n");
 		return PTR_ERR(hisi_pcie->subctrl);
 	}
 
 	reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
-	hisi_pcie->reg_base = devm_ioremap_resource(&pdev->dev, reg);
-	if (IS_ERR(hisi_pcie->reg_base)) {
-		dev_err(pp->dev, "cannot get rc_dbi base\n");
-		return PTR_ERR(hisi_pcie->reg_base);
+	pp->dbi_base = devm_ioremap_resource(dev, reg);
+	if (IS_ERR(pp->dbi_base)) {
+		dev_err(dev, "cannot get rc_dbi base\n");
+		return PTR_ERR(pp->dbi_base);
 	}
 
-	hisi_pcie->pp.dbi_base = hisi_pcie->reg_base;
-
-	ret = hisi_add_pcie_port(pp, pdev);
+	ret = hisi_add_pcie_port(hisi_pcie, pdev);
 	if (ret)
 		return ret;
 
-	platform_set_drvdata(pdev, hisi_pcie);
-
-	dev_warn(pp->dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
+	dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
 
 	return 0;
 }
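The HiSilicon hunks rely on the fact that this controller can only issue 32-bit config accesses: byte and word writes are emulated by reading the aligned dword, patching the target bytes, and writing the whole dword back — hence the RW1C warning in probe. A sketch of that read-modify-write for the 16-bit case, mirroring the walker arithmetic above (example_cfg_write16 is illustrative):

	static void example_cfg_write16(struct pcie_port *pp, int where, u16 val)
	{
		u32 reg = where & ~0x3;			/* aligned 32-bit register */
		u32 reg_val = dw_pcie_readl_rc(pp, reg);
		void *walker = &reg_val;

		walker += (where & 0x3);		/* point at the target half-word */
		*(u16 *)walker = val;			/* patch it within the dword */
		dw_pcie_writel_rc(pp, reg, reg_val);	/* write the whole dword back;
							 * RW1C bits that read back as 1
							 * get written as 1 and cleared */
	}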
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index 0d7bee4a0d26..8ce089043a27 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -42,19 +42,24 @@ static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 
 static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
 {
+	struct device *dev = &bdev->dev;
 	struct iproc_pcie *pcie;
 	LIST_HEAD(res);
 	struct resource res_mem;
 	int ret;
 
-	pcie = devm_kzalloc(&bdev->dev, sizeof(*pcie), GFP_KERNEL);
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
 		return -ENOMEM;
 
-	pcie->dev = &bdev->dev;
-	bcma_set_drvdata(bdev, pcie);
+	pcie->dev = dev;
 
 	pcie->base = bdev->io_addr;
+	if (!pcie->base) {
+		dev_err(dev, "no controller registers\n");
+		return -ENOMEM;
+	}
+
 	pcie->base_addr = bdev->addr;
 
 	res_mem.start = bdev->addr_s[0];
@@ -67,10 +72,11 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
 
 	ret = iproc_pcie_setup(pcie, &res);
 	if (ret)
-		dev_err(pcie->dev, "PCIe controller setup failed\n");
+		dev_err(dev, "PCIe controller setup failed\n");
 
 	pci_free_resource_list(&res);
 
+	bcma_set_drvdata(bdev, pcie);
 	return ret;
 }
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index 1738c5288eb6..a3de087976b3 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -40,35 +40,35 @@ MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table);
 
 static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	const struct of_device_id *of_id;
 	struct iproc_pcie *pcie;
-	struct device_node *np = pdev->dev.of_node;
+	struct device_node *np = dev->of_node;
 	struct resource reg;
 	resource_size_t iobase = 0;
 	LIST_HEAD(res);
 	int ret;
 
-	of_id = of_match_device(iproc_pcie_of_match_table, &pdev->dev);
+	of_id = of_match_device(iproc_pcie_of_match_table, dev);
 	if (!of_id)
 		return -EINVAL;
 
-	pcie = devm_kzalloc(&pdev->dev, sizeof(struct iproc_pcie), GFP_KERNEL);
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
 		return -ENOMEM;
 
-	pcie->dev = &pdev->dev;
+	pcie->dev = dev;
 	pcie->type = (enum iproc_pcie_type)of_id->data;
-	platform_set_drvdata(pdev, pcie);
 
 	ret = of_address_to_resource(np, 0, &reg);
 	if (ret < 0) {
-		dev_err(pcie->dev, "unable to obtain controller resources\n");
+		dev_err(dev, "unable to obtain controller resources\n");
 		return ret;
 	}
 
-	pcie->base = devm_ioremap(pcie->dev, reg.start, resource_size(&reg));
+	pcie->base = devm_ioremap(dev, reg.start, resource_size(&reg));
 	if (!pcie->base) {
-		dev_err(pcie->dev, "unable to map controller registers\n");
+		dev_err(dev, "unable to map controller registers\n");
 		return -ENOMEM;
 	}
 	pcie->base_addr = reg.start;
@@ -79,7 +79,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
 		ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset", &val);
 		if (ret) {
-			dev_err(pcie->dev,
+			dev_err(dev,
 				"missing brcm,pcie-ob-axi-offset property\n");
 			return ret;
 		}
@@ -88,7 +88,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
 		ret = of_property_read_u32(np, "brcm,pcie-ob-window-size", &val);
 		if (ret) {
-			dev_err(pcie->dev,
+			dev_err(dev,
 				"missing brcm,pcie-ob-window-size property\n");
 			return ret;
 		}
@@ -101,7 +101,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
 	}
 
 	/* PHY use is optional */
-	pcie->phy = devm_phy_get(&pdev->dev, "pcie-phy");
+	pcie->phy = devm_phy_get(dev, "pcie-phy");
 	if (IS_ERR(pcie->phy)) {
 		if (PTR_ERR(pcie->phy) == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
@@ -110,7 +110,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
 
 	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase);
 	if (ret) {
-		dev_err(pcie->dev,
+		dev_err(dev,
 			"unable to get PCI host bridge resources\n");
 		return ret;
 	}
@@ -119,10 +119,11 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
 
 	ret = iproc_pcie_setup(pcie, &res);
 	if (ret)
-		dev_err(pcie->dev, "PCIe controller setup failed\n");
+		dev_err(dev, "PCIe controller setup failed\n");
 
 	pci_free_resource_list(&res);
 
+	platform_set_drvdata(pdev, pcie);
 	return ret;
 }
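A second theme in these two front ends: {bcma,platform}_set_drvdata() moves from the top of probe to after the setup call, so driver data is only published once the structure is usable. A sketch of the ordering, with example_setup() standing in for the real setup path:

	static int example_probe(struct platform_device *pdev)
	{
		struct example_pcie *pcie;
		int ret;

		pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
		if (!pcie)
			return -ENOMEM;

		ret = example_setup(pcie);	/* drvdata still unset on failure */
		if (ret)
			return ret;

		platform_set_drvdata(pdev, pcie);	/* published only when valid */
		return 0;
	}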
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index e167b2f0098d..0b999a9fb843 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -63,6 +63,8 @@
 #define OARR_SIZE_CFG_SHIFT	1
 #define OARR_SIZE_CFG		BIT(OARR_SIZE_CFG_SHIFT)
 
+#define PCI_EXP_CAP		0xac
+
 #define MAX_NUM_OB_WINDOWS	2
 
 #define IPROC_PCIE_REG_INVALID	0xffff
@@ -258,9 +260,10 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie)
 
 static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
 {
+	struct device *dev = pcie->dev;
 	u8 hdr_type;
 	u32 link_ctrl, class, val;
-	u16 pos, link_status;
+	u16 pos = PCI_EXP_CAP, link_status;
 	bool link_is_active = false;
 
 	/*
@@ -272,14 +275,14 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
 
 	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
 	if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
-		dev_err(pcie->dev, "PHY or data link is INACTIVE!\n");
+		dev_err(dev, "PHY or data link is INACTIVE!\n");
 		return -ENODEV;
 	}
 
 	/* make sure we are not in EP mode */
 	pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type);
 	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
-		dev_err(pcie->dev, "in EP mode, hdr=%#02x\n", hdr_type);
+		dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
 		return -EFAULT;
 	}
 
@@ -293,30 +296,27 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
 	pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class);
 
 	/* check link status to see if link is active */
-	pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
 	pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
 	if (link_status & PCI_EXP_LNKSTA_NLW)
 		link_is_active = true;
 
 	if (!link_is_active) {
 		/* try GEN 1 link speed */
-#define PCI_LINK_STATUS_CTRL_2_OFFSET	0x0dc
 #define PCI_TARGET_LINK_SPEED_MASK	0xf
 #define PCI_TARGET_LINK_SPEED_GEN2	0x2
 #define PCI_TARGET_LINK_SPEED_GEN1	0x1
 		pci_bus_read_config_dword(bus, 0,
-					  PCI_LINK_STATUS_CTRL_2_OFFSET,
+					  pos + PCI_EXP_LNKCTL2,
 					  &link_ctrl);
 		if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
 		    PCI_TARGET_LINK_SPEED_GEN2) {
 			link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
 			link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
 			pci_bus_write_config_dword(bus, 0,
-					   PCI_LINK_STATUS_CTRL_2_OFFSET,
+					   pos + PCI_EXP_LNKCTL2,
 					   link_ctrl);
 			msleep(100);
 
-			pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
 			pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA,
 						 &link_status);
 			if (link_status & PCI_EXP_LNKSTA_NLW)
@@ -324,7 +324,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
 		}
 	}
 
-	dev_info(pcie->dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
+	dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
 
 	return link_is_active ? 0 : -ENODEV;
 }
@@ -349,12 +349,13 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
 			       u64 pci_addr, resource_size_t size)
 {
 	struct iproc_pcie_ob *ob = &pcie->ob;
+	struct device *dev = pcie->dev;
 	unsigned i;
 	u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS;
 	u64 remainder;
 
 	if (size > max_size) {
-		dev_err(pcie->dev,
+		dev_err(dev,
 			"res size %pap exceeds max supported size 0x%llx\n",
 			&size, max_size);
 		return -EINVAL;
@@ -362,15 +363,14 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
 
 	div64_u64_rem(size, ob->window_size, &remainder);
 	if (remainder) {
-		dev_err(pcie->dev,
+		dev_err(dev,
 			"res size %pap needs to be multiple of window size %pap\n",
 			&size, &ob->window_size);
 		return -EINVAL;
 	}
 
 	if (axi_addr < ob->axi_offset) {
-		dev_err(pcie->dev,
-			"axi address %pap less than offset %pap\n",
+		dev_err(dev, "axi address %pap less than offset %pap\n",
 			&axi_addr, &ob->axi_offset);
 		return -EINVAL;
 	}
@@ -406,6 +406,7 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
 static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
 				 struct list_head *resources)
 {
+	struct device *dev = pcie->dev;
 	struct resource_entry *window;
 	int ret;
 
@@ -425,7 +426,7 @@ static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
 				return ret;
 			break;
 		default:
-			dev_err(pcie->dev, "invalid resource %pR\n", res);
+			dev_err(dev, "invalid resource %pR\n", res);
 			return -EINVAL;
 		}
 	}
@@ -455,26 +456,25 @@ static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
 
 int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 {
+	struct device *dev;
 	int ret;
 	void *sysdata;
 	struct pci_bus *bus;
 
-	if (!pcie || !pcie->dev || !pcie->base)
-		return -EINVAL;
-
-	ret = devm_request_pci_bus_resources(pcie->dev, res);
+	dev = pcie->dev;
+	ret = devm_request_pci_bus_resources(dev, res);
 	if (ret)
 		return ret;
 
 	ret = phy_init(pcie->phy);
 	if (ret) {
-		dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
+		dev_err(dev, "unable to initialize PCIe PHY\n");
 		return ret;
 	}
 
 	ret = phy_power_on(pcie->phy);
 	if (ret) {
-		dev_err(pcie->dev, "unable to power on PCIe PHY\n");
+		dev_err(dev, "unable to power on PCIe PHY\n");
 		goto err_exit_phy;
 	}
 
@@ -486,7 +486,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 		pcie->reg_offsets = iproc_pcie_reg_paxc;
 		break;
 	default:
-		dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
+		dev_err(dev, "incompatible iProc PCIe interface\n");
 		ret = -EINVAL;
 		goto err_power_off_phy;
 	}
@@ -496,7 +496,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 	if (pcie->need_ob_cfg) {
 		ret = iproc_pcie_map_ranges(pcie, res);
 		if (ret) {
-			dev_err(pcie->dev, "map failed\n");
+			dev_err(dev, "map failed\n");
 			goto err_power_off_phy;
 		}
 	}
@@ -508,9 +508,9 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 	sysdata = pcie;
 #endif
 
-	bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, sysdata, res);
+	bus = pci_create_root_bus(dev, 0, &iproc_pcie_ops, sysdata, res);
 	if (!bus) {
-		dev_err(pcie->dev, "unable to create PCI root bus\n");
+		dev_err(dev, "unable to create PCI root bus\n");
 		ret = -ENOMEM;
 		goto err_power_off_phy;
 	}
@@ -518,7 +518,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 
 	ret = iproc_pcie_check_link(pcie, bus);
 	if (ret) {
-		dev_err(pcie->dev, "no PCIe EP device detected\n");
+		dev_err(dev, "no PCIe EP device detected\n");
 		goto err_rm_root_bus;
 	}
 
@@ -526,7 +526,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 	if (IS_ENABLED(CONFIG_PCI_MSI))
 		if (iproc_pcie_msi_enable(pcie))
-			dev_info(pcie->dev, "not using iProc MSI\n");
+			dev_info(dev, "not using iProc MSI\n");
 
 	pci_scan_child_bus(bus);
 	pci_assign_unassigned_bus_resources(bus);
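With the capability offset hard-coded as PCI_EXP_CAP (0xac on this controller, per the new define above), the link-width test no longer needs pci_bus_find_capability(). Roughly, assuming the root port sits at devfn 0 as in the patch — example_link_active() is an illustrative name:

	static bool example_link_active(struct pci_bus *bus)
	{
		u16 pos = PCI_EXP_CAP;	/* fixed PCIe capability offset */
		u16 link_status;

		pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
		return link_status & PCI_EXP_LNKSTA_NLW;	/* nonzero width = link up */
	}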
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c
index 5ec2d440a6b7..ef0a84c7a588 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/host/pcie-qcom.c
@@ -86,12 +86,10 @@ struct qcom_pcie_ops {
 };
 
 struct qcom_pcie {
-	struct pcie_port pp;
-	struct device *dev;
+	struct pcie_port pp;		/* pp.dbi_base is DT dbi */
+	void __iomem *parf;		/* DT parf */
+	void __iomem *elbi;		/* DT elbi */
 	union qcom_pcie_resources res;
-	void __iomem *parf;
-	void __iomem *dbi;
-	void __iomem *elbi;
 	struct phy *phy;
 	struct gpio_desc *reset;
 	struct qcom_pcie_ops *ops;
@@ -136,7 +134,7 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
 static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
-	struct device *dev = pcie->dev;
+	struct device *dev = pcie->pp.dev;
 
 	res->vdda = devm_regulator_get(dev, "vdda");
 	if (IS_ERR(res->vdda))
@@ -188,7 +186,7 @@ static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
-	struct device *dev = pcie->dev;
+	struct device *dev = pcie->pp.dev;
 
 	res->vdda = devm_regulator_get(dev, "vdda");
 	if (IS_ERR(res->vdda))
@@ -237,7 +235,7 @@ static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
 static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
-	struct device *dev = pcie->dev;
+	struct device *dev = pcie->pp.dev;
 	u32 val;
 	int ret;
 
@@ -359,7 +357,7 @@ static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
 static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
-	struct device *dev = pcie->dev;
+	struct device *dev = pcie->pp.dev;
 	int ret;
 
 	ret = reset_control_deassert(res->core);
@@ -426,7 +424,7 @@ err_res:
 static int qcom_pcie_link_up(struct pcie_port *pp)
 {
 	struct qcom_pcie *pcie = to_qcom_pcie(pp);
-	u16 val = readw(pcie->dbi + PCIE20_CAP + PCI_EXP_LNKSTA);
+	u16 val = readw(pcie->pp.dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
 
 	return !!(val & PCI_EXP_LNKSTA_DLLLA);
 }
@@ -509,8 +507,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 	if (!pcie)
 		return -ENOMEM;
 
+	pp = &pcie->pp;
 	pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
-	pcie->dev = dev;
 
 	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
 	if (IS_ERR(pcie->reset))
@@ -522,9 +520,9 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 		return PTR_ERR(pcie->parf);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
-	pcie->dbi = devm_ioremap_resource(dev, res);
-	if (IS_ERR(pcie->dbi))
-		return PTR_ERR(pcie->dbi);
+	pp->dbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pp->dbi_base))
+		return PTR_ERR(pp->dbi_base);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
 	pcie->elbi = devm_ioremap_resource(dev, res);
@@ -539,9 +537,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	pp = &pcie->pp;
 	pp->dev = dev;
-	pp->dbi_base = pcie->dbi;
 	pp->root_bus_nr = -1;
 	pp->ops = &qcom_pcie_dw_ops;
 
@@ -569,8 +565,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	platform_set_drvdata(pdev, pcie);
-
 	return 0;
 }
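The qcom hunks remove the driver's private dev/dbi copies; everything now goes through the embedded pcie_port. The link-up test above, restated as a standalone sketch of the accessor change:

	static bool example_qcom_link_up(struct qcom_pcie *pcie)
	{
		/* formerly readw(pcie->dbi + ...); dbi now lives in pp.dbi_base */
		u16 val = readw(pcie->pp.dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);

		return !!(val & PCI_EXP_LNKSTA_DLLLA);
	}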
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index e06b1d3b4dea..62700d1896f4 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -31,8 +31,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
-#define DRV_NAME "rcar-pcie"
-
 #define PCIECAR			0x000010
 #define PCIECCTLR		0x000018
 #define  CONFIG_SEND_ENABLE	(1 << 31)
@@ -397,6 +395,7 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
 
 static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	unsigned int timeout = 1000;
 	u32 macsr;
 
@@ -404,7 +403,7 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 		return;
 
 	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
-		dev_err(pcie->dev, "Speed change already in progress\n");
+		dev_err(dev, "Speed change already in progress\n");
 		return;
 	}
 
@@ -433,7 +432,7 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 			rcar_pci_write_reg(pcie, macsr, MACSR);
 
 			if (macsr & SPCHGFAIL)
-				dev_err(pcie->dev, "Speed change failed\n");
+				dev_err(dev, "Speed change failed\n");
 
 			goto done;
 		}
@@ -441,15 +440,16 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 		msleep(1);
 	};
 
-	dev_err(pcie->dev, "Speed change timed out\n");
+	dev_err(dev, "Speed change timed out\n");
 
 done:
-	dev_info(pcie->dev, "Current link speed is %s GT/s\n",
+	dev_info(dev, "Current link speed is %s GT/s\n",
 		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
 }
 
 static int rcar_pcie_enable(struct rcar_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	struct pci_bus *bus, *child;
 	LIST_HEAD(res);
 
@@ -461,14 +461,14 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
 	pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
 
 	if (IS_ENABLED(CONFIG_PCI_MSI))
-		bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr,
+		bus = pci_scan_root_bus_msi(dev, pcie->root_bus_nr,
 				&rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
 	else
-		bus = pci_scan_root_bus(pcie->dev, pcie->root_bus_nr,
+		bus = pci_scan_root_bus(dev, pcie->root_bus_nr,
 				&rcar_pcie_ops, pcie, &res);
 
 	if (!bus) {
-		dev_err(pcie->dev, "Scanning rootbus failed");
+		dev_err(dev, "Scanning rootbus failed");
 		return -ENODEV;
 	}
 
@@ -487,6 +487,7 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
 
 static int phy_wait_for_ack(struct rcar_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	unsigned int timeout = 100;
 
 	while (timeout--) {
@@ -496,7 +497,7 @@ static int phy_wait_for_ack(struct rcar_pcie *pcie)
 		udelay(100);
 	}
 
-	dev_err(pcie->dev, "Access to PCIe phy timed out\n");
+	dev_err(dev, "Access to PCIe phy timed out\n");
 
 	return -ETIMEDOUT;
 }
@@ -697,6 +698,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
 {
 	struct rcar_pcie *pcie = data;
 	struct rcar_msi *msi = &pcie->msi;
+	struct device *dev = pcie->dev;
 	unsigned long reg;
 
 	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
@@ -717,10 +719,10 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
 			if (test_bit(index, msi->used))
 				generic_handle_irq(irq);
 			else
-				dev_info(pcie->dev, "unhandled MSI\n");
+				dev_info(dev, "unhandled MSI\n");
 		} else {
 			/* Unknown MSI, just clear it */
-			dev_dbg(pcie->dev, "unexpected MSI\n");
+			dev_dbg(dev, "unexpected MSI\n");
 		}
 
 		/* see if there's any more pending in this vector */
@@ -843,22 +845,22 @@ static const struct irq_domain_ops msi_domain_ops = {
 
 static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
 {
-	struct platform_device *pdev = to_platform_device(pcie->dev);
+	struct device *dev = pcie->dev;
 	struct rcar_msi *msi = &pcie->msi;
 	unsigned long base;
 	int err, i;
 
 	mutex_init(&msi->lock);
 
-	msi->chip.dev = pcie->dev;
+	msi->chip.dev = dev;
 	msi->chip.setup_irq = rcar_msi_setup_irq;
 	msi->chip.setup_irqs = rcar_msi_setup_irqs;
 	msi->chip.teardown_irq = rcar_msi_teardown_irq;
 
-	msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
+	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
 					    &msi_domain_ops, &msi->chip);
 	if (!msi->domain) {
-		dev_err(&pdev->dev, "failed to create IRQ domain\n");
+		dev_err(dev, "failed to create IRQ domain\n");
 		return -ENOMEM;
 	}
 
@@ -866,19 +868,19 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
 		irq_create_mapping(msi->domain, i);
 
 	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
-	err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
+	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
 			       IRQF_SHARED | IRQF_NO_THREAD,
 			       rcar_msi_irq_chip.name, pcie);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+		dev_err(dev, "failed to request IRQ: %d\n", err);
 		goto err;
 	}
 
-	err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
+	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
 			       IRQF_SHARED | IRQF_NO_THREAD,
 			       rcar_msi_irq_chip.name, pcie);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+		dev_err(dev, "failed to request IRQ: %d\n", err);
 		goto err;
 	}
 
@@ -899,32 +901,32 @@ err:
 	return err;
 }
 
-static int rcar_pcie_get_resources(struct platform_device *pdev,
-				   struct rcar_pcie *pcie)
+static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	struct resource res;
 	int err, i;
 
-	err = of_address_to_resource(pdev->dev.of_node, 0, &res);
+	err = of_address_to_resource(dev->of_node, 0, &res);
 	if (err)
 		return err;
 
-	pcie->base = devm_ioremap_resource(&pdev->dev, &res);
+	pcie->base = devm_ioremap_resource(dev, &res);
 	if (IS_ERR(pcie->base))
 		return PTR_ERR(pcie->base);
 
-	pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+	pcie->clk = devm_clk_get(dev, "pcie");
 	if (IS_ERR(pcie->clk)) {
-		dev_err(pcie->dev, "cannot get platform clock\n");
+		dev_err(dev, "cannot get platform clock\n");
 		return PTR_ERR(pcie->clk);
 	}
 	err = clk_prepare_enable(pcie->clk);
 	if (err)
 		return err;
 
-	pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+	pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
 	if (IS_ERR(pcie->bus_clk)) {
-		dev_err(pcie->dev, "cannot get pcie bus clock\n");
+		dev_err(dev, "cannot get pcie bus clock\n");
 		err = PTR_ERR(pcie->bus_clk);
 		goto fail_clk;
 	}
@@ -932,17 +934,17 @@ static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
 	if (err)
 		goto fail_clk;
 
-	i = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	i = irq_of_parse_and_map(dev->of_node, 0);
 	if (!i) {
-		dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 		err = -ENOENT;
 		goto err_map_reg;
 	}
 	pcie->msi.irq1 = i;
 
-	i = irq_of_parse_and_map(pdev->dev.of_node, 1);
+	i = irq_of_parse_and_map(dev->of_node, 1);
 	if (!i) {
-		dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 		err = -ENOENT;
 		goto err_map_reg;
 	}
@@ -1119,60 +1121,60 @@ out_release_res:
 
 static int rcar_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct rcar_pcie *pcie;
 	unsigned int data;
 	const struct of_device_id *of_id;
 	int err;
 	int (*hw_init_fn)(struct rcar_pcie *);
 
-	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
 		return -ENOMEM;
 
-	pcie->dev = &pdev->dev;
-	platform_set_drvdata(pdev, pcie);
+	pcie->dev = dev;
 
 	INIT_LIST_HEAD(&pcie->resources);
 
 	rcar_pcie_parse_request_of_pci_ranges(pcie);
 
-	err = rcar_pcie_get_resources(pdev, pcie);
+	err = rcar_pcie_get_resources(pcie);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
+		dev_err(dev, "failed to request resources: %d\n", err);
 		return err;
 	}
 
-	err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
+	err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
 	if (err)
 		return err;
 
-	of_id = of_match_device(rcar_pcie_of_match, pcie->dev);
+	of_id = of_match_device(rcar_pcie_of_match, dev);
 	if (!of_id || !of_id->data)
 		return -EINVAL;
 	hw_init_fn = of_id->data;
 
-	pm_runtime_enable(pcie->dev);
-	err = pm_runtime_get_sync(pcie->dev);
+	pm_runtime_enable(dev);
+	err = pm_runtime_get_sync(dev);
 	if (err < 0) {
-		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
+		dev_err(dev, "pm_runtime_get_sync failed\n");
 		goto err_pm_disable;
 	}
 
 	/* Failure to get a link might just be that no cards are inserted */
 	err = hw_init_fn(pcie);
 	if (err) {
-		dev_info(&pdev->dev, "PCIe link down\n");
+		dev_info(dev, "PCIe link down\n");
 		err = 0;
 		goto err_pm_put;
 	}
 
 	data = rcar_pci_read_reg(pcie, MACSR);
-	dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
+	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 		err = rcar_pcie_enable_msi(pcie);
 		if (err < 0) {
-			dev_err(&pdev->dev,
+			dev_err(dev,
 				"failed to enable MSI support: %d\n",
 				err);
 			goto err_pm_put;
@@ -1186,16 +1188,16 @@ static int rcar_pcie_probe(struct platform_device *pdev)
 	return 0;
 
 err_pm_put:
-	pm_runtime_put(pcie->dev);
+	pm_runtime_put(dev);
 
 err_pm_disable:
-	pm_runtime_disable(pcie->dev);
+	pm_runtime_disable(dev);
 
 	return err;
 }
 
 static struct platform_driver rcar_pcie_driver = {
 	.driver = {
-		.name = DRV_NAME,
+		.name = "rcar-pcie",
 		.of_match_table = rcar_pcie_of_match,
 		.suppress_bind_attrs = true,
 	},
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index b8c82fc812dc..e0b22dab9b7a 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -972,7 +972,7 @@ static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
 		return -EINVAL;
 	if (region_no == 0) {
 		if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
-				return -EINVAL;
+			return -EINVAL;
 	}
 	if (region_no != 0) {
 		if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
@@ -1091,8 +1091,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 	if (err)
 		goto err_vpcie;
 
-	platform_set_drvdata(pdev, rockchip);
-
 	rockchip_pcie_enable_interrupts(rockchip);
 
 	err = rockchip_pcie_init_irq_domain(rockchip);
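rcar_pcie_get_resources() above drops its platform_device argument entirely: once pcie->dev is cached, dev->of_node gives the same DT handle. A trimmed sketch of that shape, with error handling shortened — example_get_resources() is illustrative:

	static int example_get_resources(struct rcar_pcie *pcie)
	{
		struct device *dev = pcie->dev;	/* replaces the pdev parameter */
		struct resource res;
		int err;

		err = of_address_to_resource(dev->of_node, 0, &res);
		if (err)
			return err;

		pcie->base = devm_ioremap_resource(dev, &res);
		return PTR_ERR_OR_ZERO(pcie->base);
	}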
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 09aed85f275a..3cf197ba7f37 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -25,10 +25,10 @@
 #include "pcie-designware.h"
 
 struct spear13xx_pcie {
+	struct pcie_port	pp;	/* DT dbi is pp.dbi_base */
 	void __iomem		*app_base;
 	struct phy		*phy;
 	struct clk		*clk;
-	struct pcie_port	pp;
 	bool			is_gen1;
 };
 
@@ -57,96 +57,26 @@ struct pcie_app_reg {
 };
 
 /* CR0 ID */
-#define RX_LANE_FLIP_EN_ID			0
-#define TX_LANE_FLIP_EN_ID			1
-#define SYS_AUX_PWR_DET_ID			2
 #define APP_LTSSM_ENABLE_ID			3
-#define SYS_ATTEN_BUTTON_PRESSED_ID		4
-#define SYS_MRL_SENSOR_STATE_ID			5
-#define SYS_PWR_FAULT_DET_ID			6
-#define SYS_MRL_SENSOR_CHGED_ID			7
-#define SYS_PRE_DET_CHGED_ID			8
-#define SYS_CMD_CPLED_INT_ID			9
-#define APP_INIT_RST_0_ID			11
-#define APP_REQ_ENTR_L1_ID			12
-#define APP_READY_ENTR_L23_ID			13
-#define APP_REQ_EXIT_L1_ID			14
-#define DEVICE_TYPE_EP				(0 << 25)
-#define DEVICE_TYPE_LEP				(1 << 25)
 #define DEVICE_TYPE_RC				(4 << 25)
-#define SYS_INT_ID				29
 #define MISCTRL_EN_ID				30
 #define REG_TRANSLATION_ENABLE			31
 
-/* CR1 ID */
-#define APPS_PM_XMT_TURNOFF_ID			2
-#define APPS_PM_XMT_PME_ID			5
-
 /* CR3 ID */
-#define XMLH_LTSSM_STATE_DETECT_QUIET		0x00
-#define XMLH_LTSSM_STATE_DETECT_ACT		0x01
-#define XMLH_LTSSM_STATE_POLL_ACTIVE		0x02
-#define XMLH_LTSSM_STATE_POLL_COMPLIANCE	0x03
-#define XMLH_LTSSM_STATE_POLL_CONFIG		0x04
-#define XMLH_LTSSM_STATE_PRE_DETECT_QUIET	0x05
-#define XMLH_LTSSM_STATE_DETECT_WAIT		0x06
-#define XMLH_LTSSM_STATE_CFG_LINKWD_START	0x07
-#define XMLH_LTSSM_STATE_CFG_LINKWD_ACEPT	0x08
-#define XMLH_LTSSM_STATE_CFG_LANENUM_WAIT	0x09
-#define XMLH_LTSSM_STATE_CFG_LANENUM_ACEPT	0x0A
-#define XMLH_LTSSM_STATE_CFG_COMPLETE		0x0B
-#define XMLH_LTSSM_STATE_CFG_IDLE		0x0C
-#define XMLH_LTSSM_STATE_RCVRY_LOCK		0x0D
-#define XMLH_LTSSM_STATE_RCVRY_SPEED		0x0E
-#define XMLH_LTSSM_STATE_RCVRY_RCVRCFG		0x0F
-#define XMLH_LTSSM_STATE_RCVRY_IDLE		0x10
-#define XMLH_LTSSM_STATE_L0			0x11
-#define XMLH_LTSSM_STATE_L0S			0x12
-#define XMLH_LTSSM_STATE_L123_SEND_EIDLE	0x13
-#define XMLH_LTSSM_STATE_L1_IDLE		0x14
-#define XMLH_LTSSM_STATE_L2_IDLE		0x15
-#define XMLH_LTSSM_STATE_L2_WAKE		0x16
-#define XMLH_LTSSM_STATE_DISABLED_ENTRY		0x17
-#define XMLH_LTSSM_STATE_DISABLED_IDLE		0x18
-#define XMLH_LTSSM_STATE_DISABLED		0x19
-#define XMLH_LTSSM_STATE_LPBK_ENTRY		0x1A
-#define XMLH_LTSSM_STATE_LPBK_ACTIVE		0x1B
-#define XMLH_LTSSM_STATE_LPBK_EXIT		0x1C
-#define XMLH_LTSSM_STATE_LPBK_EXIT_TIMEOUT	0x1D
-#define XMLH_LTSSM_STATE_HOT_RESET_ENTRY	0x1E
-#define XMLH_LTSSM_STATE_HOT_RESET		0x1F
-#define XMLH_LTSSM_STATE_MASK			0x3F
 #define XMLH_LINK_UP				(1 << 6)
 
-/* CR4 ID */
-#define CFG_MSI_EN_ID				18
-
 /* CR6 */
-#define INTA_CTRL_INT				(1 << 7)
-#define INTB_CTRL_INT				(1 << 8)
-#define INTC_CTRL_INT				(1 << 9)
-#define INTD_CTRL_INT				(1 << 10)
 #define MSI_CTRL_INT				(1 << 26)
 
-/* CR19 ID */
-#define VEN_MSI_REQ_ID				11
-#define VEN_MSI_FUN_NUM_ID			8
-#define VEN_MSI_TC_ID				5
-#define VEN_MSI_VECTOR_ID			0
-#define VEN_MSI_REQ_EN		((u32)0x1 << VEN_MSI_REQ_ID)
-#define VEN_MSI_FUN_NUM_MASK	((u32)0x7 << VEN_MSI_FUN_NUM_ID)
-#define VEN_MSI_TC_MASK		((u32)0x7 << VEN_MSI_TC_ID)
-#define VEN_MSI_VECTOR_MASK	((u32)0x1F << VEN_MSI_VECTOR_ID)
-
 #define EXP_CAP_ID_OFFSET			0x70
 
 #define to_spear13xx_pcie(x)	container_of(x, struct spear13xx_pcie, pp)
 
-static int spear13xx_pcie_establish_link(struct pcie_port *pp)
+static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
 {
-	u32 val;
-	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+	struct pcie_port *pp = &spear13xx_pcie->pp;
 	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+	u32 val;
 	u32 exp_cap_off = EXP_CAP_ID_OFFSET;
 
 	if (dw_pcie_link_up(pp)) {
@@ -203,9 +133,9 @@ static int spear13xx_pcie_establish_link(struct pcie_port *pp)
 
 static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
 {
-	struct pcie_port *pp = arg;
-	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+	struct spear13xx_pcie *spear13xx_pcie = arg;
 	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+	struct pcie_port *pp = &spear13xx_pcie->pp;
 	unsigned int status;
 
 	status = readl(&app_reg->int_sts);
@@ -220,9 +150,9 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-static void spear13xx_pcie_enable_interrupts(struct pcie_port *pp)
+static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
 {
-	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+	struct pcie_port *pp = &spear13xx_pcie->pp;
 	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
 
 	/* Enable MSI interrupt */
@@ -246,8 +176,10 @@ static int spear13xx_pcie_link_up(struct pcie_port *pp)
 
 static void spear13xx_pcie_host_init(struct pcie_port *pp)
 {
-	spear13xx_pcie_establish_link(pp);
-	spear13xx_pcie_enable_interrupts(pp);
+	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+
+	spear13xx_pcie_establish_link(spear13xx_pcie);
+	spear13xx_pcie_enable_interrupts(spear13xx_pcie);
 }
 
 static struct pcie_host_ops spear13xx_pcie_host_ops = {
@@ -255,10 +187,11 @@ static struct pcie_host_ops spear13xx_pcie_host_ops = {
 	.host_init = spear13xx_pcie_host_init,
 };
 
-static int spear13xx_add_pcie_port(struct pcie_port *pp,
-				   struct platform_device *pdev)
+static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
+				   struct platform_device *pdev)
 {
-	struct device *dev = &pdev->dev;
+	struct pcie_port *pp = &spear13xx_pcie->pp;
+	struct device *dev = pp->dev;
 	int ret;
 
 	pp->irq = platform_get_irq(pdev, 0);
@@ -268,7 +201,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
 	}
 	ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
 			       IRQF_SHARED | IRQF_NO_THREAD,
-			       "spear1340-pcie", pp);
+			       "spear1340-pcie", spear13xx_pcie);
 	if (ret) {
 		dev_err(dev, "failed to request irq %d\n", pp->irq);
 		return ret;
@@ -288,10 +221,10 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
 
 static int spear13xx_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct spear13xx_pcie *spear13xx_pcie;
 	struct pcie_port *pp;
-	struct device *dev = &pdev->dev;
-	struct device_node *np = pdev->dev.of_node;
+	struct device_node *np = dev->of_node;
 	struct resource *dbi_base;
 	int ret;
 
@@ -323,7 +256,6 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
 	}
 
 	pp = &spear13xx_pcie->pp;
-
 	pp->dev = dev;
 
 	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
@@ -338,7 +270,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
 	if (of_property_read_bool(np, "st,pcie-is-gen1"))
 		spear13xx_pcie->is_gen1 = true;
 
-	ret = spear13xx_add_pcie_port(pp, pdev);
+	ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev);
 	if (ret < 0)
 		goto fail_clk;
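The spear13xx hunks also change the IRQ cookie: devm_request_irq() now passes the wrapper struct, so the handler reaches both the app registers and the embedded pcie_port without container_of(). A sketch of the handler side under that convention, with the register handling abbreviated — example_irq_handler() is illustrative:

	static irqreturn_t example_irq_handler(int irq, void *arg)
	{
		struct spear13xx_pcie *spear13xx_pcie = arg;	/* cookie is the wrapper */
		struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
		struct pcie_port *pp = &spear13xx_pcie->pp;
		unsigned int status = readl(&app_reg->int_sts);

		if (status & MSI_CTRL_INT)
			dw_handle_msi_irq(pp);	/* declared in pcie-designware.h */

		writel(status, &app_reg->int_clr);
		return IRQ_HANDLED;
	}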
dev_err(pcie->dev, "Slave error\n"); + dev_err(dev, "Slave error\n"); if (misc_stat & MSGF_MISC_SR_MASTER_ERR) - dev_err(pcie->dev, "Master error\n"); + dev_err(dev, "Master error\n"); if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR) - dev_err(pcie->dev, - "In Misc Ingress address translation error\n"); + dev_err(dev, "In Misc Ingress address translation error\n"); if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR) - dev_err(pcie->dev, - "In Misc Egress address translation error\n"); + dev_err(dev, "In Misc Egress address translation error\n"); if (misc_stat & MSGF_MISC_SR_FATAL_AER) - dev_err(pcie->dev, "Fatal Error in AER Capability\n"); + dev_err(dev, "Fatal Error in AER Capability\n"); if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER) - dev_err(pcie->dev, "Non-Fatal Error in AER Capability\n"); + dev_err(dev, "Non-Fatal Error in AER Capability\n"); if (misc_stat & MSGF_MISC_SR_CORR_AER) - dev_err(pcie->dev, "Correctable Error in AER Capability\n"); + dev_err(dev, "Correctable Error in AER Capability\n"); if (misc_stat & MSGF_MISC_SR_UR_DETECT) - dev_err(pcie->dev, "Unsupported request Detected\n"); + dev_err(dev, "Unsupported request Detected\n"); if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV) - dev_err(pcie->dev, "Non-Fatal Error Detected\n"); + dev_err(dev, "Non-Fatal Error Detected\n"); if (misc_stat & MSGF_MISC_SR_FATAL_DEV) - dev_err(pcie->dev, "Fatal Error Detected\n"); + dev_err(dev, "Fatal Error Detected\n"); if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH) - dev_info(pcie->dev, "Link Autonomous Bandwidth Management Status bit set\n"); + dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n"); if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH) - dev_info(pcie->dev, "Link Bandwidth Management Status bit set\n"); + dev_info(dev, "Link Bandwidth Management Status bit set\n"); /* Clear misc interrupt status */ nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS); @@ -494,20 +494,21 @@ static const struct irq_domain_ops dev_msi_domain_ops = { static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie) { #ifdef CONFIG_PCI_MSI - struct fwnode_handle *fwnode = of_node_to_fwnode(pcie->dev->of_node); + struct device *dev = pcie->dev; + struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); struct nwl_msi *msi = &pcie->msi; msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR, &dev_msi_domain_ops, pcie); if (!msi->dev_domain) { - dev_err(pcie->dev, "failed to create dev IRQ domain\n"); + dev_err(dev, "failed to create dev IRQ domain\n"); return -ENOMEM; } msi->msi_domain = pci_msi_create_irq_domain(fwnode, &nwl_msi_domain_info, msi->dev_domain); if (!msi->msi_domain) { - dev_err(pcie->dev, "failed to create msi IRQ domain\n"); + dev_err(dev, "failed to create msi IRQ domain\n"); irq_domain_remove(msi->dev_domain); return -ENOMEM; } @@ -517,12 +518,13 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie) static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) { - struct device_node *node = pcie->dev->of_node; + struct device *dev = pcie->dev; + struct device_node *node = dev->of_node; struct device_node *legacy_intc_node; legacy_intc_node = of_get_next_child(node, NULL); if (!legacy_intc_node) { - dev_err(pcie->dev, "No legacy intc node found\n"); + dev_err(dev, "No legacy intc node found\n"); return -EINVAL; } @@ -532,7 +534,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) pcie); if (!pcie->legacy_irq_domain) { - dev_err(pcie->dev, "failed to create IRQ domain\n"); + dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; } @@ -542,7 +544,8 @@ static int 
nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus) { - struct platform_device *pdev = to_platform_device(pcie->dev); + struct device *dev = pcie->dev; + struct platform_device *pdev = to_platform_device(dev); struct nwl_msi *msi = &pcie->msi; unsigned long base; int ret; @@ -557,7 +560,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus) /* Get msi_1 IRQ number */ msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1"); if (msi->irq_msi1 < 0) { - dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi1); + dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi1); ret = -EINVAL; goto err; } @@ -568,7 +571,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus) /* Get msi_0 IRQ number */ msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0"); if (msi->irq_msi0 < 0) { - dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi0); + dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi0); ret = -EINVAL; goto err; } @@ -579,7 +582,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus) /* Check for msii_present bit */ ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT; if (!ret) { - dev_err(pcie->dev, "MSI not present\n"); + dev_err(dev, "MSI not present\n"); ret = -EIO; goto err; } @@ -628,13 +631,14 @@ err: static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) { - struct platform_device *pdev = to_platform_device(pcie->dev); + struct device *dev = pcie->dev; + struct platform_device *pdev = to_platform_device(dev); u32 breg_val, ecam_val, first_busno = 0; int err; breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT; if (!breg_val) { - dev_err(pcie->dev, "BREG is not present\n"); + dev_err(dev, "BREG is not present\n"); return breg_val; } @@ -665,7 +669,7 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT; if (!ecam_val) { - dev_err(pcie->dev, "ECAM is not present\n"); + dev_err(dev, "ECAM is not present\n"); return ecam_val; } @@ -692,23 +696,23 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS)); if (nwl_pcie_link_up(pcie)) - dev_info(pcie->dev, "Link is UP\n"); + dev_info(dev, "Link is UP\n"); else - dev_info(pcie->dev, "Link is DOWN\n"); + dev_info(dev, "Link is DOWN\n"); /* Get misc IRQ number */ pcie->irq_misc = platform_get_irq_byname(pdev, "misc"); if (pcie->irq_misc < 0) { - dev_err(&pdev->dev, "failed to get misc IRQ %d\n", + dev_err(dev, "failed to get misc IRQ %d\n", pcie->irq_misc); return -EINVAL; } - err = devm_request_irq(pcie->dev, pcie->irq_misc, + err = devm_request_irq(dev, pcie->irq_misc, nwl_pcie_misc_handler, IRQF_SHARED, "nwl_pcie:misc", pcie); if (err) { - dev_err(pcie->dev, "fail to register misc IRQ#%d\n", + dev_err(dev, "fail to register misc IRQ#%d\n", pcie->irq_misc); return err; } @@ -744,31 +748,32 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) static int nwl_pcie_parse_dt(struct nwl_pcie *pcie, struct platform_device *pdev) { - struct device_node *node = pcie->dev->of_node; + struct device *dev = pcie->dev; + struct device_node *node = dev->of_node; struct resource *res; const char *type; /* Check for device type */ type = of_get_property(node, "device_type", NULL); if (!type || strcmp(type, "pci")) { - dev_err(pcie->dev, "invalid \"device_type\" %s\n", type); + dev_err(dev, "invalid \"device_type\" %s\n", type); return -EINVAL; } res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg"); - pcie->breg_base = devm_ioremap_resource(pcie->dev, res); + pcie->breg_base = devm_ioremap_resource(dev, res); if (IS_ERR(pcie->breg_base)) return PTR_ERR(pcie->breg_base); pcie->phys_breg_base = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg"); - pcie->pcireg_base = devm_ioremap_resource(pcie->dev, res); + pcie->pcireg_base = devm_ioremap_resource(dev, res); if (IS_ERR(pcie->pcireg_base)) return PTR_ERR(pcie->pcireg_base); pcie->phys_pcie_reg_base = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); - pcie->ecam_base = devm_ioremap_resource(pcie->dev, res); + pcie->ecam_base = devm_ioremap_resource(dev, res); if (IS_ERR(pcie->ecam_base)) return PTR_ERR(pcie->ecam_base); pcie->phys_ecam_base = res->start; @@ -776,8 +781,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie, /* Get intx IRQ number */ pcie->irq_intx = platform_get_irq_byname(pdev, "intx"); if (pcie->irq_intx < 0) { - dev_err(&pdev->dev, "failed to get intx IRQ %d\n", - pcie->irq_intx); + dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx); return -EINVAL; } @@ -794,7 +798,8 @@ static const struct of_device_id nwl_pcie_of_match[] = { static int nwl_pcie_probe(struct platform_device *pdev) { - struct device_node *node = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; struct nwl_pcie *pcie; struct pci_bus *bus; struct pci_bus *child; @@ -802,42 +807,42 @@ static int nwl_pcie_probe(struct platform_device *pdev) resource_size_t iobase = 0; LIST_HEAD(res); - pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; - pcie->dev = &pdev->dev; + pcie->dev = dev; pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT; err = nwl_pcie_parse_dt(pcie, pdev); if (err) { - dev_err(pcie->dev, "Parsing DT failed\n"); + dev_err(dev, "Parsing DT failed\n"); return err; } err = nwl_pcie_bridge_init(pcie); if (err) { - dev_err(pcie->dev, "HW Initialization failed\n"); + dev_err(dev, "HW Initialization failed\n"); return err; } err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase); if (err) { - dev_err(pcie->dev, "Getting bridge resources failed\n"); + dev_err(dev, "Getting bridge resources failed\n"); return err; } - err = devm_request_pci_bus_resources(pcie->dev, &res); + err = devm_request_pci_bus_resources(dev, &res); if (err) goto error; err = nwl_pcie_init_irq_domain(pcie); if (err) { - dev_err(pcie->dev, "Failed creating IRQ Domain\n"); + dev_err(dev, "Failed creating IRQ Domain\n"); goto error; } - bus = pci_create_root_bus(&pdev->dev, pcie->root_busno, + bus = pci_create_root_bus(dev, pcie->root_busno, &nwl_pcie_ops, pcie, &res); if (!bus) { err = -ENOMEM; @@ -847,8 +852,7 @@ static int nwl_pcie_probe(struct platform_device *pdev) if (IS_ENABLED(CONFIG_PCI_MSI)) { err = nwl_pcie_enable_msi(pcie, bus); if (err < 0) { - dev_err(&pdev->dev, - "failed to enable MSI support: %d\n", err); + dev_err(dev, "failed to enable MSI support: %d\n", err); goto error; } } @@ -857,7 +861,6 @@ static int nwl_pcie_probe(struct platform_device *pdev) list_for_each_entry(child, &bus->children, node) pcie_bus_configure_settings(child); pci_bus_add_devices(bus); - platform_set_drvdata(pdev, pcie); return 0; error: diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c index be568039d9d0..c8616fadccf1 100644 --- a/drivers/pci/host/pcie-xilinx.c +++ b/drivers/pci/host/pcie-xilinx.c @@ 
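Where the nwl driver still needs a platform API, it now derives the pdev from the cached dev instead of carrying both around. A sketch of that idiom, mirroring the misc-IRQ lookup above — example_get_misc_irq() is an illustrative name:

	static int example_get_misc_irq(struct nwl_pcie *pcie)
	{
		struct device *dev = pcie->dev;
		struct platform_device *pdev = to_platform_device(dev);
		int irq = platform_get_irq_byname(pdev, "misc");

		if (irq < 0)
			dev_err(dev, "failed to get misc IRQ %d\n", irq);
		return irq;
	}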
@@ -140,10 +140,11 @@ static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
  */
 static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
 {
+	struct device *dev = port->dev;
 	unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
 
 	if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
-		dev_dbg(port->dev, "Requester ID %lu\n",
+		dev_dbg(dev, "Requester ID %lu\n",
 			val & XILINX_PCIE_RPEFR_REQ_ID);
 		pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
 			   XILINX_PCIE_REG_RPEFR);
@@ -228,11 +229,10 @@ static void xilinx_pcie_destroy_msi(unsigned int irq)
 
 /**
  * xilinx_pcie_assign_msi - Allocate MSI number
- * @port: PCIe port structure
  *
  * Return: A valid IRQ on success and error value on failure.
  */
-static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
+static int xilinx_pcie_assign_msi(void)
 {
 	int pos;
 
@@ -275,7 +275,7 @@ static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip,
 	struct msi_msg msg;
 	phys_addr_t msg_addr;
 
-	hwirq = xilinx_pcie_assign_msi(port);
+	hwirq = xilinx_pcie_assign_msi();
 	if (hwirq < 0)
 		return hwirq;
 
@@ -383,6 +383,7 @@ static const struct irq_domain_ops intx_domain_ops = {
 static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
 {
 	struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
+	struct device *dev = port->dev;
 	u32 val, mask, status, msi_data;
 
 	/* Read interrupt decode and mask registers */
@@ -394,32 +395,32 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
 		return IRQ_NONE;
 
 	if (status & XILINX_PCIE_INTR_LINK_DOWN)
-		dev_warn(port->dev, "Link Down\n");
+		dev_warn(dev, "Link Down\n");
 
 	if (status & XILINX_PCIE_INTR_ECRC_ERR)
-		dev_warn(port->dev, "ECRC failed\n");
+		dev_warn(dev, "ECRC failed\n");
 
 	if (status & XILINX_PCIE_INTR_STR_ERR)
-		dev_warn(port->dev, "Streaming error\n");
+		dev_warn(dev, "Streaming error\n");
 
 	if (status & XILINX_PCIE_INTR_HOT_RESET)
-		dev_info(port->dev, "Hot reset\n");
+		dev_info(dev, "Hot reset\n");
 
 	if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
-		dev_warn(port->dev, "ECAM access timeout\n");
+		dev_warn(dev, "ECAM access timeout\n");
 
 	if (status & XILINX_PCIE_INTR_CORRECTABLE) {
-		dev_warn(port->dev, "Correctable error message\n");
+		dev_warn(dev, "Correctable error message\n");
 		xilinx_pcie_clear_err_interrupts(port);
 	}
 
 	if (status & XILINX_PCIE_INTR_NONFATAL) {
-		dev_warn(port->dev, "Non fatal error message\n");
+		dev_warn(dev, "Non fatal error message\n");
 		xilinx_pcie_clear_err_interrupts(port);
 	}
 
 	if (status & XILINX_PCIE_INTR_FATAL) {
-		dev_warn(port->dev, "Fatal error message\n");
+		dev_warn(dev, "Fatal error message\n");
 		xilinx_pcie_clear_err_interrupts(port);
 	}
 
@@ -429,7 +430,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
 
 		/* Check whether interrupt valid */
 		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
-			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+			dev_warn(dev, "RP Intr FIFO1 read error\n");
 			goto error;
 		}
 
@@ -451,7 +452,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
 			val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
 
 			if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
-				dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+				dev_warn(dev, "RP Intr FIFO1 read error\n");
 				goto error;
 			}
 
@@ -471,31 +472,31 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
 	}
 
 	if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
-		dev_warn(port->dev, "Slave unsupported request\n");
+		dev_warn(dev, "Slave unsupported request\n");
 
 	if (status & XILINX_PCIE_INTR_SLV_UNEXP)
-		dev_warn(port->dev, "Slave unexpected completion\n");
+		dev_warn(dev, "Slave unexpected completion\n");
 
 	if (status & XILINX_PCIE_INTR_SLV_COMPL)
-		dev_warn(port->dev, "Slave completion timeout\n");
+		dev_warn(dev, "Slave completion timeout\n");
 
 	if (status & XILINX_PCIE_INTR_SLV_ERRP)
-		dev_warn(port->dev, "Slave Error Poison\n");
+		dev_warn(dev, "Slave Error Poison\n");
 
 	if (status & XILINX_PCIE_INTR_SLV_CMPABT)
-		dev_warn(port->dev, "Slave Completer Abort\n");
+		dev_warn(dev, "Slave Completer Abort\n");
 
 	if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
-		dev_warn(port->dev, "Slave Illegal Burst\n");
+		dev_warn(dev, "Slave Illegal Burst\n");
 
 	if (status & XILINX_PCIE_INTR_MST_DECERR)
-		dev_warn(port->dev, "Master decode error\n");
+		dev_warn(dev, "Master decode error\n");
 
 	if (status & XILINX_PCIE_INTR_MST_SLVERR)
-		dev_warn(port->dev, "Master slave error\n");
+		dev_warn(dev, "Master slave error\n");
 
 	if (status & XILINX_PCIE_INTR_MST_ERRP)
-		dev_warn(port->dev, "Master error poison\n");
+		dev_warn(dev, "Master error poison\n");
 
 error:
 	/* Clear the Interrupt Decode register */
@@ -554,10 +555,12 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
  */
 static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
 {
+	struct device *dev = port->dev;
+
 	if (xilinx_pcie_link_is_up(port))
-		dev_info(port->dev, "PCIe Link is UP\n");
+		dev_info(dev, "PCIe Link is UP\n");
 	else
-		dev_info(port->dev, "PCIe Link is DOWN\n");
+		dev_info(dev, "PCIe Link is DOWN\n");
 
 	/* Disable all interrupts */
 	pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
@@ -627,8 +630,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
  */
 static int xilinx_pcie_probe(struct platform_device *pdev)
 {
-	struct xilinx_pcie_port *port;
 	struct device *dev = &pdev->dev;
+	struct xilinx_pcie_port *port;
 	struct pci_bus *bus;
 	int err;
 	resource_size_t iobase = 0;
@@ -668,15 +671,14 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
 	if (err)
 		goto error;
 
-	bus = pci_create_root_bus(&pdev->dev, 0,
-				  &xilinx_pcie_ops, port, &res);
+	bus = pci_create_root_bus(dev, 0, &xilinx_pcie_ops, port, &res);
 	if (!bus) {
 		err = -ENOMEM;
 		goto error;
 	}
 
 #ifdef CONFIG_PCI_MSI
-	xilinx_pcie_msi_chip.dev = port->dev;
+	xilinx_pcie_msi_chip.dev = dev;
 	bus->msi = &xilinx_pcie_msi_chip;
 #endif
 	pci_scan_child_bus(bus);
@@ -685,8 +687,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
 	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
 #endif
 	pci_bus_add_devices(bus);
-	platform_set_drvdata(pdev, port);
-
 	return 0;
 
 error:
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 460fa6708bfc..2acdb0d6ea89 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -405,7 +405,7 @@ static inline void acerhdf_enable_kernelmode(void)
 	kernelmode = 1;
 
 	thz_dev->polling_delay = interval*1000;
-	thermal_zone_device_update(thz_dev);
+	thermal_zone_device_update(thz_dev, THERMAL_EVENT_UNSPECIFIED);
 	pr_notice("kernel mode fan control ON\n");
 }
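The acerhdf hunk reflects a thermal-core API change in this series: thermal_zone_device_update() gains an event argument, and callers with no specific trigger pass THERMAL_EVENT_UNSPECIFIED. A sketch of a converted caller — example_poke_thermal() is illustrative:

	static void example_poke_thermal(struct thermal_zone_device *tz)
	{
		/* second argument is the new event hint; neutral value here */
		thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
	}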
size_t count, const char *method) { int rv, value; - rv = parse_arg(buf, count, &value); - if (rv <= 0) + rv = kstrtoint(buf, 0, &value); + if (rv < 0) return rv; if (write_acpi_int(asus->handle, method, value)) return -ENODEV; - return rv; + return count; } /* @@ -975,15 +964,17 @@ static ssize_t ledd_store(struct device *dev, struct device_attribute *attr, struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; - rv = parse_arg(buf, count, &value); - if (rv > 0) { - if (write_acpi_int(asus->handle, METHOD_LEDD, value)) { - pr_warn("LED display write failed\n"); - return -ENODEV; - } - asus->ledd_status = (u32) value; + rv = kstrtoint(buf, 0, &value); + if (rv < 0) + return rv; + + if (write_acpi_int(asus->handle, METHOD_LEDD, value)) { + pr_warn("LED display write failed\n"); + return -ENODEV; } - return rv; + + asus->ledd_status = (u32) value; + return count; } static DEVICE_ATTR_RW(ledd); @@ -1148,10 +1139,12 @@ static ssize_t display_store(struct device *dev, struct device_attribute *attr, struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; - rv = parse_arg(buf, count, &value); - if (rv > 0) - asus_set_display(asus, value); - return rv; + rv = kstrtoint(buf, 0, &value); + if (rv < 0) + return rv; + + asus_set_display(asus, value); + return count; } static DEVICE_ATTR_WO(display); @@ -1190,11 +1183,12 @@ static ssize_t ls_switch_store(struct device *dev, struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; - rv = parse_arg(buf, count, &value); - if (rv > 0) - asus_als_switch(asus, value ? 1 : 0); + rv = kstrtoint(buf, 0, &value); + if (rv < 0) + return rv; - return rv; + asus_als_switch(asus, value ? 1 : 0); + return count; } static DEVICE_ATTR_RW(ls_switch); @@ -1219,14 +1213,15 @@ static ssize_t ls_level_store(struct device *dev, struct device_attribute *attr, struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; - rv = parse_arg(buf, count, &value); - if (rv > 0) { - value = (0 < value) ? ((15 < value) ? 15 : value) : 0; - /* 0 <= value <= 15 */ - asus_als_level(asus, value); - } + rv = kstrtoint(buf, 0, &value); + if (rv < 0) + return rv; + + value = (0 < value) ? ((15 < value) ? 
15 : value) : 0; + /* 0 <= value <= 15 */ + asus_als_level(asus, value); - return rv; + return count; } static DEVICE_ATTR_RW(ls_level); @@ -1301,14 +1296,14 @@ static ssize_t gps_store(struct device *dev, struct device_attribute *attr, int rv, value; int ret; - rv = parse_arg(buf, count, &value); - if (rv <= 0) - return -EINVAL; + rv = kstrtoint(buf, 0, &value); + if (rv < 0) + return rv; ret = asus_gps_switch(asus, !!value); if (ret) return ret; rfkill_set_sw_state(asus->gps.rfkill, !value); - return rv; + return count; } static DEVICE_ATTR_RW(gps); diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index adecc1c555f0..26e4cbc34db8 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -27,6 +27,7 @@ #include <linux/input/sparse-keymap.h> #include <linux/fb.h> #include <linux/dmi.h> +#include <linux/i8042.h> #include "asus-wmi.h" @@ -55,10 +56,34 @@ MODULE_PARM_DESC(wapf, "WAPF value"); static struct quirk_entry *quirks; +static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str, + struct serio *port) +{ + static bool extended; + bool ret = false; + + if (str & I8042_STR_AUXDATA) + return false; + + if (unlikely(data == 0xe1)) { + extended = true; + ret = true; + } else if (unlikely(extended)) { + extended = false; + ret = true; + } + + return ret; +} + static struct quirk_entry quirk_asus_unknown = { .wapf = 0, }; +static struct quirk_entry quirk_asus_q500a = { + .i8042_filter = asus_q500a_i8042_filter, +}; + /* * For those machines that need software to control bt/wifi status * and can't adjust brightness through ACPI interface @@ -87,6 +112,10 @@ static struct quirk_entry quirk_no_rfkill_wapf4 = { .no_rfkill = true, }; +static struct quirk_entry quirk_asus_ux303ub = { + .wmi_backlight_native = true, +}; + static int dmi_matched(const struct dmi_system_id *dmi) { quirks = dmi->driver_data; @@ -96,6 +125,15 @@ static int dmi_matched(const struct dmi_system_id *dmi) static const struct dmi_system_id asus_quirks[] = { { .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. Q500A", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "Q500A"), + }, + .driver_data = &quirk_asus_q500a, + }, + { + .callback = dmi_matched, .ident = "ASUSTeK COMPUTER INC. U32U", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), @@ -351,11 +389,22 @@ static const struct dmi_system_id asus_quirks[] = { }, .driver_data = &quirk_no_rfkill, }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. 
UX303UB", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"), + }, + .driver_data = &quirk_asus_ux303ub, + }, {}, }; static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver) { + int ret; + quirks = &quirk_asus_unknown; dmi_check_system(asus_quirks); @@ -367,6 +416,15 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver) quirks->wapf = wapf; else wapf = quirks->wapf; + + if (quirks->i8042_filter) { + ret = i8042_install_filter(quirks->i8042_filter); + if (ret) { + pr_warn("Unable to install key filter\n"); + return; + } + pr_info("Using i8042 filter function for receiving events\n"); + } } static const struct key_entry asus_nb_wmi_keymap[] = { diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 7c093a0b78bb..ce6ca31a2d09 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -2084,6 +2084,9 @@ static int asus_wmi_add(struct platform_device *pdev) if (asus->driver->quirks->wmi_backlight_power) acpi_video_set_dmi_backlight_type(acpi_backlight_vendor); + if (asus->driver->quirks->wmi_backlight_native) + acpi_video_set_dmi_backlight_type(acpi_backlight_native); + if (acpi_video_get_backlight_type() == acpi_backlight_vendor) { err = asus_wmi_backlight_init(asus); if (err && err != -ENODEV) diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h index 5de1df510ebd..0e19014e9f54 100644 --- a/drivers/platform/x86/asus-wmi.h +++ b/drivers/platform/x86/asus-wmi.h @@ -28,6 +28,7 @@ #define _ASUS_WMI_H_ #include <linux/platform_device.h> +#include <linux/i8042.h> #define ASUS_WMI_KEY_IGNORE (-1) #define ASUS_WMI_BRN_DOWN 0x20 @@ -43,6 +44,7 @@ struct quirk_entry { bool scalar_panel_brightness; bool store_backlight_power; bool wmi_backlight_power; + bool wmi_backlight_native; int wapf; /* * For machines with AMD graphic chips, it will send out WMI event @@ -51,6 +53,9 @@ struct quirk_entry { * and let the ACPI interrupt to send out the key event. 
*/ int no_display_toggle; + + bool (*i8042_filter)(unsigned char data, unsigned char str, + struct serio *serio); }; struct asus_wmi_driver { diff --git a/drivers/platform/x86/dell-smo8800.c b/drivers/platform/x86/dell-smo8800.c index 0aec4fd4c48e..37e646034ef8 100644 --- a/drivers/platform/x86/dell-smo8800.c +++ b/drivers/platform/x86/dell-smo8800.c @@ -24,6 +24,7 @@ #include <linux/acpi.h> #include <linux/interrupt.h> #include <linux/miscdevice.h> +#include <linux/uaccess.h> struct smo8800_device { u32 irq; /* acpi device irq */ diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 520b58a04daa..e8b1b836ca2d 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -100,7 +100,7 @@ static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev) struct dentry *dir, *file; dir = debugfs_create_dir("pmc_core", NULL); - if (IS_ERR_OR_NULL(dir)) + if (!dir) return -ENOMEM; pmcdev->dbgfs_dir = dir; diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index a511d518206b..0bf51d574fa9 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -522,48 +522,36 @@ static struct resource telemetry_res[] = { static int ipc_create_punit_device(void) { struct platform_device *pdev; - int ret; - - pdev = platform_device_alloc(PUNIT_DEVICE_NAME, -1); - if (!pdev) { - dev_err(ipcdev.dev, "Failed to alloc punit platform device\n"); - return -ENOMEM; - } - - pdev->dev.parent = ipcdev.dev; - ret = platform_device_add_resources(pdev, punit_res_array, - ARRAY_SIZE(punit_res_array)); - if (ret) { - dev_err(ipcdev.dev, "Failed to add platform punit resources\n"); - goto err; - } + const struct platform_device_info pdevinfo = { + .parent = ipcdev.dev, + .name = PUNIT_DEVICE_NAME, + .id = -1, + .res = punit_res_array, + .num_res = ARRAY_SIZE(punit_res_array), + }; + + pdev = platform_device_register_full(&pdevinfo); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); - ret = platform_device_add(pdev); - if (ret) { - dev_err(ipcdev.dev, "Failed to add punit platform device\n"); - goto err; - } ipcdev.punit_dev = pdev; return 0; -err: - platform_device_put(pdev); - return ret; } static int ipc_create_tco_device(void) { struct platform_device *pdev; struct resource *res; - int ret; - - pdev = platform_device_alloc(TCO_DEVICE_NAME, -1); - if (!pdev) { - dev_err(ipcdev.dev, "Failed to alloc tco platform device\n"); - return -ENOMEM; - } - - pdev->dev.parent = ipcdev.dev; + const struct platform_device_info pdevinfo = { + .parent = ipcdev.dev, + .name = TCO_DEVICE_NAME, + .id = -1, + .res = tco_res, + .num_res = ARRAY_SIZE(tco_res), + .data = &tco_info, + .size_data = sizeof(tco_info), + }; res = tco_res + TCO_RESOURCE_ACPI_IO; res->start = ipcdev.acpi_io_base + TCO_BASE_OFFSET; @@ -577,45 +565,26 @@ static int ipc_create_tco_device(void) res->start = ipcdev.gcr_base + TCO_PMC_OFFSET; res->end = res->start + TCO_PMC_SIZE - 1; - ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res)); - if (ret) { - dev_err(ipcdev.dev, "Failed to add tco platform resources\n"); - goto err; - } + pdev = platform_device_register_full(&pdevinfo); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); - ret = platform_device_add_data(pdev, &tco_info, sizeof(tco_info)); - if (ret) { - dev_err(ipcdev.dev, "Failed to add tco platform data\n"); - goto err; - } - - ret = platform_device_add(pdev); - if (ret) { - dev_err(ipcdev.dev, "Failed to add tco platform device\n"); - goto err; - } ipcdev.tco_dev 
= pdev; return 0; -err: - platform_device_put(pdev); - return ret; } static int ipc_create_telemetry_device(void) { struct platform_device *pdev; struct resource *res; - int ret; - - pdev = platform_device_alloc(TELEMETRY_DEVICE_NAME, -1); - if (!pdev) { - dev_err(ipcdev.dev, - "Failed to allocate telemetry platform device\n"); - return -ENOMEM; - } - - pdev->dev.parent = ipcdev.dev; + const struct platform_device_info pdevinfo = { + .parent = ipcdev.dev, + .name = TELEMETRY_DEVICE_NAME, + .id = -1, + .res = telemetry_res, + .num_res = ARRAY_SIZE(telemetry_res), + }; res = telemetry_res + TELEMETRY_RESOURCE_PUNIT_SSRAM; res->start = ipcdev.telem_punit_ssram_base; @@ -625,26 +594,13 @@ static int ipc_create_telemetry_device(void) res->start = ipcdev.telem_pmc_ssram_base; res->end = res->start + ipcdev.telem_pmc_ssram_size - 1; - ret = platform_device_add_resources(pdev, telemetry_res, - ARRAY_SIZE(telemetry_res)); - if (ret) { - dev_err(ipcdev.dev, - "Failed to add telemetry platform resources\n"); - goto err; - } + pdev = platform_device_register_full(&pdevinfo); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); - ret = platform_device_add(pdev); - if (ret) { - dev_err(ipcdev.dev, - "Failed to add telemetry platform device\n"); - goto err; - } ipcdev.telemetry_dev = pdev; return 0; -err: - platform_device_put(pdev); - return ret; } static int ipc_create_pmc_devices(void) diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 9d60a40d8b3f..074bf2fa1c55 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -321,10 +321,9 @@ static int write_acpi_int(const char *methodName, int val) static acpi_status tci_raw(struct toshiba_acpi_dev *dev, const u32 in[TCI_WORDS], u32 out[TCI_WORDS]) { + union acpi_object in_objs[TCI_WORDS], out_objs[TCI_WORDS + 1]; struct acpi_object_list params; - union acpi_object in_objs[TCI_WORDS]; struct acpi_buffer results; - union acpi_object out_objs[TCI_WORDS + 1]; acpi_status status; int i; @@ -387,9 +386,8 @@ static int sci_open(struct toshiba_acpi_dev *dev) { u32 in[TCI_WORDS] = { SCI_OPEN, 0, 0, 0, 0, 0 }; u32 out[TCI_WORDS]; - acpi_status status; + acpi_status status = tci_raw(dev, in, out); - status = tci_raw(dev, in, out); if (ACPI_FAILURE(status)) { pr_err("ACPI call to open SCI failed\n"); return 0; @@ -425,9 +423,8 @@ static void sci_close(struct toshiba_acpi_dev *dev) { u32 in[TCI_WORDS] = { SCI_CLOSE, 0, 0, 0, 0, 0 }; u32 out[TCI_WORDS]; - acpi_status status; + acpi_status status = tci_raw(dev, in, out); - status = tci_raw(dev, in, out); if (ACPI_FAILURE(status)) { pr_err("ACPI call to close SCI failed\n"); return; @@ -479,10 +476,15 @@ static void toshiba_illumination_available(struct toshiba_acpi_dev *dev) status = tci_raw(dev, in, out); sci_close(dev); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)) { pr_err("ACPI call to query Illumination support failed\n"); - else if (out[0] == TOS_SUCCESS) - dev->illumination_supported = 1; + return; + } + + if (out[0] != TOS_SUCCESS) + return; + + dev->illumination_supported = 1; } static void toshiba_illumination_set(struct led_classdev *cdev, @@ -509,7 +511,8 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev) { struct toshiba_acpi_dev *dev = container_of(cdev, struct toshiba_acpi_dev, led_dev); - u32 state, result; + u32 result; + u32 state; /* First request : initialize communication. 
*/ if (!sci_open(dev)) @@ -546,24 +549,28 @@ static void toshiba_kbd_illum_available(struct toshiba_acpi_dev *dev) sci_close(dev); if (ACPI_FAILURE(status)) { pr_err("ACPI call to query kbd illumination support failed\n"); - } else if (out[0] == TOS_SUCCESS) { - /* - * Check for keyboard backlight timeout max value, - * previous kbd backlight implementation set this to - * 0x3c0003, and now the new implementation set this - * to 0x3c001a, use this to distinguish between them. - */ - if (out[3] == SCI_KBD_TIME_MAX) - dev->kbd_type = 2; - else - dev->kbd_type = 1; - /* Get the current keyboard backlight mode */ - dev->kbd_mode = out[2] & SCI_KBD_MODE_MASK; - /* Get the current time (1-60 seconds) */ - dev->kbd_time = out[2] >> HCI_MISC_SHIFT; - /* Flag as supported */ - dev->kbd_illum_supported = 1; + return; } + + if (out[0] != TOS_SUCCESS) + return; + + /* + * Check for keyboard backlight timeout max value, + * previous kbd backlight implementation set this to + * 0x3c0003, and now the new implementation set this + * to 0x3c001a, use this to distinguish between them. + */ + if (out[3] == SCI_KBD_TIME_MAX) + dev->kbd_type = 2; + else + dev->kbd_type = 1; + /* Get the current keyboard backlight mode */ + dev->kbd_mode = out[2] & SCI_KBD_MODE_MASK; + /* Get the current time (1-60 seconds) */ + dev->kbd_time = out[2] >> HCI_MISC_SHIFT; + /* Flag as supported */ + dev->kbd_illum_supported = 1; } static int toshiba_kbd_illum_status_set(struct toshiba_acpi_dev *dev, u32 time) @@ -672,9 +679,9 @@ static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state) /* Eco Mode support */ static void toshiba_eco_mode_available(struct toshiba_acpi_dev *dev) { - acpi_status status; u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 0, 0, 0 }; u32 out[TCI_WORDS]; + acpi_status status; dev->eco_supported = 0; dev->eco_led_registered = false; @@ -682,7 +689,10 @@ static void toshiba_eco_mode_available(struct toshiba_acpi_dev *dev) status = tci_raw(dev, in, out); if (ACPI_FAILURE(status)) { pr_err("ACPI call to get ECO led failed\n"); - } else if (out[0] == TOS_INPUT_DATA_ERROR) { + return; + } + + if (out[0] == TOS_INPUT_DATA_ERROR) { /* * If we receive 0x8300 (Input Data Error), it means that the * LED device is present, but that we just screwed the input @@ -694,10 +704,15 @@ static void toshiba_eco_mode_available(struct toshiba_acpi_dev *dev) */ in[3] = 1; status = tci_raw(dev, in, out); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)) { pr_err("ACPI call to get ECO led failed\n"); - else if (out[0] == TOS_SUCCESS) - dev->eco_supported = 1; + return; + } + + if (out[0] != TOS_SUCCESS) + return; + + dev->eco_supported = 1; } } @@ -714,10 +729,11 @@ toshiba_eco_mode_get_status(struct led_classdev *cdev) if (ACPI_FAILURE(status)) { pr_err("ACPI call to get ECO led failed\n"); return LED_OFF; - } else if (out[0] != TOS_SUCCESS) { - return LED_OFF; } + if (out[0] != TOS_SUCCESS) + return LED_OFF; + return out[2] ? 
LED_FULL : LED_OFF; } @@ -751,10 +767,15 @@ static void toshiba_accelerometer_available(struct toshiba_acpi_dev *dev) * this call also serves as initialization */ status = tci_raw(dev, in, out); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)) { pr_err("ACPI call to query the accelerometer failed\n"); - else if (out[0] == TOS_SUCCESS) - dev->accelerometer_supported = 1; + return; + } + + if (out[0] != TOS_SUCCESS) + return; + + dev->accelerometer_supported = 1; } static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev, @@ -769,15 +790,18 @@ static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev, if (ACPI_FAILURE(status)) { pr_err("ACPI call to query the accelerometer failed\n"); return -EIO; - } else if (out[0] == TOS_NOT_SUPPORTED) { - return -ENODEV; - } else if (out[0] == TOS_SUCCESS) { - *xy = out[2]; - *z = out[4]; - return 0; } - return -EIO; + if (out[0] == TOS_NOT_SUPPORTED) + return -ENODEV; + + if (out[0] != TOS_SUCCESS) + return -EIO; + + *xy = out[2]; + *z = out[4]; + + return 0; } /* Sleep (Charge and Music) utilities support */ @@ -797,24 +821,29 @@ static void toshiba_usb_sleep_charge_available(struct toshiba_acpi_dev *dev) pr_err("ACPI call to get USB Sleep and Charge mode failed\n"); sci_close(dev); return; - } else if (out[0] == TOS_NOT_SUPPORTED) { + } + + if (out[0] != TOS_SUCCESS) { sci_close(dev); return; - } else if (out[0] == TOS_SUCCESS) { - dev->usbsc_mode_base = out[4]; } + dev->usbsc_mode_base = out[4]; + in[5] = SCI_USB_CHARGE_BAT_LVL; status = tci_raw(dev, in, out); sci_close(dev); if (ACPI_FAILURE(status)) { pr_err("ACPI call to get USB Sleep and Charge mode failed\n"); - } else if (out[0] == TOS_SUCCESS) { - dev->usbsc_bat_level = out[2]; - /* Flag as supported */ - dev->usb_sleep_charge_supported = 1; + return; } + if (out[0] != TOS_SUCCESS) + return; + + dev->usbsc_bat_level = out[2]; + /* Flag as supported */ + dev->usb_sleep_charge_supported = 1; } static int toshiba_usb_sleep_charge_get(struct toshiba_acpi_dev *dev, @@ -868,14 +897,19 @@ static int toshiba_sleep_functions_status_get(struct toshiba_acpi_dev *dev, sci_close(dev); if (ACPI_FAILURE(status)) { pr_err("ACPI call to get USB S&C battery level failed\n"); - } else if (out[0] == TOS_NOT_SUPPORTED) { - return -ENODEV; - } else if (out[0] == TOS_SUCCESS) { - *mode = out[2]; - return 0; + return -EIO; } - return -EIO; + if (out[0] == TOS_NOT_SUPPORTED) + return -ENODEV; + + if (out[0] != TOS_SUCCESS) + return -EIO; + + *mode = out[2]; + + return 0; + } static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev, @@ -892,9 +926,12 @@ static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev, in[5] = SCI_USB_CHARGE_BAT_LVL; status = tci_raw(dev, in, out); sci_close(dev); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)) { pr_err("ACPI call to set USB S&C battery level failed\n"); - else if (out[0] == TOS_NOT_SUPPORTED) + return -EIO; + } + + if (out[0] == TOS_NOT_SUPPORTED) return -ENODEV; return out[0] == TOS_SUCCESS ? 
0 : -EIO; @@ -915,14 +952,18 @@ static int toshiba_usb_rapid_charge_get(struct toshiba_acpi_dev *dev, sci_close(dev); if (ACPI_FAILURE(status)) { pr_err("ACPI call to get USB Rapid Charge failed\n"); - } else if (out[0] == TOS_NOT_SUPPORTED) { - return -ENODEV; - } else if (out[0] == TOS_SUCCESS || out[0] == TOS_SUCCESS2) { - *state = out[2]; - return 0; + return -EIO; } - return -EIO; + if (out[0] == TOS_NOT_SUPPORTED) + return -ENODEV; + + if (out[0] != TOS_SUCCESS && out[0] != TOS_SUCCESS2) + return -EIO; + + *state = out[2]; + + return 0; } static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev, @@ -939,9 +980,12 @@ static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev, in[5] = SCI_USB_CHARGE_RAPID_DSP; status = tci_raw(dev, in, out); sci_close(dev); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)) { pr_err("ACPI call to set USB Rapid Charge failed\n"); - else if (out[0] == TOS_NOT_SUPPORTED) + return -EIO; + } + + if (out[0] == TOS_NOT_SUPPORTED) return -ENODEV; return (out[0] == TOS_SUCCESS || out[0] == TOS_SUCCESS2) ? 0 : -EIO; @@ -1097,14 +1141,18 @@ static int toshiba_hotkey_event_type_get(struct toshiba_acpi_dev *dev, status = tci_raw(dev, in, out); if (ACPI_FAILURE(status)) { pr_err("ACPI call to get System type failed\n"); - } else if (out[0] == TOS_NOT_SUPPORTED) { - return -ENODEV; - } else if (out[0] == TOS_SUCCESS) { - *type = out[3]; - return 0; + return -EIO; } - return -EIO; + if (out[0] == TOS_NOT_SUPPORTED) + return -ENODEV; + + if (out[0] != TOS_SUCCESS) + return -EIO; + + *type = out[3]; + + return 0; } /* Wireless status (RFKill, WLAN, BT, WWAN) */ @@ -1154,7 +1202,6 @@ static void toshiba_wwan_available(struct toshiba_acpi_dev *dev) */ in[3] = HCI_WIRELESS_WWAN; status = tci_raw(dev, in, out); - if (ACPI_FAILURE(status)) { pr_err("ACPI call to get WWAN status failed\n"); return; @@ -1174,7 +1221,6 @@ static int toshiba_wwan_set(struct toshiba_acpi_dev *dev, u32 state) in[3] = HCI_WIRELESS_WWAN_STATUS; status = tci_raw(dev, in, out); - if (ACPI_FAILURE(status)) { pr_err("ACPI call to set WWAN status failed\n"); return -EIO; @@ -1193,7 +1239,6 @@ static int toshiba_wwan_set(struct toshiba_acpi_dev *dev, u32 state) */ in[3] = HCI_WIRELESS_WWAN_POWER; status = tci_raw(dev, in, out); - if (ACPI_FAILURE(status)) { pr_err("ACPI call to set WWAN power failed\n"); return -EIO; @@ -1216,8 +1261,10 @@ static void toshiba_cooling_method_available(struct toshiba_acpi_dev *dev) dev->max_cooling_method = 0; status = tci_raw(dev, in, out); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)) { pr_err("ACPI call to get Cooling Method failed\n"); + return; + } if (out[0] != TOS_SUCCESS && out[0] != TOS_SUCCESS2) return; @@ -1244,7 +1291,7 @@ static int toshiba_cooling_method_set(struct toshiba_acpi_dev *dev, u32 state) u32 result = hci_write(dev, HCI_COOLING_METHOD, state); if (result == TOS_FAILURE) - pr_err("ACPI call to get Cooling Method failed\n"); + pr_err("ACPI call to set Cooling Method failed\n"); if (result == TOS_NOT_SUPPORTED) return -ENODEV; @@ -1282,9 +1329,9 @@ static struct proc_dir_entry *toshiba_proc_dir; /* LCD Brightness */ static int __get_lcd_brightness(struct toshiba_acpi_dev *dev) { + int brightness = 0; u32 result; u32 value; - int brightness = 0; if (dev->tr_backlight_supported) { int ret = get_tr_backlight_status(dev, &value); @@ -1301,10 +1348,10 @@ static int __get_lcd_brightness(struct toshiba_acpi_dev *dev) pr_err("ACPI call to get LCD Brightness failed\n"); else if (result == TOS_NOT_SUPPORTED) return -ENODEV; - 
if (result == TOS_SUCCESS) - return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT); - return -EIO; + return result == TOS_SUCCESS ? + brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT) : + -EIO; } static int get_lcd_brightness(struct backlight_device *bd) @@ -1325,15 +1372,15 @@ static int lcd_proc_show(struct seq_file *m, void *v) levels = dev->backlight_dev->props.max_brightness + 1; value = get_lcd_brightness(dev->backlight_dev); - if (value >= 0) { - seq_printf(m, "brightness: %d\n", value); - seq_printf(m, "brightness_levels: %d\n", levels); - return 0; + if (value < 0) { + pr_err("Error reading LCD brightness\n"); + return value; } - pr_err("Error reading LCD brightness\n"); + seq_printf(m, "brightness: %d\n", value); + seq_printf(m, "brightness_levels: %d\n", levels); - return -EIO; + return 0; } static int lcd_proc_open(struct inode *inode, struct file *file) @@ -1377,7 +1424,7 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf, struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file)); char cmd[42]; size_t len; - int levels = dev->backlight_dev->props.max_brightness + 1; + int levels; int value; len = min(count, sizeof(cmd) - 1); @@ -1385,6 +1432,7 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf, return -EFAULT; cmd[len] = '\0'; + levels = dev->backlight_dev->props.max_brightness + 1; if (sscanf(cmd, " brightness : %i", &value) != 1 && value < 0 && value > levels) return -EINVAL; @@ -1420,20 +1468,21 @@ static int get_video_status(struct toshiba_acpi_dev *dev, u32 *status) static int video_proc_show(struct seq_file *m, void *v) { struct toshiba_acpi_dev *dev = m->private; + int is_lcd, is_crt, is_tv; u32 value; - if (!get_video_status(dev, &value)) { - int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0; - int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0; - int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0; + if (get_video_status(dev, &value)) + return -EIO; - seq_printf(m, "lcd_out: %d\n", is_lcd); - seq_printf(m, "crt_out: %d\n", is_crt); - seq_printf(m, "tv_out: %d\n", is_tv); - return 0; - } + is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0; + is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0; + is_tv = (value & HCI_VIDEO_OUT_TV) ? 
1 : 0; - return -EIO; + seq_printf(m, "lcd_out: %d\n", is_lcd); + seq_printf(m, "crt_out: %d\n", is_crt); + seq_printf(m, "tv_out: %d\n", is_tv); + + return 0; } static int video_proc_open(struct inode *inode, struct file *file) @@ -1447,10 +1496,8 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf, struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file)); char *buffer; char *cmd; + int lcd_out, crt_out, tv_out; int remain = count; - int lcd_out = -1; - int crt_out = -1; - int tv_out = -1; int value; int ret; u32 video_out; @@ -1486,6 +1533,7 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf, kfree(cmd); + lcd_out = crt_out = tv_out = -1; ret = get_video_status(dev, &video_out); if (!ret) { unsigned int new_video_out = video_out; @@ -1980,8 +2028,8 @@ static ssize_t usb_sleep_charge_store(struct device *dev, const char *buf, size_t count) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); - u32 mode; int state; + u32 mode; int ret; ret = kstrtoint(buf, 0, &state); @@ -2021,9 +2069,8 @@ static ssize_t sleep_functions_on_battery_show(struct device *dev, char *buf) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + int bat_lvl, status; u32 state; - int bat_lvl; - int status; int ret; int tmp; diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c index 5db495dd018e..be1d137c6079 100644 --- a/drivers/platform/x86/toshiba_bluetooth.c +++ b/drivers/platform/x86/toshiba_bluetooth.c @@ -80,7 +80,9 @@ static int toshiba_bluetooth_present(acpi_handle handle) if (ACPI_FAILURE(result)) { pr_err("ACPI call to query Bluetooth presence failed\n"); return -ENXIO; - } else if (!bt_present) { + } + + if (!bt_present) { pr_info("Bluetooth device not present\n"); return -ENODEV; } diff --git a/drivers/platform/x86/toshiba_haps.c b/drivers/platform/x86/toshiba_haps.c index 7f2afc6b5eb9..b3dec521e2b6 100644 --- a/drivers/platform/x86/toshiba_haps.c +++ b/drivers/platform/x86/toshiba_haps.c @@ -59,7 +59,7 @@ static int toshiba_haps_protection_level(acpi_handle handle, int level) return -EIO; } - pr_info("HDD protection level set to: %d\n", level); + pr_debug("HDD protection level set to: %d\n", level); return 0; } @@ -141,7 +141,7 @@ static struct attribute_group haps_attr_group = { */ static void toshiba_haps_notify(struct acpi_device *device, u32 event) { - pr_info("Received event: 0x%x", event); + pr_debug("Received event: 0x%x", event); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), @@ -168,9 +168,13 @@ static int toshiba_haps_available(acpi_handle handle) * A non existent device as well as having (only) * Solid State Drives can cause the call to fail. 
*/ - status = acpi_evaluate_integer(handle, "_STA", NULL, - &hdd_present); - if (ACPI_FAILURE(status) || !hdd_present) { + status = acpi_evaluate_integer(handle, "_STA", NULL, &hdd_present); + if (ACPI_FAILURE(status)) { + pr_err("ACPI call to query HDD protection failed\n"); + return 0; + } + + if (!hdd_present) { pr_info("HDD protection not available or using SSD\n"); return 0; } diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index d637c933c8a9..58a97d420572 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -193,6 +193,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) if (err) break; + memset(&precise_offset, 0, sizeof(precise_offset)); ts = ktime_to_timespec64(xtstamp.device); precise_offset.device.sec = ts.tv_sec; precise_offset.device.nsec = ts.tv_nsec; diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 80a566a00d04..bf0128899c09 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -262,6 +262,15 @@ config PWM_LPSS_PLATFORM To compile this driver as a module, choose M here: the module will be called pwm-lpss-platform. +config PWM_MESON + tristate "Amlogic Meson PWM driver" + depends on ARCH_MESON + help + The platform driver for Amlogic Meson PWM controller. + + To compile this driver as a module, choose M here: the module + will be called pwm-meson. + config PWM_MTK_DISP tristate "MediaTek display PWM driver" depends on ARCH_MEDIATEK || COMPILE_TEST diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index feef1dd29f73..1194c54efcc2 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -24,6 +24,7 @@ obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o obj-$(CONFIG_PWM_LPSS) += pwm-lpss.o obj-$(CONFIG_PWM_LPSS_PCI) += pwm-lpss-pci.o obj-$(CONFIG_PWM_LPSS_PLATFORM) += pwm-lpss-platform.o +obj-$(CONFIG_PWM_MESON) += pwm-meson.o obj-$(CONFIG_PWM_MTK_DISP) += pwm-mtk-disp.o obj-$(CONFIG_PWM_MXS) += pwm-mxs.o obj-$(CONFIG_PWM_OMAP_DMTIMER) += pwm-omap-dmtimer.o diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 0dbd29e287db..172ef8245811 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -339,6 +339,8 @@ int pwmchip_remove(struct pwm_chip *chip) unsigned int i; int ret = 0; + pwmchip_sysfs_unexport_children(chip); + mutex_lock(&pwm_lock); for (i = 0; i < chip->npwm; i++) { diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c index 65108129d505..01339c152ab0 100644 --- a/drivers/pwm/pwm-berlin.c +++ b/drivers/pwm/pwm-berlin.c @@ -16,6 +16,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pwm.h> +#include <linux/slab.h> #define BERLIN_PWM_EN 0x0 #define BERLIN_PWM_ENABLE BIT(0) @@ -27,6 +28,13 @@ #define BERLIN_PWM_TCNT 0xc #define BERLIN_PWM_MAX_TCNT 65535 +struct berlin_pwm_channel { + u32 enable; + u32 ctrl; + u32 duty; + u32 tcnt; +}; + struct berlin_pwm_chip { struct pwm_chip chip; struct clk *clk; @@ -55,6 +63,25 @@ static inline void berlin_pwm_writel(struct berlin_pwm_chip *chip, writel_relaxed(value, chip->base + channel * 0x10 + offset); } +static int berlin_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct berlin_pwm_channel *channel; + + channel = kzalloc(sizeof(*channel), GFP_KERNEL); + if (!channel) + return -ENOMEM; + + return pwm_set_chip_data(pwm, channel); +} + +static void berlin_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct berlin_pwm_channel *channel = pwm_get_chip_data(pwm); + + pwm_set_chip_data(pwm, NULL); + kfree(channel); +} + static int berlin_pwm_config(struct 
pwm_chip *chip, struct pwm_device *pwm_dev, int duty_ns, int period_ns) { @@ -137,6 +164,8 @@ static void berlin_pwm_disable(struct pwm_chip *chip, } static const struct pwm_ops berlin_pwm_ops = { + .request = berlin_pwm_request, + .free = berlin_pwm_free, .config = berlin_pwm_config, .set_polarity = berlin_pwm_set_polarity, .enable = berlin_pwm_enable, @@ -204,12 +233,67 @@ static int berlin_pwm_remove(struct platform_device *pdev) return ret; } +#ifdef CONFIG_PM_SLEEP +static int berlin_pwm_suspend(struct device *dev) +{ + struct berlin_pwm_chip *pwm = dev_get_drvdata(dev); + unsigned int i; + + for (i = 0; i < pwm->chip.npwm; i++) { + struct berlin_pwm_channel *channel; + + channel = pwm_get_chip_data(&pwm->chip.pwms[i]); + if (!channel) + continue; + + channel->enable = berlin_pwm_readl(pwm, i, BERLIN_PWM_ENABLE); + channel->ctrl = berlin_pwm_readl(pwm, i, BERLIN_PWM_CONTROL); + channel->duty = berlin_pwm_readl(pwm, i, BERLIN_PWM_DUTY); + channel->tcnt = berlin_pwm_readl(pwm, i, BERLIN_PWM_TCNT); + } + + clk_disable_unprepare(pwm->clk); + + return 0; +} + +static int berlin_pwm_resume(struct device *dev) +{ + struct berlin_pwm_chip *pwm = dev_get_drvdata(dev); + unsigned int i; + int ret; + + ret = clk_prepare_enable(pwm->clk); + if (ret) + return ret; + + for (i = 0; i < pwm->chip.npwm; i++) { + struct berlin_pwm_channel *channel; + + channel = pwm_get_chip_data(&pwm->chip.pwms[i]); + if (!channel) + continue; + + berlin_pwm_writel(pwm, i, channel->ctrl, BERLIN_PWM_CONTROL); + berlin_pwm_writel(pwm, i, channel->duty, BERLIN_PWM_DUTY); + berlin_pwm_writel(pwm, i, channel->tcnt, BERLIN_PWM_TCNT); + berlin_pwm_writel(pwm, i, channel->enable, BERLIN_PWM_ENABLE); + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(berlin_pwm_pm_ops, berlin_pwm_suspend, + berlin_pwm_resume); + static struct platform_driver berlin_pwm_driver = { .probe = berlin_pwm_probe, .remove = berlin_pwm_remove, .driver = { .name = "berlin-pwm", .of_match_table = berlin_pwm_match, + .pm = &berlin_pwm_pm_ops, }, }; module_platform_driver(berlin_pwm_driver); diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c index 99b9acc1a420..f6ca4e8c6253 100644 --- a/drivers/pwm/pwm-cros-ec.c +++ b/drivers/pwm/pwm-cros-ec.c @@ -38,7 +38,7 @@ static int cros_ec_pwm_set_duty(struct cros_ec_device *ec, u8 index, u16 duty) struct { struct cros_ec_command msg; struct ec_params_pwm_set_duty params; - } buf; + } __packed buf; struct ec_params_pwm_set_duty *params = &buf.params; struct cros_ec_command *msg = &buf.msg; @@ -65,7 +65,7 @@ static int __cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index, struct ec_params_pwm_get_duty params; struct ec_response_pwm_get_duty resp; }; - } buf; + } __packed buf; struct ec_params_pwm_get_duty *params = &buf.params; struct ec_response_pwm_get_duty *resp = &buf.resp; struct cros_ec_command *msg = &buf.msg; diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c index 19dc64cab2f0..d7f5f7de030d 100644 --- a/drivers/pwm/pwm-lpc18xx-sct.c +++ b/drivers/pwm/pwm-lpc18xx-sct.c @@ -413,14 +413,18 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev) } for (i = 0; i < lpc18xx_pwm->chip.npwm; i++) { + struct lpc18xx_pwm_data *data; + pwm = &lpc18xx_pwm->chip.pwms[i]; - pwm->chip_data = devm_kzalloc(lpc18xx_pwm->dev, - sizeof(struct lpc18xx_pwm_data), - GFP_KERNEL); - if (!pwm->chip_data) { + + data = devm_kzalloc(lpc18xx_pwm->dev, sizeof(*data), + GFP_KERNEL); + if (!data) { ret = -ENOMEM; goto remove_pwmchip; } + + pwm_set_chip_data(pwm, data); } 
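The __packed annotations added to the cros-ec buffers above are not cosmetic: the EC protocol expects the parameter payload to start immediately after the command header, so any alignment padding the compiler inserts between the two members shifts the payload the EC sees. A minimal standalone sketch of the failure mode, using made-up field layouts rather than the real cros_ec structures:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical header whose alignment (2) is smaller than the payload's (8). */
struct fake_hdr {
	unsigned short version;
	unsigned short command;
	unsigned short outsize;		/* sizeof == 6, alignment == 2 */
};

struct fake_params {
	unsigned long long duty;	/* alignment == 8 */
};

struct buf_unpacked {
	struct fake_hdr msg;
	struct fake_params params;	/* compiler may place this at offset 8 */
};

struct buf_packed {
	struct fake_hdr msg;
	struct fake_params params;	/* guaranteed to follow at offset 6 */
} __attribute__((packed));

int main(void)
{
	printf("unpacked: params at offset %zu\n", offsetof(struct buf_unpacked, params));
	printf("packed:   params at offset %zu\n", offsetof(struct buf_packed, params));
	return 0;
}

On a typical LP64 ABI this prints 8 and 6; those two padding bytes in the unpacked layout are exactly what tagging the buffer __packed guards against.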
platform_set_drvdata(pdev, lpc18xx_pwm); diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c new file mode 100644 index 000000000000..381871b2bb46 --- /dev/null +++ b/drivers/pwm/pwm-meson.c @@ -0,0 +1,529 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong <narmstrong@baylibre.com> + * Copyright (C) 2014 Amlogic, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * BSD LICENSE + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong <narmstrong@baylibre.com> + * Copyright (C) 2014 Amlogic, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pwm.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#define REG_PWM_A 0x0 +#define REG_PWM_B 0x4 +#define PWM_HIGH_SHIFT 16 + +#define REG_MISC_AB 0x8 +#define MISC_B_CLK_EN BIT(23) +#define MISC_A_CLK_EN BIT(15) +#define MISC_CLK_DIV_MASK 0x7f +#define MISC_B_CLK_DIV_SHIFT 16 +#define MISC_A_CLK_DIV_SHIFT 8 +#define MISC_B_CLK_SEL_SHIFT 6 +#define MISC_A_CLK_SEL_SHIFT 4 +#define MISC_CLK_SEL_WIDTH 2 +#define MISC_B_EN BIT(1) +#define MISC_A_EN BIT(0) + +static const unsigned int mux_reg_shifts[] = { + MISC_A_CLK_SEL_SHIFT, + MISC_B_CLK_SEL_SHIFT +}; + +struct meson_pwm_channel { + unsigned int hi; + unsigned int lo; + u8 pre_div; + + struct pwm_state state; + + struct clk *clk_parent; + struct clk_mux mux; + struct clk *clk; +}; + +struct meson_pwm_data { + const char * const *parent_names; +}; + +struct meson_pwm { + struct pwm_chip chip; + const struct meson_pwm_data *data; + void __iomem *base; + u8 inverter_mask; + spinlock_t lock; +}; + +static inline struct meson_pwm *to_meson_pwm(struct pwm_chip *chip) +{ + return container_of(chip, struct meson_pwm, chip); +} + +static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct meson_pwm_channel *channel = pwm_get_chip_data(pwm); + struct device *dev = chip->dev; + int err; + + if (!channel) + return -ENODEV; + + if (channel->clk_parent) { + err = clk_set_parent(channel->clk, channel->clk_parent); + if (err < 0) { + dev_err(dev, "failed to set parent %s for %s: %d\n", + __clk_get_name(channel->clk_parent), + __clk_get_name(channel->clk), err); + return err; + } + } + + err = clk_prepare_enable(channel->clk); + if (err < 0) { + dev_err(dev, "failed to enable clock %s: %d\n", + __clk_get_name(channel->clk), err); + return err; + } + + chip->ops->get_state(chip, pwm, &channel->state); + + return 0; +} + +static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct meson_pwm_channel *channel = pwm_get_chip_data(pwm); + + if (channel) + clk_disable_unprepare(channel->clk); +} + +static int meson_pwm_calc(struct meson_pwm *meson, + struct meson_pwm_channel *channel, unsigned int id, + unsigned int duty, unsigned int period) +{ + unsigned int pre_div, cnt, duty_cnt; + unsigned long fin_freq = -1, fin_ns; + + if (~(meson->inverter_mask >> id) & 0x1) + duty = period - duty; + + if (period == channel->state.period && + duty == channel->state.duty_cycle) + return 0; + + fin_freq = clk_get_rate(channel->clk); + if (fin_freq == 0) { + dev_err(meson->chip.dev, "invalid source clock frequency\n"); + return -EINVAL; + } + + dev_dbg(meson->chip.dev, "fin_freq: %lu Hz\n", fin_freq); + fin_ns = NSEC_PER_SEC / fin_freq; + + /* Calc pre_div with the period */ + for (pre_div = 0; pre_div < MISC_CLK_DIV_MASK; pre_div++) { + cnt = DIV_ROUND_CLOSEST(period, fin_ns * (pre_div + 1)); + dev_dbg(meson->chip.dev, "fin_ns=%lu pre_div=%u cnt=%u\n", + fin_ns, pre_div, cnt); + if (cnt <= 0xffff) + break; + } + + if (pre_div == MISC_CLK_DIV_MASK) { + dev_err(meson->chip.dev, "unable to get period pre_div\n"); + return -EINVAL; + } + + dev_dbg(meson->chip.dev, "period=%u pre_div=%u cnt=%u\n", period, + pre_div, cnt); + + if (duty == period) { + channel->pre_div = pre_div; + channel->hi = cnt; + channel->lo = 0; + } else if (duty == 0) { + 
channel->pre_div = pre_div; + channel->hi = 0; + channel->lo = cnt; + } else { + /* Then check if we can have the duty with the same pre_div */ + duty_cnt = DIV_ROUND_CLOSEST(duty, fin_ns * (pre_div + 1)); + if (duty_cnt > 0xffff) { + dev_err(meson->chip.dev, "unable to get duty cycle\n"); + return -EINVAL; + } + + dev_dbg(meson->chip.dev, "duty=%u pre_div=%u duty_cnt=%u\n", + duty, pre_div, duty_cnt); + + channel->pre_div = pre_div; + channel->hi = duty_cnt; + channel->lo = cnt - duty_cnt; + } + + return 0; +} + +static void meson_pwm_enable(struct meson_pwm *meson, + struct meson_pwm_channel *channel, + unsigned int id) +{ + u32 value, clk_shift, clk_enable, enable; + unsigned int offset; + + switch (id) { + case 0: + clk_shift = MISC_A_CLK_DIV_SHIFT; + clk_enable = MISC_A_CLK_EN; + enable = MISC_A_EN; + offset = REG_PWM_A; + break; + + case 1: + clk_shift = MISC_B_CLK_DIV_SHIFT; + clk_enable = MISC_B_CLK_EN; + enable = MISC_B_EN; + offset = REG_PWM_B; + break; + + default: + return; + } + + value = readl(meson->base + REG_MISC_AB); + value &= ~(MISC_CLK_DIV_MASK << clk_shift); + value |= channel->pre_div << clk_shift; + value |= clk_enable; + writel(value, meson->base + REG_MISC_AB); + + value = (channel->hi << PWM_HIGH_SHIFT) | channel->lo; + writel(value, meson->base + offset); + + value = readl(meson->base + REG_MISC_AB); + value |= enable; + writel(value, meson->base + REG_MISC_AB); +} + +static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id) +{ + u32 value, enable; + + switch (id) { + case 0: + enable = MISC_A_EN; + break; + + case 1: + enable = MISC_B_EN; + break; + + default: + return; + } + + value = readl(meson->base + REG_MISC_AB); + value &= ~enable; + writel(value, meson->base + REG_MISC_AB); +} + +static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct meson_pwm_channel *channel = pwm_get_chip_data(pwm); + struct meson_pwm *meson = to_meson_pwm(chip); + unsigned long flags; + int err = 0; + + if (!state) + return -EINVAL; + + spin_lock_irqsave(&meson->lock, flags); + + if (!state->enabled) { + meson_pwm_disable(meson, pwm->hwpwm); + channel->state.enabled = false; + + goto unlock; + } + + if (state->period != channel->state.period || + state->duty_cycle != channel->state.duty_cycle || + state->polarity != channel->state.polarity) { + if (channel->state.enabled) { + meson_pwm_disable(meson, pwm->hwpwm); + channel->state.enabled = false; + } + + if (state->polarity != channel->state.polarity) { + if (state->polarity == PWM_POLARITY_NORMAL) + meson->inverter_mask |= BIT(pwm->hwpwm); + else + meson->inverter_mask &= ~BIT(pwm->hwpwm); + } + + err = meson_pwm_calc(meson, channel, pwm->hwpwm, + state->duty_cycle, state->period); + if (err < 0) + goto unlock; + + channel->state.polarity = state->polarity; + channel->state.period = state->period; + channel->state.duty_cycle = state->duty_cycle; + } + + if (state->enabled && !channel->state.enabled) { + meson_pwm_enable(meson, channel, pwm->hwpwm); + channel->state.enabled = true; + } + +unlock: + spin_unlock_irqrestore(&meson->lock, flags); + return err; +} + +static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct meson_pwm *meson = to_meson_pwm(chip); + u32 value, mask; + + if (!state) + return; + + switch (pwm->hwpwm) { + case 0: + mask = MISC_A_EN; + break; + + case 1: + mask = MISC_B_EN; + break; + + default: + return; + } + + value = readl(meson->base + REG_MISC_AB); + state->enabled = (value
& mask) != 0; +} + +static const struct pwm_ops meson_pwm_ops = { + .request = meson_pwm_request, + .free = meson_pwm_free, + .apply = meson_pwm_apply, + .get_state = meson_pwm_get_state, + .owner = THIS_MODULE, +}; + +static const char * const pwm_meson8b_parent_names[] = { + "xtal", "vid_pll", "fclk_div4", "fclk_div3" +}; + +static const struct meson_pwm_data pwm_meson8b_data = { + .parent_names = pwm_meson8b_parent_names, +}; + +static const char * const pwm_gxbb_parent_names[] = { + "xtal", "hdmi_pll", "fclk_div4", "fclk_div3" +}; + +static const struct meson_pwm_data pwm_gxbb_data = { + .parent_names = pwm_gxbb_parent_names, +}; + +static const struct of_device_id meson_pwm_matches[] = { + { .compatible = "amlogic,meson8b-pwm", .data = &pwm_meson8b_data }, + { .compatible = "amlogic,meson-gxbb-pwm", .data = &pwm_gxbb_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, meson_pwm_matches); + +static int meson_pwm_init_channels(struct meson_pwm *meson, + struct meson_pwm_channel *channels) +{ + struct device *dev = meson->chip.dev; + struct device_node *np = dev->of_node; + struct clk_init_data init; + unsigned int i; + char name[255]; + int err; + + for (i = 0; i < meson->chip.npwm; i++) { + struct meson_pwm_channel *channel = &channels[i]; + + snprintf(name, sizeof(name), "%s#mux%u", np->full_name, i); + + init.name = name; + init.ops = &clk_mux_ops; + init.flags = CLK_IS_BASIC; + init.parent_names = meson->data->parent_names; + init.num_parents = 1 << MISC_CLK_SEL_WIDTH; + + channel->mux.reg = meson->base + REG_MISC_AB; + channel->mux.shift = mux_reg_shifts[i]; + channel->mux.mask = BIT(MISC_CLK_SEL_WIDTH) - 1; + channel->mux.flags = 0; + channel->mux.lock = &meson->lock; + channel->mux.table = NULL; + channel->mux.hw.init = &init; + + channel->clk = devm_clk_register(dev, &channel->mux.hw); + if (IS_ERR(channel->clk)) { + err = PTR_ERR(channel->clk); + dev_err(dev, "failed to register %s: %d\n", name, err); + return err; + } + + snprintf(name, sizeof(name), "clkin%u", i); + + channel->clk_parent = devm_clk_get(dev, name); + if (IS_ERR(channel->clk_parent)) { + err = PTR_ERR(channel->clk_parent); + if (err == -EPROBE_DEFER) + return err; + + channel->clk_parent = NULL; + } + } + + return 0; +} + +static void meson_pwm_add_channels(struct meson_pwm *meson, + struct meson_pwm_channel *channels) +{ + unsigned int i; + + for (i = 0; i < meson->chip.npwm; i++) + pwm_set_chip_data(&meson->chip.pwms[i], &channels[i]); +} + +static int meson_pwm_probe(struct platform_device *pdev) +{ + struct meson_pwm_channel *channels; + struct meson_pwm *meson; + struct resource *regs; + int err; + + meson = devm_kzalloc(&pdev->dev, sizeof(*meson), GFP_KERNEL); + if (!meson) + return -ENOMEM; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + meson->base = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(meson->base)) + return PTR_ERR(meson->base); + + meson->chip.dev = &pdev->dev; + meson->chip.ops = &meson_pwm_ops; + meson->chip.base = -1; + meson->chip.npwm = 2; + meson->chip.of_xlate = of_pwm_xlate_with_flags; + meson->chip.of_pwm_n_cells = 3; + + meson->data = of_device_get_match_data(&pdev->dev); + meson->inverter_mask = BIT(meson->chip.npwm) - 1; + + channels = devm_kcalloc(&pdev->dev, meson->chip.npwm, sizeof(*meson), + GFP_KERNEL); + if (!channels) + return -ENOMEM; + + err = meson_pwm_init_channels(meson, channels); + if (err < 0) + return err; + + err = pwmchip_add(&meson->chip); + if (err < 0) { + dev_err(&pdev->dev, "failed to register PWM chip: %d\n", err); + return err; + } + + 
meson_pwm_add_channels(meson, channels); + + platform_set_drvdata(pdev, meson); + + return 0; +} + +static int meson_pwm_remove(struct platform_device *pdev) +{ + struct meson_pwm *meson = platform_get_drvdata(pdev); + + return pwmchip_remove(&meson->chip); +} + +static struct platform_driver meson_pwm_driver = { + .driver = { + .name = "meson-pwm", + .of_match_table = meson_pwm_matches, + }, + .probe = meson_pwm_probe, + .remove = meson_pwm_remove, +}; +module_platform_driver(meson_pwm_driver); + +MODULE_ALIAS("platform:meson-pwm"); +MODULE_DESCRIPTION("Amlogic Meson PWM Generator driver"); +MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c index 0ad3385298c0..893940d45f0d 100644 --- a/drivers/pwm/pwm-mtk-disp.c +++ b/drivers/pwm/pwm-mtk-disp.c @@ -18,30 +18,40 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pwm.h> #include <linux/slab.h> #define DISP_PWM_EN 0x00 -#define PWM_ENABLE_MASK BIT(0) -#define DISP_PWM_COMMIT 0x08 -#define PWM_COMMIT_MASK BIT(0) - -#define DISP_PWM_CON_0 0x10 #define PWM_CLKDIV_SHIFT 16 #define PWM_CLKDIV_MAX 0x3ff #define PWM_CLKDIV_MASK (PWM_CLKDIV_MAX << PWM_CLKDIV_SHIFT) -#define DISP_PWM_CON_1 0x14 #define PWM_PERIOD_BIT_WIDTH 12 #define PWM_PERIOD_MASK ((1 << PWM_PERIOD_BIT_WIDTH) - 1) #define PWM_HIGH_WIDTH_SHIFT 16 #define PWM_HIGH_WIDTH_MASK (0x1fff << PWM_HIGH_WIDTH_SHIFT) +struct mtk_pwm_data { + u32 enable_mask; + unsigned int con0; + u32 con0_sel; + unsigned int con1; + + bool has_commit; + unsigned int commit; + unsigned int commit_mask; + + unsigned int bls_debug; + u32 bls_debug_mask; +}; + struct mtk_disp_pwm { struct pwm_chip chip; + const struct mtk_pwm_data *data; struct clk *clk_main; struct clk *clk_mm; void __iomem *base; @@ -106,12 +116,21 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, return err; } - mtk_disp_pwm_update_bits(mdp, DISP_PWM_CON_0, PWM_CLKDIV_MASK, + mtk_disp_pwm_update_bits(mdp, mdp->data->con0, + PWM_CLKDIV_MASK, clk_div << PWM_CLKDIV_SHIFT); - mtk_disp_pwm_update_bits(mdp, DISP_PWM_CON_1, - PWM_PERIOD_MASK | PWM_HIGH_WIDTH_MASK, value); - mtk_disp_pwm_update_bits(mdp, DISP_PWM_COMMIT, PWM_COMMIT_MASK, 1); - mtk_disp_pwm_update_bits(mdp, DISP_PWM_COMMIT, PWM_COMMIT_MASK, 0); + mtk_disp_pwm_update_bits(mdp, mdp->data->con1, + PWM_PERIOD_MASK | PWM_HIGH_WIDTH_MASK, + value); + + if (mdp->data->has_commit) { + mtk_disp_pwm_update_bits(mdp, mdp->data->commit, + mdp->data->commit_mask, + mdp->data->commit_mask); + mtk_disp_pwm_update_bits(mdp, mdp->data->commit, + mdp->data->commit_mask, + 0x0); + } clk_disable(mdp->clk_mm); clk_disable(mdp->clk_main); @@ -134,7 +153,8 @@ static int mtk_disp_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) return err; } - mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, PWM_ENABLE_MASK, 1); + mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask, + mdp->data->enable_mask); return 0; } @@ -143,7 +163,8 @@ static void mtk_disp_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) { struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip); - mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, PWM_ENABLE_MASK, 0); + mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask, + 0x0); clk_disable(mdp->clk_mm); clk_disable(mdp->clk_main); @@ -166,6 +187,8 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev) if (!mdp) return -ENOMEM; + 
mdp->data = of_device_get_match_data(&pdev->dev); + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); mdp->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(mdp->base)) @@ -200,6 +223,19 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mdp); + /* + * For MT2701, disable double buffer before writing register + * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH. + */ + if (!mdp->data->has_commit) { + mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug, + mdp->data->bls_debug_mask, + mdp->data->bls_debug_mask); + mtk_disp_pwm_update_bits(mdp, mdp->data->con0, + mdp->data->con0_sel, + mdp->data->con0_sel); + } + return 0; disable_clk_mm: @@ -221,9 +257,30 @@ static int mtk_disp_pwm_remove(struct platform_device *pdev) return ret; } +static const struct mtk_pwm_data mt2701_pwm_data = { + .enable_mask = BIT(16), + .con0 = 0xa8, + .con0_sel = 0x2, + .con1 = 0xac, + .has_commit = false, + .bls_debug = 0xb0, + .bls_debug_mask = 0x3, +}; + +static const struct mtk_pwm_data mt8173_pwm_data = { + .enable_mask = BIT(0), + .con0 = 0x10, + .con0_sel = 0x0, + .con1 = 0x14, + .has_commit = true, + .commit = 0x8, + .commit_mask = 0x1, +}; + static const struct of_device_id mtk_disp_pwm_of_match[] = { - { .compatible = "mediatek,mt8173-disp-pwm" }, - { .compatible = "mediatek,mt6595-disp-pwm" }, + { .compatible = "mediatek,mt2701-disp-pwm", .data = &mt2701_pwm_data}, + { .compatible = "mediatek,mt6595-disp-pwm", .data = &mt8173_pwm_data}, + { .compatible = "mediatek,mt8173-disp-pwm", .data = &mt8173_pwm_data}, { } }; MODULE_DEVICE_TABLE(of, mtk_disp_pwm_of_match); diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c index ada2d326dc3e..f113cda47032 100644 --- a/drivers/pwm/pwm-samsung.c +++ b/drivers/pwm/pwm-samsung.c @@ -193,9 +193,18 @@ static unsigned long pwm_samsung_calc_tin(struct samsung_pwm_chip *chip, * divider settings and choose the lowest divisor that can generate * frequencies lower than requested. */ - for (div = variant->div_base; div < 4; ++div) - if ((rate >> (variant->bits + div)) < freq) - break; + if (variant->bits < 32) { + /* Only for s3c24xx */ + for (div = variant->div_base; div < 4; ++div) + if ((rate >> (variant->bits + div)) < freq) + break; + } else { + /* + * Other variants have enough counter bits to generate any + * requested rate, so no need to check higher divisors. + */ + div = variant->div_base; + } pwm_samsung_set_divisor(chip, chan, BIT(div)); diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c index 92abbd56b9f7..dd82dc840af9 100644 --- a/drivers/pwm/pwm-sti.c +++ b/drivers/pwm/pwm-sti.c @@ -1,8 +1,10 @@ /* - * PWM device driver for ST SoCs. 
- * Author: Ajit Pal Singh <ajitpal.singh@st.com> + * PWM device driver for ST SoCs + * + * Copyright (C) 2013-2016 STMicroelectronics (R&D) Limited * - * Copyright (C) 2013-2014 STMicroelectronics (R&D) Limited + * Author: Ajit Pal Singh <ajitpal.singh@st.com> + * Lee Jones <lee.jones@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -11,6 +13,7 @@ */ #include <linux/clk.h> +#include <linux/interrupt.h> #include <linux/math64.h> #include <linux/mfd/syscon.h> #include <linux/module.h> @@ -18,43 +21,82 @@ #include <linux/platform_device.h> #include <linux/pwm.h> #include <linux/regmap.h> +#include <linux/sched.h> #include <linux/slab.h> #include <linux/time.h> +#include <linux/wait.h> + +#define PWM_OUT_VAL(x) (0x00 + (4 * (x))) /* Device's Duty Cycle register */ +#define PWM_CPT_VAL(x) (0x10 + (4 * (x))) /* Capture value */ +#define PWM_CPT_EDGE(x) (0x30 + (4 * (x))) /* Edge to capture on */ -#define STI_DS_REG(ch) (4 * (ch)) /* Channel's Duty Cycle register */ -#define STI_PWMCR 0x50 /* Control/Config register */ -#define STI_INTEN 0x54 /* Interrupt Enable/Disable register */ -#define PWM_PRESCALE_LOW_MASK 0x0f -#define PWM_PRESCALE_HIGH_MASK 0xf0 +#define STI_PWM_CTRL 0x50 /* Control/Config register */ +#define STI_INT_EN 0x54 /* Interrupt Enable/Disable register */ +#define STI_INT_STA 0x58 /* Interrupt Status register */ +#define PWM_INT_ACK 0x5c +#define PWM_PRESCALE_LOW_MASK 0x0f +#define PWM_PRESCALE_HIGH_MASK 0xf0 +#define PWM_CPT_EDGE_MASK 0x03 +#define PWM_INT_ACK_MASK 0x1ff + +#define STI_MAX_CPT_DEVS 4 +#define CPT_DC_MAX 0xff /* Regfield IDs */ enum { + /* Bits in PWM_CTRL */ PWMCLK_PRESCALE_LOW, PWMCLK_PRESCALE_HIGH, - PWM_EN, - PWM_INT_EN, + CPTCLK_PRESCALE, + + PWM_OUT_EN, + PWM_CPT_EN, + + PWM_CPT_INT_EN, + PWM_CPT_INT_STAT, /* Keep last */ MAX_REGFIELDS }; +/* + * Each capture input can be programmed to detect rising-edge, falling-edge, + * either edge or neither edge.
+ */ +enum sti_cpt_edge { + CPT_EDGE_DISABLED, + CPT_EDGE_RISING, + CPT_EDGE_FALLING, + CPT_EDGE_BOTH, +}; + +struct sti_cpt_ddata { + u32 snapshot[3]; + unsigned int index; + struct mutex lock; + wait_queue_head_t wait; +}; + struct sti_pwm_compat_data { const struct reg_field *reg_fields; - unsigned int num_chan; + unsigned int pwm_num_devs; + unsigned int cpt_num_devs; unsigned int max_pwm_cnt; unsigned int max_prescale; }; struct sti_pwm_chip { struct device *dev; - struct clk *clk; - unsigned long clk_rate; + struct clk *pwm_clk; + struct clk *cpt_clk; struct regmap *regmap; struct sti_pwm_compat_data *cdata; struct regmap_field *prescale_low; struct regmap_field *prescale_high; - struct regmap_field *pwm_en; - struct regmap_field *pwm_int_en; + struct regmap_field *pwm_out_en; + struct regmap_field *pwm_cpt_en; + struct regmap_field *pwm_cpt_int_en; + struct regmap_field *pwm_cpt_int_stat; struct pwm_chip chip; struct pwm_device *cur; unsigned long configured; @@ -64,10 +106,13 @@ struct sti_pwm_chip { }; static const struct reg_field sti_pwm_regfields[MAX_REGFIELDS] = { - [PWMCLK_PRESCALE_LOW] = REG_FIELD(STI_PWMCR, 0, 3), - [PWMCLK_PRESCALE_HIGH] = REG_FIELD(STI_PWMCR, 11, 14), - [PWM_EN] = REG_FIELD(STI_PWMCR, 9, 9), - [PWM_INT_EN] = REG_FIELD(STI_INTEN, 0, 0), + [PWMCLK_PRESCALE_LOW] = REG_FIELD(STI_PWM_CTRL, 0, 3), + [PWMCLK_PRESCALE_HIGH] = REG_FIELD(STI_PWM_CTRL, 11, 14), + [CPTCLK_PRESCALE] = REG_FIELD(STI_PWM_CTRL, 4, 8), + [PWM_OUT_EN] = REG_FIELD(STI_PWM_CTRL, 9, 9), + [PWM_CPT_EN] = REG_FIELD(STI_PWM_CTRL, 10, 10), + [PWM_CPT_INT_EN] = REG_FIELD(STI_INT_EN, 1, 4), + [PWM_CPT_INT_STAT] = REG_FIELD(STI_INT_STA, 1, 4), }; static inline struct sti_pwm_chip *to_sti_pwmchip(struct pwm_chip *chip) @@ -82,61 +127,68 @@ static int sti_pwm_get_prescale(struct sti_pwm_chip *pc, unsigned long period, unsigned int *prescale) { struct sti_pwm_compat_data *cdata = pc->cdata; - unsigned long val; + unsigned long clk_rate; + unsigned long value; unsigned int ps; + clk_rate = clk_get_rate(pc->pwm_clk); + if (!clk_rate) { + dev_err(pc->dev, "failed to get clock rate\n"); + return -EINVAL; + } + /* - * prescale = ((period_ns * clk_rate) / (10^9 * (max_pwm_count + 1)) - 1 + * prescale = ((period_ns * clk_rate) / (10^9 * (max_pwm_cnt + 1)) - 1 */ - val = NSEC_PER_SEC / pc->clk_rate; - val *= cdata->max_pwm_cnt + 1; + value = NSEC_PER_SEC / clk_rate; + value *= cdata->max_pwm_cnt + 1; - if (period % val) { + if (period % value) return -EINVAL; - } else { - ps = period / val - 1; - if (ps > cdata->max_prescale) - return -EINVAL; - } + + ps = period / value - 1; + if (ps > cdata->max_prescale) + return -EINVAL; + *prescale = ps; return 0; } /* - * For STiH4xx PWM IP, the PWM period is fixed to 256 local clock cycles. - * The only way to change the period (apart from changing the PWM input clock) - * is to change the PWM clock prescaler. - * The prescaler is of 8 bits, so 256 prescaler values and hence - * 256 possible period values are supported (for a particular clock rate). - * The requested period will be applied only if it matches one of these - * 256 values. + * For STiH4xx PWM IP, the PWM period is fixed to 256 local clock cycles. The + * only way to change the period (apart from changing the PWM input clock) is + * to change the PWM clock prescaler. + * + * The prescaler is of 8 bits, so 256 prescaler values and hence 256 possible + * period values are supported (for a particular clock rate). The requested + * period will be applied only if it matches one of these 256 values. 
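+ *
+ * Worked example (editor's illustration, assuming a 10 MHz input clock):
+ * sti_pwm_get_prescale() computes one prescale step as
+ * (10^9 / clk_rate) * (max_pwm_cnt + 1) = 100 ns * 256 = 25600 ns, so
+ * period_ns = 51200 yields prescale = 51200 / 25600 - 1 = 1, while
+ * period_ns = 30000 is rejected with -EINVAL because it is not an exact
+ * multiple of 25600 ns.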
*/ static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) + int duty_ns, int period_ns) { struct sti_pwm_chip *pc = to_sti_pwmchip(chip); struct sti_pwm_compat_data *cdata = pc->cdata; + unsigned int ncfg, value, prescale = 0; struct pwm_device *cur = pc->cur; struct device *dev = pc->dev; - unsigned int prescale = 0, pwmvalx; - int ret; - unsigned int ncfg; bool period_same = false; + int ret; ncfg = hweight_long(pc->configured); if (ncfg) period_same = (period_ns == pwm_get_period(cur)); - /* Allow configuration changes if one of the - * following conditions satisfy. - * 1. No channels have been configured. - * 2. Only one channel has been configured and the new request - * is for the same channel. - * 3. Only one channel has been configured and the new request is - * for a new channel and period of the new channel is same as - * the current configured period. - * 4. More than one channels are configured and period of the new + /* + * Allow configuration changes if one of the following conditions + * is satisfied. + * 1. No devices have been configured. + * 2. Only one device has been configured and the new request is for + * the same device. + * 3. Only one device has been configured and the new request is for + * a new device and the period of the new device is the same as the + * current configured period. + * 4. More than one device is configured and the period of the new * request is the same as the current period. */ if (!ncfg || @@ -144,7 +196,11 @@ static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, ((ncfg == 1) && (pwm->hwpwm != cur->hwpwm) && period_same) || ((ncfg > 1) && period_same)) { /* Enable clock before writing to PWM registers. */ - ret = clk_enable(pc->clk); + ret = clk_enable(pc->pwm_clk); + if (ret) + return ret; + + ret = clk_enable(pc->cpt_clk); if (ret) return ret; @@ -153,15 +209,15 @@ static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, if (ret) goto clk_dis; - ret = - regmap_field_write(pc->prescale_low, - prescale & PWM_PRESCALE_LOW_MASK); + value = prescale & PWM_PRESCALE_LOW_MASK; + + ret = regmap_field_write(pc->prescale_low, value); if (ret) goto clk_dis; - ret = - regmap_field_write(pc->prescale_high, - (prescale & PWM_PRESCALE_HIGH_MASK) >> 4); + value = (prescale & PWM_PRESCALE_HIGH_MASK) >> 4; + + ret = regmap_field_write(pc->prescale_high, value); if (ret) goto clk_dis; } @@ -172,25 +228,26 @@ static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, * PWM pulse = (max_pwm_count + 1) local cycles, * that is continuous pulse: signal never goes low.
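 *
 * Editor's worked example (max_pwm_cnt = 255 as set in probe): a 50%
 * request, duty_ns = period_ns / 2, programs 255 * duty_ns / period_ns
 * = 127, while duty_ns == period_ns programs 255 and the output never
 * goes low, matching the note above and the computation just below.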
*/ - pwmvalx = cdata->max_pwm_cnt * duty_ns / period_ns; + value = cdata->max_pwm_cnt * duty_ns / period_ns; - ret = regmap_write(pc->regmap, STI_DS_REG(pwm->hwpwm), pwmvalx); + ret = regmap_write(pc->regmap, PWM_OUT_VAL(pwm->hwpwm), value); if (ret) goto clk_dis; - ret = regmap_field_write(pc->pwm_int_en, 0); + ret = regmap_field_write(pc->pwm_cpt_int_en, 0); set_bit(pwm->hwpwm, &pc->configured); pc->cur = pwm; - dev_dbg(dev, "prescale:%u, period:%i, duty:%i, pwmvalx:%u\n", - prescale, period_ns, duty_ns, pwmvalx); + dev_dbg(dev, "prescale:%u, period:%i, duty:%i, value:%u\n", + prescale, period_ns, duty_ns, value); } else { return -EINVAL; } clk_dis: - clk_disable(pc->clk); + clk_disable(pc->pwm_clk); + clk_disable(pc->cpt_clk); return ret; } @@ -201,23 +258,30 @@ static int sti_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) int ret = 0; /* - * Since we have a common enable for all PWM channels, - * do not enable if already enabled. + * Since we have a common enable for all PWM devices, do not enable if + * already enabled. */ mutex_lock(&pc->sti_pwm_lock); + if (!pc->en_count) { - ret = clk_enable(pc->clk); + ret = clk_enable(pc->pwm_clk); + if (ret) + goto out; + + ret = clk_enable(pc->cpt_clk); if (ret) goto out; - ret = regmap_field_write(pc->pwm_en, 1); + ret = regmap_field_write(pc->pwm_out_en, 1); if (ret) { - dev_err(dev, "failed to enable PWM device:%d\n", - pwm->hwpwm); + dev_err(dev, "failed to enable PWM device %u: %d\n", + pwm->hwpwm, ret); goto out; } } + pc->en_count++; + out: mutex_unlock(&pc->sti_pwm_lock); return ret; @@ -228,13 +292,17 @@ static void sti_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) struct sti_pwm_chip *pc = to_sti_pwmchip(chip); mutex_lock(&pc->sti_pwm_lock); + if (--pc->en_count) { mutex_unlock(&pc->sti_pwm_lock); return; } - regmap_field_write(pc->pwm_en, 0); - clk_disable(pc->clk); + regmap_field_write(pc->pwm_out_en, 0); + + clk_disable(pc->pwm_clk); + clk_disable(pc->cpt_clk); + mutex_unlock(&pc->sti_pwm_lock); } @@ -245,7 +313,90 @@ static void sti_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) clear_bit(pwm->hwpwm, &pc->configured); } +static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_capture *result, unsigned long timeout) +{ + struct sti_pwm_chip *pc = to_sti_pwmchip(chip); + struct sti_pwm_compat_data *cdata = pc->cdata; + struct sti_cpt_ddata *ddata = pwm_get_chip_data(pwm); + struct device *dev = pc->dev; + unsigned int effective_ticks; + unsigned long long high, low; + int ret; + + if (pwm->hwpwm >= cdata->cpt_num_devs) { + dev_err(dev, "device %u is not valid\n", pwm->hwpwm); + return -EINVAL; + } + + mutex_lock(&ddata->lock); + ddata->index = 0; + + /* Prepare capture measurement */ + regmap_write(pc->regmap, PWM_CPT_EDGE(pwm->hwpwm), CPT_EDGE_RISING); + regmap_field_write(pc->pwm_cpt_int_en, BIT(pwm->hwpwm)); + + /* Enable capture */ + ret = regmap_field_write(pc->pwm_cpt_en, 1); + if (ret) { + dev_err(dev, "failed to enable PWM capture %u: %d\n", + pwm->hwpwm, ret); + goto out; + } + + ret = wait_event_interruptible_timeout(ddata->wait, ddata->index > 1, + msecs_to_jiffies(timeout)); + + regmap_write(pc->regmap, PWM_CPT_EDGE(pwm->hwpwm), CPT_EDGE_DISABLED); + + if (ret == -ERESTARTSYS) + goto out; + + switch (ddata->index) { + case 0: + case 1: + /* + * Getting here could mean: + * - input signal is constant or less than 1 Hz + * - there is no input signal at all + * + * In such a case the frequency is rounded down to 0 + */ + result->period = 0; + result->duty_cycle =
0; + + break; + + case 2: + /* We have everything we need */ + high = ddata->snapshot[1] - ddata->snapshot[0]; + low = ddata->snapshot[2] - ddata->snapshot[1]; + + effective_ticks = clk_get_rate(pc->cpt_clk); + + result->period = (high + low) * NSEC_PER_SEC; + result->period /= effective_ticks; + + result->duty_cycle = high * NSEC_PER_SEC; + result->duty_cycle /= effective_ticks; + + break; + + default: + dev_err(dev, "internal error\n"); + break; + } + +out: + /* Disable capture */ + regmap_field_write(pc->pwm_cpt_en, 0); + + mutex_unlock(&ddata->lock); + return ret; +} + static const struct pwm_ops sti_pwm_ops = { + .capture = sti_pwm_capture, .config = sti_pwm_config, .enable = sti_pwm_enable, .disable = sti_pwm_disable, @@ -253,17 +404,98 @@ static const struct pwm_ops sti_pwm_ops = { .owner = THIS_MODULE, }; +static irqreturn_t sti_pwm_interrupt(int irq, void *data) +{ + struct sti_pwm_chip *pc = data; + struct device *dev = pc->dev; + struct sti_cpt_ddata *ddata; + int devicenum; + unsigned int cpt_int_stat; + unsigned int reg; + int ret = IRQ_NONE; + + ret = regmap_field_read(pc->pwm_cpt_int_stat, &cpt_int_stat); + if (ret) + return ret; + + while (cpt_int_stat) { + devicenum = ffs(cpt_int_stat) - 1; + + ddata = pwm_get_chip_data(&pc->chip.pwms[devicenum]); + + /* + * Capture input: + * _______ _______ + * | | | | + * __| |_________________| |________ + * ^0 ^1 ^2 + * + * Capture starts on the first available rising edge. When a + * capture event occurs, the capture value (CPT_VALx) is + * stored, the index is incremented and the capture edge is + * changed. + * + * After the capture, if the index > 1, we have collected the + * necessary data so we signal the thread waiting for it and + * disable the capture by setting capture edge to none + */ + + regmap_read(pc->regmap, + PWM_CPT_VAL(devicenum), + &ddata->snapshot[ddata->index]); + + switch (ddata->index) { + case 0: + case 1: + regmap_read(pc->regmap, PWM_CPT_EDGE(devicenum), &reg); + reg ^= PWM_CPT_EDGE_MASK; + regmap_write(pc->regmap, PWM_CPT_EDGE(devicenum), reg); + + ddata->index++; + break; + + case 2: + regmap_write(pc->regmap, + PWM_CPT_EDGE(devicenum), + CPT_EDGE_DISABLED); + wake_up(&ddata->wait); + break; + + default: + dev_err(dev, "Internal error\n"); + } + + cpt_int_stat &= ~BIT_MASK(devicenum); + + ret = IRQ_HANDLED; + } + + /* Just ACK everything */ + regmap_write(pc->regmap, PWM_INT_ACK, PWM_INT_ACK_MASK); + + return ret; +} + static int sti_pwm_probe_dt(struct sti_pwm_chip *pc) { struct device *dev = pc->dev; const struct reg_field *reg_fields; struct device_node *np = dev->of_node; struct sti_pwm_compat_data *cdata = pc->cdata; - u32 num_chan; + u32 num_devs; + int ret; + + ret = of_property_read_u32(np, "st,pwm-num-chan", &num_devs); + if (!ret) + cdata->pwm_num_devs = num_devs; + + ret = of_property_read_u32(np, "st,capture-num-chan", &num_devs); + if (!ret) + cdata->cpt_num_devs = num_devs; - of_property_read_u32(np, "st,pwm-num-chan", &num_chan); - if (num_chan) - cdata->num_chan = num_chan; + if (!cdata->pwm_num_devs && !cdata->cpt_num_devs) { + dev_err(dev, "No channels configured\n"); + return -EINVAL; + } reg_fields = cdata->reg_fields; @@ -277,15 +509,26 @@ static int sti_pwm_probe_dt(struct sti_pwm_chip *pc) if (IS_ERR(pc->prescale_high)) return PTR_ERR(pc->prescale_high); - pc->pwm_en = devm_regmap_field_alloc(dev, pc->regmap, - reg_fields[PWM_EN]); - if (IS_ERR(pc->pwm_en)) - return PTR_ERR(pc->pwm_en); - pc->pwm_int_en = devm_regmap_field_alloc(dev, pc->regmap, - reg_fields[PWM_INT_EN]); - if (IS_ERR(pc->pwm_int_en)) - return
PTR_ERR(pc->pwm_int_en); + pc->pwm_out_en = devm_regmap_field_alloc(dev, pc->regmap, + reg_fields[PWM_OUT_EN]); + if (IS_ERR(pc->pwm_out_en)) + return PTR_ERR(pc->pwm_out_en); + + pc->pwm_cpt_en = devm_regmap_field_alloc(dev, pc->regmap, + reg_fields[PWM_CPT_EN]); + if (IS_ERR(pc->pwm_cpt_en)) + return PTR_ERR(pc->pwm_cpt_en); + + pc->pwm_cpt_int_en = devm_regmap_field_alloc(dev, pc->regmap, + reg_fields[PWM_CPT_INT_EN]); + if (IS_ERR(pc->pwm_cpt_int_en)) + return PTR_ERR(pc->pwm_cpt_int_en); + + pc->pwm_cpt_int_stat = devm_regmap_field_alloc(dev, pc->regmap, + reg_fields[PWM_CPT_INT_STAT]); + if (PTR_ERR_OR_ZERO(pc->pwm_cpt_int_stat)) + return PTR_ERR(pc->pwm_cpt_int_stat); return 0; } @@ -302,7 +545,8 @@ static int sti_pwm_probe(struct platform_device *pdev) struct sti_pwm_compat_data *cdata; struct sti_pwm_chip *pc; struct resource *res; - int ret; + unsigned int i; + int irq, ret; pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL); if (!pc) @@ -323,14 +567,28 @@ static int sti_pwm_probe(struct platform_device *pdev) if (IS_ERR(pc->regmap)) return PTR_ERR(pc->regmap); + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Failed to obtain IRQ\n"); + return irq; + } + + ret = devm_request_irq(&pdev->dev, irq, sti_pwm_interrupt, 0, + pdev->name, pc); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to request IRQ\n"); + return ret; + } + /* * Setup PWM data with default values: some values could be replaced * with specific ones provided from Device Tree. */ - cdata->reg_fields = &sti_pwm_regfields[0]; + cdata->reg_fields = sti_pwm_regfields; cdata->max_prescale = 0xff; - cdata->max_pwm_cnt = 255; - cdata->num_chan = 1; + cdata->max_pwm_cnt = 255; + cdata->pwm_num_devs = 0; + cdata->cpt_num_devs = 0; pc->cdata = cdata; pc->dev = dev; @@ -341,36 +599,64 @@ static int sti_pwm_probe(struct platform_device *pdev) if (ret) return ret; - pc->clk = of_clk_get_by_name(dev->of_node, "pwm"); - if (IS_ERR(pc->clk)) { + if (!cdata->pwm_num_devs) + goto skip_pwm; + + pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm"); + if (IS_ERR(pc->pwm_clk)) { dev_err(dev, "failed to get PWM clock\n"); - return PTR_ERR(pc->clk); + return PTR_ERR(pc->pwm_clk); } - pc->clk_rate = clk_get_rate(pc->clk); - if (!pc->clk_rate) { - dev_err(dev, "failed to get clock rate\n"); - return -EINVAL; + ret = clk_prepare(pc->pwm_clk); + if (ret) { + dev_err(dev, "failed to prepare clock\n"); + return ret; } - ret = clk_prepare(pc->clk); +skip_pwm: + if (!cdata->cpt_num_devs) + goto skip_cpt; + + pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture"); + if (IS_ERR(pc->cpt_clk)) { + dev_err(dev, "failed to get PWM capture clock\n"); + return PTR_ERR(pc->cpt_clk); + } + + ret = clk_prepare(pc->cpt_clk); if (ret) { dev_err(dev, "failed to prepare clock\n"); return ret; } +skip_cpt: pc->chip.dev = dev; pc->chip.ops = &sti_pwm_ops; pc->chip.base = -1; - pc->chip.npwm = pc->cdata->num_chan; + pc->chip.npwm = pc->cdata->pwm_num_devs; pc->chip.can_sleep = true; ret = pwmchip_add(&pc->chip); if (ret < 0) { - clk_unprepare(pc->clk); + clk_unprepare(pc->pwm_clk); + clk_unprepare(pc->cpt_clk); return ret; } + for (i = 0; i < cdata->cpt_num_devs; i++) { + struct sti_cpt_ddata *ddata; + + ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + init_waitqueue_head(&ddata->wait); + mutex_init(&ddata->lock); + + pwm_set_chip_data(&pc->chip.pwms[i], ddata); + } + platform_set_drvdata(pdev, pc); return 0; @@ -381,10 +667,11 @@ static int sti_pwm_remove(struct platform_device *pdev) struct 
sti_pwm_chip *pc = platform_get_drvdata(pdev); unsigned int i; - for (i = 0; i < pc->cdata->num_chan; i++) + for (i = 0; i < pc->cdata->pwm_num_devs; i++) pwm_disable(&pc->chip.pwms[i]); - clk_unprepare(pc->clk); + clk_unprepare(pc->pwm_clk); + clk_unprepare(pc->cpt_clk); return pwmchip_remove(&pc->chip); } diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c index 03a99a53c39e..b0803f6c64d9 100644 --- a/drivers/pwm/pwm-sun4i.c +++ b/drivers/pwm/pwm-sun4i.c @@ -284,6 +284,12 @@ static const struct sun4i_pwm_data sun4i_pwm_data_a20 = { .npwm = 2, }; +static const struct sun4i_pwm_data sun4i_pwm_data_h3 = { + .has_prescaler_bypass = true, + .has_rdy = true, + .npwm = 1, +}; + static const struct of_device_id sun4i_pwm_dt_ids[] = { { .compatible = "allwinner,sun4i-a10-pwm", @@ -298,6 +304,9 @@ static const struct of_device_id sun4i_pwm_dt_ids[] = { .compatible = "allwinner,sun7i-a20-pwm", .data = &sun4i_pwm_data_a20, }, { + .compatible = "allwinner,sun8i-h3-pwm", + .data = &sun4i_pwm_data_h3, + }, { /* sentinel */ }, }; diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/pwm/pwm-tipwmss.c index 829f4991c96f..7fa85a1604da 100644 --- a/drivers/pwm/pwm-tipwmss.c +++ b/drivers/pwm/pwm-tipwmss.c @@ -34,7 +34,6 @@ static int pwmss_probe(struct platform_device *pdev) struct device_node *node = pdev->dev.of_node; pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); /* Populate all the child nodes here... */ ret = of_platform_populate(node, NULL, NULL, &pdev->dev); @@ -46,31 +45,13 @@ static int pwmss_probe(struct platform_device *pdev) static int pwmss_remove(struct platform_device *pdev) { - pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } -#ifdef CONFIG_PM_SLEEP -static int pwmss_suspend(struct device *dev) -{ - pm_runtime_put_sync(dev); - return 0; -} - -static int pwmss_resume(struct device *dev) -{ - pm_runtime_get_sync(dev); - return 0; -} -#endif - -static SIMPLE_DEV_PM_OPS(pwmss_pm_ops, pwmss_suspend, pwmss_resume); - static struct platform_driver pwmss_driver = { .driver = { .name = "pwmss", - .pm = &pwmss_pm_ops, .of_match_table = pwmss_of_match, }, .probe = pwmss_probe, diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c index 04f76725d591..7a993b056638 100644 --- a/drivers/pwm/pwm-twl.c +++ b/drivers/pwm/pwm-twl.c @@ -269,6 +269,22 @@ static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) goto out; } + val |= TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXEN); + + ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG); + if (ret < 0) { + dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label); + goto out; + } + + val &= ~TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXEN); + + ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG); + if (ret < 0) { + dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label); + goto out; + } + twl->twl6030_toggle3 = val; out: mutex_unlock(&twl->mutex); diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index 18ed725594c3..0296d8178ae2 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c @@ -409,6 +409,24 @@ void pwmchip_sysfs_unexport(struct pwm_chip *chip) } } +void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) +{ + struct device *parent; + unsigned int i; + + parent = class_find_device(&pwm_class, NULL, chip, + pwmchip_sysfs_match); + if (!parent) + return; + + for (i = 0; i < chip->npwm; i++) { + struct pwm_device *pwm = &chip->pwms[i]; + + if (test_bit(PWMF_EXPORTED, &pwm->flags)) + pwm_unexport_child(parent, pwm); + } +} + 
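The pwmchip_sysfs_unexport_children() helper added above only walks the chip's channels and unexports the ones a user had exported through sysfs; its caller is not part of this hunk. A minimal sketch of the kind of call site it is meant for, with a hypothetical example_chip_teardown() wrapper (an assumption, not this patch's code):

#include <linux/printk.h>
#include <linux/pwm.h>

/* Hypothetical teardown path for a chip whose channels may be exported. */
static void example_chip_teardown(struct pwm_chip *chip)
{
	/*
	 * Drop the per-channel sysfs nodes first so no pwmN attribute
	 * directory can outlive the pwm_device it refers to...
	 */
	pwmchip_sysfs_unexport_children(chip);

	/* ...then it is safe to remove the chip itself. */
	if (pwmchip_remove(chip) < 0)
		pr_warn("pwmchip_remove() failed\n");
}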
static int __init pwm_sysfs_init(void) { return class_register(&pwm_class); } diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c index 3958f50c5975..e0c747aa9f85 100644 --- a/drivers/regulator/max8973-regulator.c +++ b/drivers/regulator/max8973-regulator.c @@ -495,7 +495,8 @@ static irqreturn_t max8973_thermal_irq(int irq, void *data) { struct max8973_chip *mchip = data; - thermal_zone_device_update(mchip->tz_device); + thermal_zone_device_update(mchip->tz_device, + THERMAL_EVENT_UNSPECIFIED); return IRQ_HANDLED; } diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index d1e080701264..e859d148aba9 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -208,14 +208,14 @@ config RTC_DRV_AS3722 will be called rtc-as3722. config RTC_DRV_DS1307 - tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025" + tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025, ISL12057" help If you say yes here you get support for various compatible RTC chips (often with battery backup) connected with I2C. This driver should handle DS1307, DS1337, DS1338, DS1339, DS1340, ST M41T00, - EPSON RX-8025 and probably other chips. In some cases the RTC - must already have been initialized (by manufacturing or a - bootloader). + EPSON RX-8025, Intersil ISL12057 and probably other chips. In some + cases the RTC must already have been initialized (by manufacturing or + a bootloader). The first seven registers on these chips hold an RTC, and other registers may add features such as NVRAM, a trickle charger for @@ -234,6 +234,20 @@ config RTC_DRV_DS1307_HWMON Say Y here if you want to expose temperature sensor data on rtc-ds1307 (only DS3231) +config RTC_DRV_DS1307_CENTURY + bool "Century bit support for rtc-ds1307" + depends on RTC_DRV_DS1307 + default n + help + The DS1307 driver suffered from a bug where it enabled the + century bit unconditionally but never used it when reading the time. + This made the driver unable to support dates beyond 2099. + Setting this option will add proper support for the century bit, but if + the time was previously set using a kernel predating this option, + reading the date will return a date in the next century. + To solve that, you could boot a kernel without this option set, set + the RTC date and then boot a kernel with this option set. + config RTC_DRV_DS1374 tristate "Dallas/Maxim DS1374" help @@ -374,16 +388,6 @@ config RTC_DRV_ISL12022 This driver can also be built as a module. If so, the module will be called rtc-isl12022. -config RTC_DRV_ISL12057 - select REGMAP_I2C - tristate "Intersil ISL12057" - help - If you say yes here you get support for the Intersil ISL12057 - I2C RTC chip. - - This driver can also be built as a module. If so, the module - will be called rtc-isl12057. - config RTC_DRV_X1205 tristate "Xicor/Intersil X1205" help @@ -661,6 +665,7 @@ config RTC_DRV_DS1343 will be called rtc-ds1343. config RTC_DRV_DS1347 + select REGMAP_SPI tristate "Dallas/Maxim DS1347" help If you say yes here you get support for the @@ -1201,7 +1206,7 @@ comment "on-CPU RTC drivers" config RTC_DRV_ASM9260 tristate "Alphascale asm9260 RTC" - depends on MACH_ASM9260 + depends on MACH_ASM9260 || COMPILE_TEST help If you say yes here you get support for the RTC on the Alphascale asm9260 SoC.
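To make the pitfall described in the RTC_DRV_DS1307_CENTURY help text above concrete, here is a small standalone sketch of the year bookkeeping (editor's illustration in plain C; the only fact assumed is that struct rtc_time counts years from 1900):

#include <stdio.h>

int main(void)
{
	/* struct rtc_time counts years from 1900, so 2016 is tm_year = 116. */
	int stored_tm_year = 116;

	/* An old kernel stored year % 100 in BCD and set the century bit anyway. */
	int year_reg = stored_tm_year % 100;	/* 16 */
	int century_bit = 1;

	/* A kernel with the option enabled now adds 100 years for that bit. */
	int read_tm_year = year_reg + 100 + (century_bit ? 100 : 0);

	printf("wrote %d, read back %d\n",
	       1900 + stored_tm_year, 1900 + read_tm_year);	/* 2016 vs 2116 */
	return 0;
}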
@@ -1241,6 +1246,9 @@ config RTC_DRV_IMXDI config RTC_DRV_OMAP tristate "TI OMAP Real Time Clock" depends on ARCH_OMAP || ARCH_DAVINCI || COMPILE_TEST + depends on OF + depends on PINCTRL + select GENERIC_PINCONF help Say "yes" here to support the on chip real time clock present on TI OMAP1, AM33xx, DA8xx/OMAP-L13x, AM43xx and DRA7xx. diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 8fb994bacdf7..1ac694a330c8 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -72,7 +72,6 @@ obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o obj-$(CONFIG_RTC_DRV_HYM8563) += rtc-hym8563.o obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o -obj-$(CONFIG_RTC_DRV_ISL12057) += rtc-isl12057.o obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o obj-$(CONFIG_RTC_DRV_LP8788) += rtc-lp8788.o diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c index 70b4fd0f6122..9e336184491c 100644 --- a/drivers/rtc/rtc-ac100.c +++ b/drivers/rtc/rtc-ac100.c @@ -327,6 +327,8 @@ static int ac100_rtc_register_clks(struct ac100_rtc_dev *chip) .flags = 0, }; + of_property_read_string_index(np, "clock-output-names", + i, &init.name); clk->regmap = chip->regmap; clk->offset = AC100_CLKOUT_CTRL1 + i; clk->hw.init = &init; @@ -552,6 +554,9 @@ static int ac100_rtc_probe(struct platform_device *pdev) int ret; chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + platform_set_drvdata(pdev, chip); chip->dev = &pdev->dev; chip->regmap = ac100->regmap; diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c index 5219916ce11d..18a93d3e3f93 100644 --- a/drivers/rtc/rtc-asm9260.c +++ b/drivers/rtc/rtc-asm9260.c @@ -112,8 +112,6 @@ struct asm9260_rtc_priv { void __iomem *iobase; struct rtc_device *rtc; struct clk *clk; - /* io lock */ - spinlock_t lock; }; static irqreturn_t asm9260_rtc_irq(int irq, void *dev_id) @@ -122,11 +120,15 @@ static irqreturn_t asm9260_rtc_irq(int irq, void *dev_id) u32 isr; unsigned long events = 0; + mutex_lock(&priv->rtc->ops_lock); isr = ioread32(priv->iobase + HW_CIIR); - if (!isr) + if (!isr) { + mutex_unlock(&priv->rtc->ops_lock); return IRQ_NONE; + } iowrite32(0, priv->iobase + HW_CIIR); + mutex_unlock(&priv->rtc->ops_lock); events |= RTC_AF | RTC_IRQF; @@ -139,9 +141,7 @@ static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct asm9260_rtc_priv *priv = dev_get_drvdata(dev); u32 ctime0, ctime1, ctime2; - unsigned long irq_flags; - spin_lock_irqsave(&priv->lock, irq_flags); ctime0 = ioread32(priv->iobase + HW_CTIME0); ctime1 = ioread32(priv->iobase + HW_CTIME1); ctime2 = ioread32(priv->iobase + HW_CTIME2); @@ -155,7 +155,6 @@ static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm) ctime1 = ioread32(priv->iobase + HW_CTIME1); ctime2 = ioread32(priv->iobase + HW_CTIME2); } - spin_unlock_irqrestore(&priv->lock, irq_flags); tm->tm_sec = (ctime0 >> BM_CTIME0_SEC_S) & BM_CTIME0_SEC_M; tm->tm_min = (ctime0 >> BM_CTIME0_MIN_S) & BM_CTIME0_MIN_M; @@ -174,9 +173,7 @@ static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm) static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct asm9260_rtc_priv *priv = dev_get_drvdata(dev); - unsigned long irq_flags; - spin_lock_irqsave(&priv->lock, irq_flags); /* * make sure SEC counter will not flip other counter on write time, * real value will be written at the enf of sequence. 
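For context on the asm9260 conversion here: the RTC core already takes rtc->ops_lock around every rtc_class_ops callback, so the driver-private spinlock was redundant for the ops themselves, and only the interrupt handler still needs explicit locking. Reusing the same mutex there is legal only if the handler runs in non-atomic (threaded IRQ) context, an assumption implied by the mutex_lock() in the new handler. A generic sketch of the resulting pattern, with hypothetical example_* names:

#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/rtc.h>

struct example_priv {
	void __iomem *iobase;
	struct rtc_device *rtc;
};

static irqreturn_t example_rtc_irq(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	/* Same lock the RTC core holds around read_time/set_alarm/etc. */
	mutex_lock(&priv->rtc->ops_lock);
	/* ... read and acknowledge the interrupt status registers ... */
	mutex_unlock(&priv->rtc->ops_lock);

	rtc_update_irq(priv->rtc, 1, RTC_AF | RTC_IRQF);
	return IRQ_HANDLED;
}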
@@ -191,7 +188,6 @@ static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm) iowrite32(tm->tm_hour, priv->iobase + HW_HOUR); iowrite32(tm->tm_min, priv->iobase + HW_MIN); iowrite32(tm->tm_sec, priv->iobase + HW_SEC); - spin_unlock_irqrestore(&priv->lock, irq_flags); return 0; } @@ -199,9 +195,7 @@ static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm) static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct asm9260_rtc_priv *priv = dev_get_drvdata(dev); - unsigned long irq_flags; - spin_lock_irqsave(&priv->lock, irq_flags); alrm->time.tm_year = ioread32(priv->iobase + HW_ALYEAR); alrm->time.tm_mon = ioread32(priv->iobase + HW_ALMON); alrm->time.tm_mday = ioread32(priv->iobase + HW_ALDOM); @@ -213,7 +207,6 @@ static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) alrm->enabled = ioread32(priv->iobase + HW_AMR) ? 1 : 0; alrm->pending = ioread32(priv->iobase + HW_CIIR) ? 1 : 0; - spin_unlock_irqrestore(&priv->lock, irq_flags); return rtc_valid_tm(&alrm->time); } @@ -221,9 +214,7 @@ static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) static int asm9260_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct asm9260_rtc_priv *priv = dev_get_drvdata(dev); - unsigned long irq_flags; - spin_lock_irqsave(&priv->lock, irq_flags); iowrite32(alrm->time.tm_year, priv->iobase + HW_ALYEAR); iowrite32(alrm->time.tm_mon, priv->iobase + HW_ALMON); iowrite32(alrm->time.tm_mday, priv->iobase + HW_ALDOM); @@ -234,7 +225,6 @@ static int asm9260_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) iowrite32(alrm->time.tm_sec, priv->iobase + HW_ALSEC); iowrite32(alrm->enabled ? 0 : BM_AMR_OFF, priv->iobase + HW_AMR); - spin_unlock_irqrestore(&priv->lock, irq_flags); return 0; } diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c index 83ac2337c0f7..de8bf56a41e7 100644 --- a/drivers/rtc/rtc-at32ap700x.c +++ b/drivers/rtc/rtc-at32ap700x.c @@ -187,7 +187,7 @@ static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id) return ret; } -static struct rtc_class_ops at32_rtc_ops = { +static const struct rtc_class_ops at32_rtc_ops = { .read_time = at32_rtc_readtime, .set_time = at32_rtc_settime, .read_alarm = at32_rtc_readalarm, diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c index 0299988b4f13..397742446007 100644 --- a/drivers/rtc/rtc-bq32k.c +++ b/drivers/rtc/rtc-bq32k.c @@ -93,8 +93,15 @@ static int bq32k_rtc_read_time(struct device *dev, struct rtc_time *tm) if (error) return error; + /* + * In case of oscillator failure, the register contents should be + * considered invalid. The flag is cleared the next time the RTC is set. + */ + if (regs.minutes & BQ32K_OF) + return -EINVAL; + tm->tm_sec = bcd2bin(regs.seconds & BQ32K_SECONDS_MASK); - tm->tm_min = bcd2bin(regs.minutes & BQ32K_SECONDS_MASK); + tm->tm_min = bcd2bin(regs.minutes & BQ32K_MINUTES_MASK); tm->tm_hour = bcd2bin(regs.cent_hours & BQ32K_HOURS_MASK); tm->tm_mday = bcd2bin(regs.date); tm->tm_wday = bcd2bin(regs.day) - 1; @@ -204,13 +211,10 @@ static int bq32k_probe(struct i2c_client *client, /* Check Oscillator Failure flag */ error = bq32k_read(dev, &reg, BQ32K_MINUTES, 1); - if (!error && (reg & BQ32K_OF)) { - dev_warn(dev, "Oscillator Failure. Check RTC battery.\n"); - reg &= ~BQ32K_OF; - error = bq32k_write(dev, &reg, BQ32K_MINUTES, 1); - } if (error) return error; + if (reg & BQ32K_OF) + dev_warn(dev, "Oscillator Failure.
Check RTC battery.\n"); if (client->dev.of_node) trickle_charger_of_init(dev, client->dev.of_node); diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 43745cac0141..dd3d59806ffa 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -62,6 +62,8 @@ struct cmos_rtc { u8 day_alrm; u8 mon_alrm; u8 century; + + struct rtc_wkalrm saved_wkalrm; }; /* both platform and pnp busses use negative numbers for invalid irqs */ @@ -707,6 +709,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) goto cleanup1; } + hpet_rtc_timer_init(); + if (is_valid_irq(rtc_irq)) { irq_handler_t rtc_cmos_int_handler; @@ -714,6 +718,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) rtc_cmos_int_handler = hpet_rtc_interrupt; retval = hpet_register_irq_handler(cmos_interrupt); if (retval) { + hpet_mask_rtc_irq_bit(RTC_IRQMASK); dev_warn(dev, "hpet_register_irq_handler " " failed in rtc_init()."); goto cleanup1; @@ -729,7 +734,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) goto cleanup1; } } - hpet_rtc_timer_init(); /* export at least the first block of NVRAM */ nvram.size = address_space - NVRAM_OFFSET; @@ -844,8 +848,6 @@ static int cmos_aie_poweroff(struct device *dev) return retval; } -#ifdef CONFIG_PM - static int cmos_suspend(struct device *dev) { struct cmos_rtc *cmos = dev_get_drvdata(dev); @@ -877,6 +879,8 @@ static int cmos_suspend(struct device *dev) enable_irq_wake(cmos->irq); } + cmos_read_alarm(dev, &cmos->saved_wkalrm); + dev_dbg(dev, "suspend%s, ctrl %02x\n", (tmp & RTC_AIE) ? ", alarm may wake" : "", tmp); @@ -892,12 +896,32 @@ static int cmos_suspend(struct device *dev) */ static inline int cmos_poweroff(struct device *dev) { + if (!IS_ENABLED(CONFIG_PM)) + return -ENOSYS; + return cmos_suspend(dev); } -#ifdef CONFIG_PM_SLEEP +static void cmos_check_wkalrm(struct device *dev) +{ + struct cmos_rtc *cmos = dev_get_drvdata(dev); + struct rtc_wkalrm current_alarm; + time64_t t_current_expires; + time64_t t_saved_expires; + + cmos_read_alarm(dev, &current_alarm); + t_current_expires = rtc_tm_to_time64(&current_alarm.time); + t_saved_expires = rtc_tm_to_time64(&cmos->saved_wkalrm.time); + if (t_current_expires != t_saved_expires || + cmos->saved_wkalrm.enabled != current_alarm.enabled) { + cmos_set_alarm(dev, &cmos->saved_wkalrm); + } +} + +static void cmos_check_acpi_rtc_status(struct device *dev, + unsigned char *rtc_control); -static int cmos_resume(struct device *dev) +static int __maybe_unused cmos_resume(struct device *dev) { struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned char tmp; @@ -910,6 +934,9 @@ static int cmos_resume(struct device *dev) cmos->enabled_wake = 0; } + /* The BIOS might have changed the alarm, restore it */ + cmos_check_wkalrm(dev); + spin_lock_irq(&rtc_lock); tmp = cmos->suspend_ctrl; cmos->suspend_ctrl = 0; @@ -936,6 +963,9 @@ static int cmos_resume(struct device *dev) tmp &= ~RTC_AIE; hpet_mask_rtc_irq_bit(RTC_AIE); } while (mask & RTC_AIE); + + if (tmp & RTC_AIE) + cmos_check_acpi_rtc_status(dev, &tmp); } spin_unlock_irq(&rtc_lock); @@ -944,16 +974,6 @@ static int cmos_resume(struct device *dev) return 0; } -#endif -#else - -static inline int cmos_poweroff(struct device *dev) -{ - return -ENOSYS; -} - -#endif - static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume); /*----------------------------------------------------------------*/ @@ -973,6 +993,20 @@ static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume); static u32 rtc_handler(void *context) { struct device
*dev = context; + struct cmos_rtc *cmos = dev_get_drvdata(dev); + unsigned char rtc_control = 0; + unsigned char rtc_intr; + + spin_lock_irq(&rtc_lock); + if (cmos_rtc.suspend_ctrl) + rtc_control = CMOS_READ(RTC_CONTROL); + if (rtc_control & RTC_AIE) { + cmos_rtc.suspend_ctrl &= ~RTC_AIE; + CMOS_WRITE(rtc_control, RTC_CONTROL); + rtc_intr = CMOS_READ(RTC_INTR_FLAGS); + rtc_update_irq(cmos->rtc, 1, rtc_intr); + } + spin_unlock_irq(&rtc_lock); pm_wakeup_event(dev, 0); acpi_clear_event(ACPI_EVENT_RTC); @@ -1039,12 +1073,39 @@ static void cmos_wake_setup(struct device *dev) device_init_wakeup(dev, 1); } +static void cmos_check_acpi_rtc_status(struct device *dev, + unsigned char *rtc_control) +{ + struct cmos_rtc *cmos = dev_get_drvdata(dev); + acpi_event_status rtc_status; + acpi_status status; + + if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC) + return; + + status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status); + if (ACPI_FAILURE(status)) { + dev_err(dev, "Could not get RTC status\n"); + } else if (rtc_status & ACPI_EVENT_FLAG_SET) { + unsigned char mask; + *rtc_control &= ~RTC_AIE; + CMOS_WRITE(*rtc_control, RTC_CONTROL); + mask = CMOS_READ(RTC_INTR_FLAGS); + rtc_update_irq(cmos->rtc, 1, mask); + } +} + #else static void cmos_wake_setup(struct device *dev) { } +static void cmos_check_acpi_rtc_status(struct device *dev, + unsigned char *rtc_control) +{ +} + #endif #ifdef CONFIG_PNP @@ -1206,9 +1267,7 @@ static struct platform_driver cmos_platform_driver = { .shutdown = cmos_platform_shutdown, .driver = { .name = driver_name, -#ifdef CONFIG_PM .pm = &cmos_pm_ops, -#endif .of_match_table = of_match_ptr(of_cmos_match), } }; diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c index 101b7a240e0f..cfc4141d99cd 100644 --- a/drivers/rtc/rtc-coh901331.c +++ b/drivers/rtc/rtc-coh901331.c @@ -140,7 +140,7 @@ static int coh901331_alarm_irq_enable(struct device *dev, unsigned int enabled) return 0; } -static struct rtc_class_ops coh901331_ops = { +static const struct rtc_class_ops coh901331_ops = { .read_time = coh901331_read_time, .set_mmss = coh901331_set_mmss, .read_alarm = coh901331_read_alarm, diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c index dba60c1dfce2..caf35567e14c 100644 --- a/drivers/rtc/rtc-davinci.c +++ b/drivers/rtc/rtc-davinci.c @@ -469,7 +469,7 @@ static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) return 0; } -static struct rtc_class_ops davinci_rtc_ops = { +static const struct rtc_class_ops davinci_rtc_ops = { .ioctl = davinci_rtc_ioctl, .read_time = davinci_rtc_read_time, .set_time = davinci_rtc_set_time, diff --git a/drivers/rtc/rtc-digicolor.c b/drivers/rtc/rtc-digicolor.c index 8d05596a6765..b253bf1b3531 100644 --- a/drivers/rtc/rtc-digicolor.c +++ b/drivers/rtc/rtc-digicolor.c @@ -159,7 +159,7 @@ static int dc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) return 0; } -static struct rtc_class_ops dc_rtc_ops = { +static const struct rtc_class_ops dc_rtc_ops = { .read_time = dc_rtc_read_time, .set_mmss = dc_rtc_set_mmss, .read_alarm = dc_rtc_read_alarm, diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c index f5dd09fe5add..0ec4be62322b 100644 --- a/drivers/rtc/rtc-ds1302.c +++ b/drivers/rtc/rtc-ds1302.c @@ -102,7 +102,7 @@ static int ds1302_rtc_get_time(struct device *dev, struct rtc_time *time) return rtc_valid_tm(time); } -static struct rtc_class_ops ds1302_rtc_ops = { +static const struct rtc_class_ops ds1302_rtc_ops = { .read_time = ds1302_rtc_get_time, .set_time = 
ds1302_rtc_set_time, }; diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 8e1c5cb6ece6..4e31036ee259 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -186,6 +186,7 @@ static const struct i2c_device_id ds1307_id[] = { { "mcp7941x", mcp794xx }, { "pt7c4338", ds_1307 }, { "rx8025", rx_8025 }, + { "isl12057", ds_1337 }, { } }; MODULE_DEVICE_TABLE(i2c, ds1307_id); @@ -382,10 +383,25 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t) t->tm_mday = bcd2bin(ds1307->regs[DS1307_REG_MDAY] & 0x3f); tmp = ds1307->regs[DS1307_REG_MONTH] & 0x1f; t->tm_mon = bcd2bin(tmp) - 1; - - /* assume 20YY not 19YY, and ignore DS1337_BIT_CENTURY */ t->tm_year = bcd2bin(ds1307->regs[DS1307_REG_YEAR]) + 100; +#ifdef CONFIG_RTC_DRV_DS1307_CENTURY + switch (ds1307->type) { + case ds_1337: + case ds_1339: + case ds_3231: + if (ds1307->regs[DS1307_REG_MONTH] & DS1337_BIT_CENTURY) + t->tm_year += 100; + break; + case ds_1340: + if (ds1307->regs[DS1307_REG_HOUR] & DS1340_BIT_CENTURY) + t->tm_year += 100; + break; + default: + break; + } +#endif + dev_dbg(dev, "%s secs=%d, mins=%d, " "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n", "read", t->tm_sec, t->tm_min, @@ -409,6 +425,27 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) t->tm_hour, t->tm_mday, t->tm_mon, t->tm_year, t->tm_wday); +#ifdef CONFIG_RTC_DRV_DS1307_CENTURY + if (t->tm_year < 100) + return -EINVAL; + + switch (ds1307->type) { + case ds_1337: + case ds_1339: + case ds_3231: + case ds_1340: + if (t->tm_year > 299) + return -EINVAL; + default: + if (t->tm_year > 199) + return -EINVAL; + break; + } +#else + if (t->tm_year < 100 || t->tm_year > 199) + return -EINVAL; +#endif + buf[DS1307_REG_SECS] = bin2bcd(t->tm_sec); buf[DS1307_REG_MIN] = bin2bcd(t->tm_min); buf[DS1307_REG_HOUR] = bin2bcd(t->tm_hour); @@ -424,11 +461,13 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) case ds_1337: case ds_1339: case ds_3231: - buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY; + if (t->tm_year > 199) + buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY; break; case ds_1340: - buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN - | DS1340_BIT_CENTURY; + buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN; + if (t->tm_year > 199) + buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY; break; case mcp794xx: /* @@ -1295,6 +1334,11 @@ static int ds1307_probe(struct i2c_client *client, if (of_property_read_bool(client->dev.of_node, "wakeup-source")) { ds1307_can_wakeup_device = true; } + /* Intersil ISL12057 DT backward compatibility */ + if (of_property_read_bool(client->dev.of_node, + "isil,irq2-can-wakeup-machine")) { + ds1307_can_wakeup_device = true; + } #endif switch (ds1307->type) { diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c index 641e8e8a0dd7..ccfc9d43eb1e 100644 --- a/drivers/rtc/rtc-ds1347.c +++ b/drivers/rtc/rtc-ds1347.c @@ -18,6 +18,7 @@ #include <linux/rtc.h> #include <linux/spi/spi.h> #include <linux/bcd.h> +#include <linux/regmap.h> /* Registers in ds1347 rtc */ @@ -32,37 +33,28 @@ #define DS1347_STATUS_REG 0x17 #define DS1347_CLOCK_BURST 0x3F -static int ds1347_read_reg(struct device *dev, unsigned char address, - unsigned char *data) -{ - struct spi_device *spi = to_spi_device(dev); - - *data = address | 0x80; - - return spi_write_then_read(spi, data, 1, data, 1); -} - -static int ds1347_write_reg(struct device *dev, unsigned char address, - unsigned char data) -{ - struct spi_device *spi = to_spi_device(dev); - unsigned char buf[2]; - - buf[0] = address & 0x7F; - buf[1] = 
data; +static const struct regmap_range ds1347_ranges[] = { + { + .range_min = DS1347_SECONDS_REG, + .range_max = DS1347_STATUS_REG, + }, +}; - return spi_write_then_read(spi, buf, 2, NULL, 0); -} +static const struct regmap_access_table ds1347_access_table = { + .yes_ranges = ds1347_ranges, + .n_yes_ranges = ARRAY_SIZE(ds1347_ranges), +}; static int ds1347_read_time(struct device *dev, struct rtc_time *dt) { struct spi_device *spi = to_spi_device(dev); + struct regmap *map; int err; unsigned char buf[8]; - buf[0] = DS1347_CLOCK_BURST | 0x80; + map = spi_get_drvdata(spi); - err = spi_write_then_read(spi, buf, 1, buf, 8); + err = regmap_bulk_read(map, DS1347_CLOCK_BURST, buf, 8); if (err) return err; @@ -80,25 +72,27 @@ static int ds1347_read_time(struct device *dev, struct rtc_time *dt) static int ds1347_set_time(struct device *dev, struct rtc_time *dt) { struct spi_device *spi = to_spi_device(dev); - unsigned char buf[9]; + struct regmap *map; + unsigned char buf[8]; + + map = spi_get_drvdata(spi); - buf[0] = DS1347_CLOCK_BURST & 0x7F; - buf[1] = bin2bcd(dt->tm_sec); - buf[2] = bin2bcd(dt->tm_min); - buf[3] = (bin2bcd(dt->tm_hour) & 0x3F); - buf[4] = bin2bcd(dt->tm_mday); - buf[5] = bin2bcd(dt->tm_mon + 1); - buf[6] = bin2bcd(dt->tm_wday + 1); + buf[0] = bin2bcd(dt->tm_sec); + buf[1] = bin2bcd(dt->tm_min); + buf[2] = (bin2bcd(dt->tm_hour) & 0x3F); + buf[3] = bin2bcd(dt->tm_mday); + buf[4] = bin2bcd(dt->tm_mon + 1); + buf[5] = bin2bcd(dt->tm_wday + 1); /* year in linux is from 1900 i.e in range of 100 in rtc it is from 00 to 99 */ dt->tm_year = dt->tm_year % 100; - buf[7] = bin2bcd(dt->tm_year); - buf[8] = bin2bcd(0x00); + buf[6] = bin2bcd(dt->tm_year); + buf[7] = bin2bcd(0x00); /* write the rtc settings */ - return spi_write_then_read(spi, buf, 9, NULL, 0); + return regmap_bulk_write(map, DS1347_CLOCK_BURST, buf, 8); } static const struct rtc_class_ops ds1347_rtc_ops = { @@ -109,35 +103,53 @@ static const struct rtc_class_ops ds1347_rtc_ops = { static int ds1347_probe(struct spi_device *spi) { struct rtc_device *rtc; - unsigned char data; + struct regmap_config config; + struct regmap *map; + unsigned int data; int res; + memset(&config, 0, sizeof(config)); + config.reg_bits = 8; + config.val_bits = 8; + config.read_flag_mask = 0x80; + config.max_register = 0x3F; + config.wr_table = &ds1347_access_table; + /* spi setup with ds1347 in mode 3 and bits per word as 8 */ spi->mode = SPI_MODE_3; spi->bits_per_word = 8; spi_setup(spi); + map = devm_regmap_init_spi(spi, &config); + + if (IS_ERR(map)) { + dev_err(&spi->dev, "ds1347 regmap init spi failed\n"); + return PTR_ERR(map); + } + + spi_set_drvdata(spi, map); + /* RTC Settings */ - res = ds1347_read_reg(&spi->dev, DS1347_SECONDS_REG, &data); + res = regmap_read(map, DS1347_SECONDS_REG, &data); if (res) return res; /* Disable the write protect of rtc */ - ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data); + regmap_read(map, DS1347_CONTROL_REG, &data); data = data & ~(1<<7); - ds1347_write_reg(&spi->dev, DS1347_CONTROL_REG, data); + regmap_write(map, DS1347_CONTROL_REG, data); /* Enable the oscillator , disable the oscillator stop flag, and glitch filter to reduce current consumption */ - ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data); + regmap_read(map, DS1347_STATUS_REG, &data); data = data & 0x1B; - ds1347_write_reg(&spi->dev, DS1347_STATUS_REG, data); + regmap_write(map, DS1347_STATUS_REG, data); /* display the settings */ - ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data); + regmap_read(map, DS1347_CONTROL_REG, &data); 
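/*
 * Editor's note, not part of the patch: the regmap_config built in
 * ds1347_probe() encodes what the deleted ds1347_read_reg() and
 * ds1347_write_reg() helpers did by hand. read_flag_mask = 0x80 makes
 * regmap OR 0x80 into the register address for reads (reading
 * DS1347_STATUS_REG, 0x17, puts 0x97 on the wire, just like the old
 * "address | 0x80"), writes send the address untouched (the old
 * "address & 0x7F"), and wr_table restricts writes to the
 * DS1347_SECONDS_REG..DS1347_STATUS_REG window.
 */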
dev_info(&spi->dev, "DS1347 RTC CTRL Reg = 0x%02x\n", data); - ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data); + regmap_read(map, DS1347_STATUS_REG, &data); dev_info(&spi->dev, "DS1347 RTC Status Reg = 0x%02x\n", data); rtc = devm_rtc_device_register(&spi->dev, "ds1347", @@ -146,8 +158,6 @@ static int ds1347_probe(struct spi_device *spi) if (IS_ERR(rtc)) return PTR_ERR(rtc); - spi_set_drvdata(spi, rtc); - return 0; } diff --git a/drivers/rtc/rtc-gemini.c b/drivers/rtc/rtc-gemini.c index b57505efadbc..688debc14348 100644 --- a/drivers/rtc/rtc-gemini.c +++ b/drivers/rtc/rtc-gemini.c @@ -110,7 +110,7 @@ static int gemini_rtc_set_time(struct device *dev, struct rtc_time *tm) return 0; } -static struct rtc_class_ops gemini_rtc_ops = { +static const struct rtc_class_ops gemini_rtc_ops = { .read_time = gemini_rtc_read_time, .set_time = gemini_rtc_set_time, }; diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c deleted file mode 100644 index 0e7f0f52bfe4..000000000000 --- a/drivers/rtc/rtc-isl12057.c +++ /dev/null @@ -1,643 +0,0 @@ -/* - * rtc-isl12057 - Driver for Intersil ISL12057 I2C Real Time Clock - * - * Copyright (C) 2013, Arnaud EBALARD <arno@natisbad.org> - * - * This work is largely based on Intersil ISL1208 driver developed by - * Hebert Valerio Riedel <hvr@gnu.org>. - * - * Detailed datasheet on which this development is based is available here: - * - * http://natisbad.org/NAS2/refs/ISL12057.pdf - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include <linux/module.h> -#include <linux/mutex.h> -#include <linux/rtc.h> -#include <linux/i2c.h> -#include <linux/bcd.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/regmap.h> - -#define DRV_NAME "rtc-isl12057" - -/* RTC section */ -#define ISL12057_REG_RTC_SC 0x00 /* Seconds */ -#define ISL12057_REG_RTC_MN 0x01 /* Minutes */ -#define ISL12057_REG_RTC_HR 0x02 /* Hours */ -#define ISL12057_REG_RTC_HR_PM BIT(5) /* AM/PM bit in 12h format */ -#define ISL12057_REG_RTC_HR_MIL BIT(6) /* 24h/12h format */ -#define ISL12057_REG_RTC_DW 0x03 /* Day of the Week */ -#define ISL12057_REG_RTC_DT 0x04 /* Date */ -#define ISL12057_REG_RTC_MO 0x05 /* Month */ -#define ISL12057_REG_RTC_MO_CEN BIT(7) /* Century bit */ -#define ISL12057_REG_RTC_YR 0x06 /* Year */ -#define ISL12057_RTC_SEC_LEN 7 - -/* Alarm 1 section */ -#define ISL12057_REG_A1_SC 0x07 /* Alarm 1 Seconds */ -#define ISL12057_REG_A1_MN 0x08 /* Alarm 1 Minutes */ -#define ISL12057_REG_A1_HR 0x09 /* Alarm 1 Hours */ -#define ISL12057_REG_A1_HR_PM BIT(5) /* AM/PM bit in 12h format */ -#define ISL12057_REG_A1_HR_MIL BIT(6) /* 24h/12h format */ -#define ISL12057_REG_A1_DWDT 0x0A /* Alarm 1 Date / Day of the week */ -#define ISL12057_REG_A1_DWDT_B BIT(6) /* DW / DT selection bit */ -#define ISL12057_A1_SEC_LEN 4 - -/* Alarm 2 section */ -#define ISL12057_REG_A2_MN 0x0B /* Alarm 2 Minutes */ -#define ISL12057_REG_A2_HR 0x0C /* Alarm 2 Hours */ -#define ISL12057_REG_A2_DWDT 0x0D /* Alarm 2 Date / Day of the week */ -#define ISL12057_A2_SEC_LEN 3 - -/* Control/Status registers */ -#define ISL12057_REG_INT 0x0E -#define ISL12057_REG_INT_A1IE BIT(0) /* Alarm 1 interrupt enable bit */ -#define ISL12057_REG_INT_A2IE BIT(1) /* Alarm 2 interrupt enable bit */ -#define ISL12057_REG_INT_INTCN BIT(2) /* Interrupt control enable bit */ -#define ISL12057_REG_INT_RS1 BIT(3) /* Freq out control bit 1 */ -#define ISL12057_REG_INT_RS2 BIT(4) /* Freq out control bit 2 */ -#define ISL12057_REG_INT_EOSC BIT(7) /* Oscillator enable bit */ - -#define ISL12057_REG_SR 0x0F -#define ISL12057_REG_SR_A1F BIT(0) /* Alarm 1 interrupt bit */ -#define ISL12057_REG_SR_A2F BIT(1) /* Alarm 2 interrupt bit */ -#define ISL12057_REG_SR_OSF BIT(7) /* Oscillator failure bit */ - -/* Register memory map length */ -#define ISL12057_MEM_MAP_LEN 0x10 - -struct isl12057_rtc_data { - struct rtc_device *rtc; - struct regmap *regmap; - struct mutex lock; - int irq; -}; - -static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs) -{ - tm->tm_sec = bcd2bin(regs[ISL12057_REG_RTC_SC]); - tm->tm_min = bcd2bin(regs[ISL12057_REG_RTC_MN]); - - if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_MIL) { /* AM/PM */ - tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x1f); - if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_PM) - tm->tm_hour += 12; - } else { /* 24 hour mode */ - tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x3f); - } - - tm->tm_mday = bcd2bin(regs[ISL12057_REG_RTC_DT]); - tm->tm_wday = bcd2bin(regs[ISL12057_REG_RTC_DW]) - 1; /* starts at 1 */ - tm->tm_mon = bcd2bin(regs[ISL12057_REG_RTC_MO] & 0x1f) - 1; /* ditto */ - tm->tm_year = bcd2bin(regs[ISL12057_REG_RTC_YR]) + 100; - - /* Check if years register has overflown from 99 to 00 */ - if (regs[ISL12057_REG_RTC_MO] & ISL12057_REG_RTC_MO_CEN) - tm->tm_year += 100; -} - -static int isl12057_rtc_tm_to_regs(u8 *regs, struct rtc_time *tm) -{ - u8 century_bit; - - /* - * The clock has an 8 bit wide bcd-coded register for the year. 
- * It also has a century bit encoded in MO flag which provides - * information about overflow of year register from 99 to 00. - * tm_year is an offset from 1900 and we are interested in the - * 2000-2199 range, so any value less than 100 or larger than - * 299 is invalid. - */ - if (tm->tm_year < 100 || tm->tm_year > 299) - return -EINVAL; - - century_bit = (tm->tm_year > 199) ? ISL12057_REG_RTC_MO_CEN : 0; - - regs[ISL12057_REG_RTC_SC] = bin2bcd(tm->tm_sec); - regs[ISL12057_REG_RTC_MN] = bin2bcd(tm->tm_min); - regs[ISL12057_REG_RTC_HR] = bin2bcd(tm->tm_hour); /* 24-hour format */ - regs[ISL12057_REG_RTC_DT] = bin2bcd(tm->tm_mday); - regs[ISL12057_REG_RTC_MO] = bin2bcd(tm->tm_mon + 1) | century_bit; - regs[ISL12057_REG_RTC_YR] = bin2bcd(tm->tm_year % 100); - regs[ISL12057_REG_RTC_DW] = bin2bcd(tm->tm_wday + 1); - - return 0; -} - -/* - * Try and match register bits w/ fixed null values to see whether we - * are dealing with an ISL12057. Note: this function is called early - * during init and hence does need mutex protection. - */ -static int isl12057_i2c_validate_chip(struct regmap *regmap) -{ - u8 regs[ISL12057_MEM_MAP_LEN]; - static const u8 mask[ISL12057_MEM_MAP_LEN] = { 0x80, 0x80, 0x80, 0xf8, - 0xc0, 0x60, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x60, 0x7c }; - int ret, i; - - ret = regmap_bulk_read(regmap, 0, regs, ISL12057_MEM_MAP_LEN); - if (ret) - return ret; - - for (i = 0; i < ISL12057_MEM_MAP_LEN; ++i) { - if (regs[i] & mask[i]) /* check if bits are cleared */ - return -ENODEV; - } - - return 0; -} - -static int _isl12057_rtc_clear_alarm(struct device *dev) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - int ret; - - ret = regmap_update_bits(data->regmap, ISL12057_REG_SR, - ISL12057_REG_SR_A1F, 0); - if (ret) - dev_err(dev, "%s: clearing alarm failed (%d)\n", __func__, ret); - - return ret; -} - -static int _isl12057_rtc_update_alarm(struct device *dev, int enable) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - int ret; - - ret = regmap_update_bits(data->regmap, ISL12057_REG_INT, - ISL12057_REG_INT_A1IE, - enable ? ISL12057_REG_INT_A1IE : 0); - if (ret) - dev_err(dev, "%s: changing alarm interrupt flag failed (%d)\n", - __func__, ret); - - return ret; -} - -/* - * Note: as we only read from device and do not perform any update, there is - * no need for an equivalent function which would try and get driver's main - * lock. Here, it is safe for everyone if we just use regmap internal lock - * on the device when reading. 
- */ -static int _isl12057_rtc_read_time(struct device *dev, struct rtc_time *tm) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - u8 regs[ISL12057_RTC_SEC_LEN]; - unsigned int sr; - int ret; - - ret = regmap_read(data->regmap, ISL12057_REG_SR, &sr); - if (ret) { - dev_err(dev, "%s: unable to read oscillator status flag (%d)\n", - __func__, ret); - goto out; - } else { - if (sr & ISL12057_REG_SR_OSF) { - ret = -ENODATA; - goto out; - } - } - - ret = regmap_bulk_read(data->regmap, ISL12057_REG_RTC_SC, regs, - ISL12057_RTC_SEC_LEN); - if (ret) - dev_err(dev, "%s: unable to read RTC time section (%d)\n", - __func__, ret); - -out: - if (ret) - return ret; - - isl12057_rtc_regs_to_tm(tm, regs); - - return rtc_valid_tm(tm); -} - -static int isl12057_rtc_update_alarm(struct device *dev, int enable) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - int ret; - - mutex_lock(&data->lock); - ret = _isl12057_rtc_update_alarm(dev, enable); - mutex_unlock(&data->lock); - - return ret; -} - -static int isl12057_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - struct rtc_time *alarm_tm = &alarm->time; - u8 regs[ISL12057_A1_SEC_LEN]; - unsigned int ir; - int ret; - - mutex_lock(&data->lock); - ret = regmap_bulk_read(data->regmap, ISL12057_REG_A1_SC, regs, - ISL12057_A1_SEC_LEN); - if (ret) { - dev_err(dev, "%s: reading alarm section failed (%d)\n", - __func__, ret); - goto err_unlock; - } - - alarm_tm->tm_sec = bcd2bin(regs[0] & 0x7f); - alarm_tm->tm_min = bcd2bin(regs[1] & 0x7f); - alarm_tm->tm_hour = bcd2bin(regs[2] & 0x3f); - alarm_tm->tm_mday = bcd2bin(regs[3] & 0x3f); - - ret = regmap_read(data->regmap, ISL12057_REG_INT, &ir); - if (ret) { - dev_err(dev, "%s: reading alarm interrupt flag failed (%d)\n", - __func__, ret); - goto err_unlock; - } - - alarm->enabled = !!(ir & ISL12057_REG_INT_A1IE); - -err_unlock: - mutex_unlock(&data->lock); - - return ret; -} - -static int isl12057_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - struct rtc_time *alarm_tm = &alarm->time; - unsigned long rtc_secs, alarm_secs; - u8 regs[ISL12057_A1_SEC_LEN]; - struct rtc_time rtc_tm; - int ret, enable = 1; - - mutex_lock(&data->lock); - ret = _isl12057_rtc_read_time(dev, &rtc_tm); - if (ret) - goto err_unlock; - - ret = rtc_tm_to_time(&rtc_tm, &rtc_secs); - if (ret) - goto err_unlock; - - ret = rtc_tm_to_time(alarm_tm, &alarm_secs); - if (ret) - goto err_unlock; - - /* If alarm time is before current time, disable the alarm */ - if (!alarm->enabled || alarm_secs <= rtc_secs) { - enable = 0; - } else { - /* - * Chip only support alarms up to one month in the future. Let's - * return an error if we get something after that limit. - * Comparison is done by incrementing rtc_tm month field by one - * and checking alarm value is still below. 
- */ - if (rtc_tm.tm_mon == 11) { /* handle year wrapping */ - rtc_tm.tm_mon = 0; - rtc_tm.tm_year += 1; - } else { - rtc_tm.tm_mon += 1; - } - - ret = rtc_tm_to_time(&rtc_tm, &rtc_secs); - if (ret) - goto err_unlock; - - if (alarm_secs > rtc_secs) { - dev_err(dev, "%s: max for alarm is one month (%d)\n", - __func__, ret); - ret = -EINVAL; - goto err_unlock; - } - } - - /* Disable the alarm before modifying it */ - ret = _isl12057_rtc_update_alarm(dev, 0); - if (ret < 0) { - dev_err(dev, "%s: unable to disable the alarm (%d)\n", - __func__, ret); - goto err_unlock; - } - - /* Program alarm registers */ - regs[0] = bin2bcd(alarm_tm->tm_sec) & 0x7f; - regs[1] = bin2bcd(alarm_tm->tm_min) & 0x7f; - regs[2] = bin2bcd(alarm_tm->tm_hour) & 0x3f; - regs[3] = bin2bcd(alarm_tm->tm_mday) & 0x3f; - - ret = regmap_bulk_write(data->regmap, ISL12057_REG_A1_SC, regs, - ISL12057_A1_SEC_LEN); - if (ret < 0) { - dev_err(dev, "%s: writing alarm section failed (%d)\n", - __func__, ret); - goto err_unlock; - } - - /* Enable or disable alarm */ - ret = _isl12057_rtc_update_alarm(dev, enable); - -err_unlock: - mutex_unlock(&data->lock); - - return ret; -} - -static int isl12057_rtc_set_time(struct device *dev, struct rtc_time *tm) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - u8 regs[ISL12057_RTC_SEC_LEN]; - int ret; - - ret = isl12057_rtc_tm_to_regs(regs, tm); - if (ret) - return ret; - - mutex_lock(&data->lock); - ret = regmap_bulk_write(data->regmap, ISL12057_REG_RTC_SC, regs, - ISL12057_RTC_SEC_LEN); - if (ret) { - dev_err(dev, "%s: unable to write RTC time section (%d)\n", - __func__, ret); - goto out; - } - - /* - * Now that RTC time has been updated, let's clear oscillator - * failure flag, if needed. - */ - ret = regmap_update_bits(data->regmap, ISL12057_REG_SR, - ISL12057_REG_SR_OSF, 0); - if (ret < 0) - dev_err(dev, "%s: unable to clear osc. failure bit (%d)\n", - __func__, ret); - -out: - mutex_unlock(&data->lock); - - return ret; -} - -/* - * Check current RTC status and enable/disable what needs to be. Return 0 if - * everything went ok and a negative value upon error. Note: this function - * is called early during init and hence does need mutex protection. - */ -static int isl12057_check_rtc_status(struct device *dev, struct regmap *regmap) -{ - int ret; - - /* Enable oscillator if not already running */ - ret = regmap_update_bits(regmap, ISL12057_REG_INT, - ISL12057_REG_INT_EOSC, 0); - if (ret < 0) { - dev_err(dev, "%s: unable to enable oscillator (%d)\n", - __func__, ret); - return ret; - } - - /* Clear alarm bit if needed */ - ret = regmap_update_bits(regmap, ISL12057_REG_SR, - ISL12057_REG_SR_A1F, 0); - if (ret < 0) { - dev_err(dev, "%s: unable to clear alarm bit (%d)\n", - __func__, ret); - return ret; - } - - return 0; -} - -#ifdef CONFIG_OF -/* - * One would expect the device to be marked as a wakeup source only - * when an IRQ pin of the RTC is routed to an interrupt line of the - * CPU. In practice, such an IRQ pin can be connected to a PMIC and - * this allows the device to be powered up when RTC alarm rings. This - * is for instance the case on ReadyNAS 102, 104 and 2120. On those - * devices with no IRQ driectly connected to the SoC, the RTC chip - * can be forced as a wakeup source by stating that explicitly in - * the device's .dts file using the "wakeup-source" boolean property. - * This will guarantee 'wakealarm' sysfs entry is available on the device. - * - * The function below returns 1, i.e. 
the chip can - * wake up the machine, based on IRQ availability or on the boolean - * property set in the .dts file. Otherwise, it returns false. - */ - -static bool isl12057_can_wakeup_machine(struct device *dev) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - - return data->irq || of_property_read_bool(dev->of_node, "wakeup-source") - || of_property_read_bool(dev->of_node, /* legacy */ - "isil,irq2-can-wakeup-machine"); -} -#else -static bool isl12057_can_wakeup_machine(struct device *dev) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - - return !!data->irq; -} -#endif - -static int isl12057_rtc_alarm_irq_enable(struct device *dev, - unsigned int enable) -{ - struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev); - int ret = -ENOTTY; - - if (rtc_data->irq) - ret = isl12057_rtc_update_alarm(dev, enable); - - return ret; -} - -static irqreturn_t isl12057_rtc_interrupt(int irq, void *data) -{ - struct i2c_client *client = data; - struct isl12057_rtc_data *rtc_data = dev_get_drvdata(&client->dev); - struct rtc_device *rtc = rtc_data->rtc; - int ret, handled = IRQ_NONE; - unsigned int sr; - - ret = regmap_read(rtc_data->regmap, ISL12057_REG_SR, &sr); - if (!ret && (sr & ISL12057_REG_SR_A1F)) { - dev_dbg(&client->dev, "RTC alarm!\n"); - - rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF); - - /* Acknowledge and disable the alarm */ - _isl12057_rtc_clear_alarm(&client->dev); - _isl12057_rtc_update_alarm(&client->dev, 0); - - handled = IRQ_HANDLED; - } - - return handled; -} - -static const struct rtc_class_ops rtc_ops = { - .read_time = _isl12057_rtc_read_time, - .set_time = isl12057_rtc_set_time, - .read_alarm = isl12057_rtc_read_alarm, - .set_alarm = isl12057_rtc_set_alarm, - .alarm_irq_enable = isl12057_rtc_alarm_irq_enable, -}; - -static const struct regmap_config isl12057_rtc_regmap_config = { - .reg_bits = 8, - .val_bits = 8, -}; - -static int isl12057_probe(struct i2c_client *client, - const struct i2c_device_id *id) -{ - struct device *dev = &client->dev; - struct isl12057_rtc_data *data; - struct regmap *regmap; - int ret; - - if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C | - I2C_FUNC_SMBUS_BYTE_DATA | - I2C_FUNC_SMBUS_I2C_BLOCK)) - return -ENODEV; - - regmap = devm_regmap_init_i2c(client, &isl12057_rtc_regmap_config); - if (IS_ERR(regmap)) { - ret = PTR_ERR(regmap); - dev_err(dev, "%s: regmap allocation failed (%d)\n", - __func__, ret); - return ret; - } - - ret = isl12057_i2c_validate_chip(regmap); - if (ret) - return ret; - - ret = isl12057_check_rtc_status(dev, regmap); - if (ret) - return ret; - - data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - mutex_init(&data->lock); - data->regmap = regmap; - dev_set_drvdata(dev, data); - - if (client->irq > 0) { - ret = devm_request_threaded_irq(dev, client->irq, NULL, - isl12057_rtc_interrupt, - IRQF_SHARED|IRQF_ONESHOT, - DRV_NAME, client); - if (!ret) - data->irq = client->irq; - else - dev_err(dev, "%s: irq %d unavailable (%d)\n", __func__, - client->irq, ret); - } - - if (isl12057_can_wakeup_machine(dev)) - device_init_wakeup(dev, true); - - data->rtc = devm_rtc_device_register(dev, DRV_NAME, &rtc_ops, - THIS_MODULE); - ret = PTR_ERR_OR_ZERO(data->rtc); - if (ret) { - dev_err(dev, "%s: unable to register RTC device (%d)\n", - __func__, ret); - goto err; - } - - /* We cannot support UIE mode if we do not have an IRQ line */ - if (!data->irq) - data->rtc->uie_unsupported = 1; - -err: - return ret; -} - -static int isl12057_remove(struct
i2c_client *client) -{ - if (isl12057_can_wakeup_machine(&client->dev)) - device_init_wakeup(&client->dev, false); - - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static int isl12057_rtc_suspend(struct device *dev) -{ - struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev); - - if (rtc_data->irq && device_may_wakeup(dev)) - return enable_irq_wake(rtc_data->irq); - - return 0; -} - -static int isl12057_rtc_resume(struct device *dev) -{ - struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev); - - if (rtc_data->irq && device_may_wakeup(dev)) - return disable_irq_wake(rtc_data->irq); - - return 0; -} -#endif - -static SIMPLE_DEV_PM_OPS(isl12057_rtc_pm_ops, isl12057_rtc_suspend, - isl12057_rtc_resume); - -#ifdef CONFIG_OF -static const struct of_device_id isl12057_dt_match[] = { - { .compatible = "isl,isl12057" }, /* for backward compat., don't use */ - { .compatible = "isil,isl12057" }, - { }, -}; -MODULE_DEVICE_TABLE(of, isl12057_dt_match); -#endif - -static const struct i2c_device_id isl12057_id[] = { - { "isl12057", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, isl12057_id); - -static struct i2c_driver isl12057_driver = { - .driver = { - .name = DRV_NAME, - .pm = &isl12057_rtc_pm_ops, - .of_match_table = of_match_ptr(isl12057_dt_match), - }, - .probe = isl12057_probe, - .remove = isl12057_remove, - .id_table = isl12057_id, -}; -module_i2c_driver(isl12057_driver); - -MODULE_AUTHOR("Arnaud EBALARD <arno@natisbad.org>"); -MODULE_DESCRIPTION("Intersil ISL12057 RTC driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c index b2bcfc0bf2e5..5e14651b71a8 100644 --- a/drivers/rtc/rtc-jz4740.c +++ b/drivers/rtc/rtc-jz4740.c @@ -174,7 +174,7 @@ static int jz4740_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AF_IRQ, enable); } -static struct rtc_class_ops jz4740_rtc_ops = { +static const struct rtc_class_ops jz4740_rtc_ops = { .read_time = jz4740_rtc_read_time, .set_mmss = jz4740_rtc_set_mmss, .read_alarm = jz4740_rtc_read_alarm, diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c index 025bb33b9cd2..4021fd04cb0a 100644 --- a/drivers/rtc/rtc-mcp795.c +++ b/drivers/rtc/rtc-mcp795.c @@ -151,7 +151,7 @@ static int mcp795_read_time(struct device *dev, struct rtc_time *tim) return rtc_valid_tm(tim); } -static struct rtc_class_ops mcp795_rtc_ops = { +static const struct rtc_class_ops mcp795_rtc_ops = { .read_time = mcp795_read_time, .set_time = mcp795_set_time }; diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c index 44f622c3e048..1a61fa56f3ad 100644 --- a/drivers/rtc/rtc-mt6397.c +++ b/drivers/rtc/rtc-mt6397.c @@ -301,7 +301,7 @@ exit: return ret; } -static struct rtc_class_ops mtk_rtc_ops = { +static const struct rtc_class_ops mtk_rtc_ops = { .read_time = mtk_rtc_read_time, .set_time = mtk_rtc_set_time, .read_alarm = mtk_rtc_read_alarm, diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c index 09fc1c19f0df..b1b6b3041bfb 100644 --- a/drivers/rtc/rtc-nuc900.c +++ b/drivers/rtc/rtc-nuc900.c @@ -214,7 +214,7 @@ static int nuc900_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) return 0; } -static struct rtc_class_ops nuc900_rtc_ops = { +static const struct rtc_class_ops nuc900_rtc_ops = { .read_time = nuc900_rtc_read_time, .set_time = nuc900_rtc_set_time, .read_alarm = nuc900_rtc_read_alarm, diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index ec2e9c5fb993..b04ea9b5ae67 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ 
-13,19 +13,23 @@ * 2 of the License, or (at your option) any later version. */ -#include <linux/kernel.h> +#include <dt-bindings/gpio/gpio.h> +#include <linux/bcd.h> +#include <linux/clk.h> +#include <linux/delay.h> #include <linux/init.h> -#include <linux/module.h> +#include <linux/io.h> #include <linux/ioport.h> -#include <linux/delay.h> -#include <linux/rtc.h> -#include <linux/bcd.h> -#include <linux/platform_device.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinconf.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <linux/io.h> -#include <linux/clk.h> +#include <linux/rtc.h> /* * The OMAP RTC is a year/month/day/hours/minutes/seconds BCD clock @@ -115,6 +119,8 @@ /* OMAP_RTC_PMIC bit fields: */ #define OMAP_RTC_PMIC_POWER_EN_EN BIT(16) +#define OMAP_RTC_PMIC_EXT_WKUP_EN(x) BIT(x) +#define OMAP_RTC_PMIC_EXT_WKUP_POL(x) BIT(4 + x) /* OMAP_RTC_KICKER values */ #define KICK0_VALUE 0x83e70b13 @@ -141,6 +147,7 @@ struct omap_rtc { bool is_pmic_controller; bool has_ext_clk; const struct omap_rtc_device_type *type; + struct pinctrl_dev *pctldev; }; static inline u8 rtc_read(struct omap_rtc *rtc, unsigned int reg) @@ -469,7 +476,7 @@ static void omap_rtc_power_off(void) mdelay(2500); } -static struct rtc_class_ops omap_rtc_ops = { +static const struct rtc_class_ops omap_rtc_ops = { .read_time = omap_rtc_read_time, .set_time = omap_rtc_set_time, .read_alarm = omap_rtc_read_alarm, @@ -525,6 +532,139 @@ static const struct of_device_id omap_rtc_of_match[] = { }; MODULE_DEVICE_TABLE(of, omap_rtc_of_match); +static const struct pinctrl_pin_desc rtc_pins_desc[] = { + PINCTRL_PIN(0, "ext_wakeup0"), + PINCTRL_PIN(1, "ext_wakeup1"), + PINCTRL_PIN(2, "ext_wakeup2"), + PINCTRL_PIN(3, "ext_wakeup3"), +}; + +static int rtc_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) +{ + return 0; +} + +static const char *rtc_pinctrl_get_group_name(struct pinctrl_dev *pctldev, + unsigned int group) +{ + return NULL; +} + +static const struct pinctrl_ops rtc_pinctrl_ops = { + .get_groups_count = rtc_pinctrl_get_groups_count, + .get_group_name = rtc_pinctrl_get_group_name, + .dt_node_to_map = pinconf_generic_dt_node_to_map_pin, + .dt_free_map = pinconf_generic_dt_free_map, +}; + +enum rtc_pin_config_param { + PIN_CONFIG_ACTIVE_HIGH = PIN_CONFIG_END + 1, +}; + +static const struct pinconf_generic_params rtc_params[] = { + {"ti,active-high", PIN_CONFIG_ACTIVE_HIGH, 0}, +}; + +#ifdef CONFIG_DEBUG_FS +static const struct pin_config_item rtc_conf_items[ARRAY_SIZE(rtc_params)] = { + PCONFDUMP(PIN_CONFIG_ACTIVE_HIGH, "input active high", NULL, false), +}; +#endif + +static int rtc_pinconf_get(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *config) +{ + struct omap_rtc *rtc = pinctrl_dev_get_drvdata(pctldev); + unsigned int param = pinconf_to_config_param(*config); + u32 val; + u16 arg = 0; + + rtc->type->unlock(rtc); + val = rtc_readl(rtc, OMAP_RTC_PMIC_REG); + rtc->type->lock(rtc); + + switch (param) { + case PIN_CONFIG_INPUT_ENABLE: + if (!(val & OMAP_RTC_PMIC_EXT_WKUP_EN(pin))) + return -EINVAL; + break; + case PIN_CONFIG_ACTIVE_HIGH: + if (val & OMAP_RTC_PMIC_EXT_WKUP_POL(pin)) + return -EINVAL; + break; + default: + return -ENOTSUPP; + }; + + *config = pinconf_to_config_packed(param, arg); + + return 0; +} + +static int rtc_pinconf_set(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *configs, 
+ unsigned int num_configs) +{ + struct omap_rtc *rtc = pinctrl_dev_get_drvdata(pctldev); + u32 val; + unsigned int param; + u16 param_val; + int i; + + rtc->type->unlock(rtc); + val = rtc_readl(rtc, OMAP_RTC_PMIC_REG); + rtc->type->lock(rtc); + + /* active low by default */ + val |= OMAP_RTC_PMIC_EXT_WKUP_POL(pin); + + for (i = 0; i < num_configs; i++) { + param = pinconf_to_config_param(configs[i]); + param_val = pinconf_to_config_argument(configs[i]); + + switch (param) { + case PIN_CONFIG_INPUT_ENABLE: + if (param_val) + val |= OMAP_RTC_PMIC_EXT_WKUP_EN(pin); + else + val &= ~OMAP_RTC_PMIC_EXT_WKUP_EN(pin); + break; + case PIN_CONFIG_ACTIVE_HIGH: + val &= ~OMAP_RTC_PMIC_EXT_WKUP_POL(pin); + break; + default: + dev_err(&rtc->rtc->dev, "Property %u not supported\n", + param); + return -ENOTSUPP; + } + } + + rtc->type->unlock(rtc); + rtc_writel(rtc, OMAP_RTC_PMIC_REG, val); + rtc->type->lock(rtc); + + return 0; +} + +static const struct pinconf_ops rtc_pinconf_ops = { + .is_generic = true, + .pin_config_get = rtc_pinconf_get, + .pin_config_set = rtc_pinconf_set, +}; + +static struct pinctrl_desc rtc_pinctrl_desc = { + .pins = rtc_pins_desc, + .npins = ARRAY_SIZE(rtc_pins_desc), + .pctlops = &rtc_pinctrl_ops, + .confops = &rtc_pinconf_ops, + .custom_params = rtc_params, + .num_custom_params = ARRAY_SIZE(rtc_params), +#ifdef CONFIG_DEBUG_FS + .custom_conf_items = rtc_conf_items, +#endif + .owner = THIS_MODULE, +}; + static int omap_rtc_probe(struct platform_device *pdev) { struct omap_rtc *rtc; @@ -681,6 +821,15 @@ static int omap_rtc_probe(struct platform_device *pdev) } } + /* Support ext_wakeup pinconf */ + rtc_pinctrl_desc.name = dev_name(&pdev->dev); + + rtc->pctldev = pinctrl_register(&rtc_pinctrl_desc, &pdev->dev, rtc); + if (IS_ERR(rtc->pctldev)) { + dev_err(&pdev->dev, "Couldn't register pinctrl driver\n"); + return PTR_ERR(rtc->pctldev); + } + return 0; err: @@ -724,6 +873,9 @@ static int __exit omap_rtc_remove(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); + /* Remove ext_wakeup pinconf */ + pinctrl_unregister(rtc->pctldev); + return 0; } diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c index 6080e0edef63..4bcfb88674d3 100644 --- a/drivers/rtc/rtc-palmas.c +++ b/drivers/rtc/rtc-palmas.c @@ -225,7 +225,7 @@ static irqreturn_t palmas_rtc_interrupt(int irq, void *context) return IRQ_HANDLED; } -static struct rtc_class_ops palmas_rtc_ops = { +static const struct rtc_class_ops palmas_rtc_ops = { .read_time = palmas_rtc_read_time, .set_time = palmas_rtc_set_time, .read_alarm = palmas_rtc_read_alarm, diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c index b4478cc92b55..8895f77726e8 100644 --- a/drivers/rtc/rtc-pcf2123.c +++ b/drivers/rtc/rtc-pcf2123.c @@ -182,7 +182,8 @@ static ssize_t pcf2123_show(struct device *dev, struct device_attribute *attr, } static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr, - const char *buffer, size_t count) { + const char *buffer, size_t count) +{ struct pcf2123_sysfs_reg *r; unsigned long reg; unsigned long val; @@ -199,7 +200,7 @@ static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr, if (ret) return ret; - pcf2123_write_reg(dev, reg, val); + ret = pcf2123_write_reg(dev, reg, val); if (ret < 0) return -EIO; return count; diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c index e6b6911c8e05..00c31c91b245 100644 --- a/drivers/rtc/rtc-pcf50633.c +++ b/drivers/rtc/rtc-pcf50633.c @@ -232,7 +232,7 @@ static int 
pcf50633_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) return ret; } -static struct rtc_class_ops pcf50633_rtc_ops = { +static const struct rtc_class_ops pcf50633_rtc_ops = { .read_time = pcf50633_rtc_read_time, .set_time = pcf50633_rtc_set_time, .read_alarm = pcf50633_rtc_read_alarm, diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c index 64e1e4578492..5cfb6df5c430 100644 --- a/drivers/rtc/rtc-pic32.c +++ b/drivers/rtc/rtc-pic32.c @@ -400,7 +400,6 @@ static struct platform_driver pic32_rtc_driver = { .remove = pic32_rtc_remove, .driver = { .name = "pic32-rtc", - .owner = THIS_MODULE, .of_match_table = of_match_ptr(pic32_rtc_dt_ids), }, }; diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c index 9a2f6a95d5a7..f9277e536f7e 100644 --- a/drivers/rtc/rtc-rv8803.c +++ b/drivers/rtc/rtc-rv8803.c @@ -52,11 +52,21 @@ #define RV8803_CTRL_TIE BIT(4) #define RV8803_CTRL_UIE BIT(5) +#define RX8900_BACKUP_CTRL 0x18 +#define RX8900_FLAG_SWOFF BIT(2) +#define RX8900_FLAG_VDETOFF BIT(3) + +enum rv8803_type { + rv_8803, + rx_8900 +}; + struct rv8803_data { struct i2c_client *client; struct rtc_device *rtc; struct mutex flags_lock; u8 ctrl; + enum rv8803_type type; }; static int rv8803_read_reg(const struct i2c_client *client, u8 reg) @@ -497,6 +507,35 @@ static struct rtc_class_ops rv8803_rtc_ops = { .ioctl = rv8803_ioctl, }; +static int rx8900_trickle_charger_init(struct rv8803_data *rv8803) +{ + struct i2c_client *client = rv8803->client; + struct device_node *node = client->dev.of_node; + int err; + u8 flags; + + if (!node) + return 0; + + if (rv8803->type != rx_8900) + return 0; + + err = i2c_smbus_read_byte_data(rv8803->client, RX8900_BACKUP_CTRL); + if (err < 0) + return err; + + flags = ~(RX8900_FLAG_VDETOFF | RX8900_FLAG_SWOFF) & (u8)err; + + if (of_property_read_bool(node, "epson,vdet-disable")) + flags |= RX8900_FLAG_VDETOFF; + + if (of_property_read_bool(node, "trickle-diode-disable")) + flags |= RX8900_FLAG_SWOFF; + + return i2c_smbus_write_byte_data(rv8803->client, RX8900_BACKUP_CTRL, + flags); +} + static int rv8803_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -517,6 +556,7 @@ static int rv8803_probe(struct i2c_client *client, mutex_init(&rv8803->flags_lock); rv8803->client = client; + rv8803->type = id->driver_data; i2c_set_clientdata(client, rv8803); flags = rv8803_read_reg(client, RV8803_FLAG); @@ -558,6 +598,12 @@ static int rv8803_probe(struct i2c_client *client, if (err) return err; + err = rx8900_trickle_charger_init(rv8803); + if (err) { + dev_err(&client->dev, "failed to init charger\n"); + return err; + } + err = device_create_bin_file(&client->dev, &rv8803_nvram_attr); if (err) return err; @@ -575,8 +621,8 @@ static int rv8803_remove(struct i2c_client *client) } static const struct i2c_device_id rv8803_id[] = { - { "rv8803", 0 }, - { "rx8900", 0 }, + { "rv8803", rv_8803 }, + { "rx8900", rx_8900 }, { } }; MODULE_DEVICE_TABLE(i2c, rv8803_id); diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c index bbad00b233bc..7c9c08eab5e5 100644 --- a/drivers/rtc/rtc-rx6110.c +++ b/drivers/rtc/rtc-rx6110.c @@ -317,7 +317,7 @@ static int rx6110_init(struct rx6110_data *rx6110) return ret; } -static struct rtc_class_ops rx6110_rtc_ops = { +static const struct rtc_class_ops rx6110_rtc_ops = { .read_time = rx6110_get_time, .set_time = rx6110_set_time, }; @@ -388,7 +388,6 @@ MODULE_DEVICE_TABLE(spi, rx6110_id); static struct spi_driver rx6110_driver = { .driver = { .name = RX6110_DRIVER_NAME, - .owner = THIS_MODULE, }, 
.probe = rx6110_probe, .remove = rx6110_remove, diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c index 2b85cc7a24e7..91857d8d2df8 100644 --- a/drivers/rtc/rtc-rx8025.c +++ b/drivers/rtc/rtc-rx8025.c @@ -403,7 +403,7 @@ static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled) return 0; } -static struct rtc_class_ops rx8025_rtc_ops = { +static const struct rtc_class_ops rx8025_rtc_ops = { .read_time = rx8025_get_time, .set_time = rx8025_set_time, .read_alarm = rx8025_read_alarm, diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c index f05ef8568480..e377f42abae7 100644 --- a/drivers/rtc/rtc-spear.c +++ b/drivers/rtc/rtc-spear.c @@ -343,7 +343,7 @@ static int spear_alarm_irq_enable(struct device *dev, unsigned int enabled) return ret; } -static struct rtc_class_ops spear_rtc_ops = { +static const struct rtc_class_ops spear_rtc_ops = { .read_time = spear_rtc_read_time, .set_time = spear_rtc_set_time, .read_alarm = spear_rtc_read_alarm, diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c index e6aaaa52e7fe..d578e40d5a50 100644 --- a/drivers/rtc/rtc-stmp3xxx.c +++ b/drivers/rtc/rtc-stmp3xxx.c @@ -231,7 +231,7 @@ static int stmp3xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) return 0; } -static struct rtc_class_ops stmp3xxx_rtc_ops = { +static const struct rtc_class_ops stmp3xxx_rtc_ops = { .alarm_irq_enable = stmp3xxx_alarm_irq_enable, .read_time = stmp3xxx_rtc_gettime, diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c index 63b9fb1318c2..1218d5d4224d 100644 --- a/drivers/rtc/rtc-sysfs.c +++ b/drivers/rtc/rtc-sysfs.c @@ -160,7 +160,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr, unsigned long push = 0; struct rtc_wkalrm alm; struct rtc_device *rtc = to_rtc_device(dev); - char *buf_ptr; + const char *buf_ptr; int adjust = 0; /* Only request alarms that trigger in the future. Disable them @@ -171,7 +171,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr, return retval; rtc_tm_to_time(&alm.time, &now); - buf_ptr = (char *)buf; + buf_ptr = buf; if (*buf_ptr == '+') { buf_ptr++; if (*buf_ptr == '=') { diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c index 15ac597d54da..3853ba963bb5 100644 --- a/drivers/rtc/rtc-tegra.c +++ b/drivers/rtc/rtc-tegra.c @@ -291,7 +291,7 @@ static irqreturn_t tegra_rtc_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static struct rtc_class_ops tegra_rtc_ops = { +static const struct rtc_class_ops tegra_rtc_ops = { .read_time = tegra_rtc_read_time, .set_time = tegra_rtc_set_time, .read_alarm = tegra_rtc_read_alarm, diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c index 2dc787dc06c1..176720b7b9e5 100644 --- a/drivers/rtc/rtc-twl.c +++ b/drivers/rtc/rtc-twl.c @@ -462,7 +462,7 @@ out: return ret; } -static struct rtc_class_ops twl_rtc_ops = { +static const struct rtc_class_ops twl_rtc_ops = { .read_time = twl_rtc_read_time, .set_time = twl_rtc_set_time, .read_alarm = twl_rtc_read_alarm, diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 6a6906f847db..68138a647dfc 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -61,7 +61,7 @@ MODULE_PARM_DESC(be_max_phys_size, "memory that can be allocated. 
Range is 16 - 128"); #define beiscsi_disp_param(_name)\ -ssize_t \ +static ssize_t \ beiscsi_##_name##_disp(struct device *dev,\ struct device_attribute *attrib, char *buf) \ { \ @@ -74,7 +74,7 @@ beiscsi_##_name##_disp(struct device *dev,\ } #define beiscsi_change_param(_name, _minval, _maxval, _defaval)\ -int \ +static int \ beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\ {\ if (val >= _minval && val <= _maxval) {\ @@ -93,7 +93,7 @@ beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\ } #define beiscsi_store_param(_name) \ -ssize_t \ +static ssize_t \ beiscsi_##_name##_store(struct device *dev,\ struct device_attribute *attr, const char *buf,\ size_t count) \ @@ -112,7 +112,7 @@ beiscsi_##_name##_store(struct device *dev,\ } #define beiscsi_init_param(_name, _minval, _maxval, _defval) \ -int \ +static int \ beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \ { \ if (val >= _minval && val <= _maxval) {\ @@ -4584,7 +4584,7 @@ free_hndls: io_task->cmd_bhs = NULL; return -ENOMEM; } -int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, +static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, unsigned int num_sg, unsigned int xferlen, unsigned int writedir) { @@ -4973,7 +4973,7 @@ static int beiscsi_bsg_request(struct bsg_job *job) return rc; } -void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) +static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) { /* Set the logging parameter */ beiscsi_log_enable_init(phba, beiscsi_log_enable); diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 7c0d7af0d3b7..0039bebaa9e2 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -685,6 +685,11 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) req_completion); csk->snd_nxt += len; cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR); + } else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) && + (csk->wr_una_cred >= (csk->wr_max_cred / 2))) { + struct cpl_close_con_req *req = + (struct cpl_close_con_req *)skb->data; + req->wr.wr_hi |= htonl(FW_WR_COMPL_F); } total_size += skb->truesize; t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard); diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 516bd6c4f442..cbf010324c18 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -30,161 +30,41 @@ #include "NCR5380.h" #include <linux/init.h> #include <linux/ioport.h> -#include <linux/isapnp.h> +#include <linux/isa.h> +#include <linux/pnp.h> #include <linux/interrupt.h> +#define MAX_CARDS 8 + +/* old-style parameters for compatibility */ static int ncr_irq; -static int ncr_dma; static int ncr_addr; static int ncr_5380; static int ncr_53c400; static int ncr_53c400a; static int dtc_3181e; static int hp_c2502; +module_param(ncr_irq, int, 0); +module_param(ncr_addr, int, 0); +module_param(ncr_5380, int, 0); +module_param(ncr_53c400, int, 0); +module_param(ncr_53c400a, int, 0); +module_param(dtc_3181e, int, 0); +module_param(hp_c2502, int, 0); -static struct override { - NCR5380_map_type NCR5380_map_name; - int irq; - int dma; - int board; /* Use NCR53c400, Ricoh, etc. extensions ? 
*/ -} overrides -#ifdef GENERIC_NCR5380_OVERRIDE -[] __initdata = GENERIC_NCR5380_OVERRIDE; -#else -[1] __initdata = { { 0,},}; -#endif - -#define NO_OVERRIDES ARRAY_SIZE(overrides) - -#ifndef MODULE - -/** - * internal_setup - handle lilo command string override - * @board: BOARD_* identifier for the board - * @str: unused - * @ints: numeric parameters - * - * Do LILO command line initialization of the overrides array. Display - * errors when needed - * - * Locks: none - */ - -static void __init internal_setup(int board, char *str, int *ints) -{ - static int commandline_current; - switch (board) { - case BOARD_NCR5380: - if (ints[0] != 2 && ints[0] != 3) { - printk(KERN_ERR "generic_NCR5380_setup : usage ncr5380=" STRVAL(NCR5380_map_name) ",irq,dma\n"); - return; - } - break; - case BOARD_NCR53C400: - if (ints[0] != 2) { - printk(KERN_ERR "generic_NCR53C400_setup : usage ncr53c400=" STRVAL(NCR5380_map_name) ",irq\n"); - return; - } - break; - case BOARD_NCR53C400A: - if (ints[0] != 2) { - printk(KERN_ERR "generic_NCR53C400A_setup : usage ncr53c400a=" STRVAL(NCR5380_map_name) ",irq\n"); - return; - } - break; - case BOARD_DTC3181E: - if (ints[0] != 2) { - printk("generic_DTC3181E_setup : usage dtc3181e=" STRVAL(NCR5380_map_name) ",irq\n"); - return; - } - break; - } - - if (commandline_current < NO_OVERRIDES) { - overrides[commandline_current].NCR5380_map_name = (NCR5380_map_type) ints[1]; - overrides[commandline_current].irq = ints[2]; - if (ints[0] == 3) - overrides[commandline_current].dma = ints[3]; - else - overrides[commandline_current].dma = DMA_NONE; - overrides[commandline_current].board = board; - ++commandline_current; - } -} - - -/** - * do_NCR53C80_setup - set up entry point - * @str: unused - * - * Setup function invoked at boot to parse the ncr5380= command - * line. - */ - -static int __init do_NCR5380_setup(char *str) -{ - int ints[10]; - - get_options(str, ARRAY_SIZE(ints), ints); - internal_setup(BOARD_NCR5380, str, ints); - return 1; -} - -/** - * do_NCR53C400_setup - set up entry point - * @str: unused - * @ints: integer parameters from kernel setup code - * - * Setup function invoked at boot to parse the ncr53c400= command - * line. - */ - -static int __init do_NCR53C400_setup(char *str) -{ - int ints[10]; - - get_options(str, ARRAY_SIZE(ints), ints); - internal_setup(BOARD_NCR53C400, str, ints); - return 1; -} - -/** - * do_NCR53C400A_setup - set up entry point - * @str: unused - * @ints: integer parameters from kernel setup code - * - * Setup function invoked at boot to parse the ncr53c400a= command - * line. - */ - -static int __init do_NCR53C400A_setup(char *str) -{ - int ints[10]; - - get_options(str, ARRAY_SIZE(ints), ints); - internal_setup(BOARD_NCR53C400A, str, ints); - return 1; -} - -/** - * do_DTC3181E_setup - set up entry point - * @str: unused - * @ints: integer parameters from kernel setup code - * - * Setup function invoked at boot to parse the dtc3181e= command - * line. 
- */ +static int irq[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; +module_param_array(irq, int, NULL, 0); +MODULE_PARM_DESC(irq, "IRQ number(s)"); -static int __init do_DTC3181E_setup(char *str) -{ - int ints[10]; +static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; +module_param_array(base, int, NULL, 0); +MODULE_PARM_DESC(base, "base address(es)"); - get_options(str, ARRAY_SIZE(ints), ints); - internal_setup(BOARD_DTC3181E, str, ints); - return 1; -} +static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; +module_param_array(card, int, NULL, 0); +MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)"); -#endif +MODULE_LICENSE("GPL"); #ifndef SCSI_G_NCR5380_MEM /* @@ -210,21 +90,9 @@ static void magic_configure(int idx, u8 irq, u8 magic[]) } #endif -/** - * generic_NCR5380_detect - look for NCR5380 controllers - * @tpnt: the scsi template - * - * Scan for the present of NCR5380, NCR53C400, NCR53C400A, DTC3181E - * and DTC436(ISAPnP) controllers. If overrides have been set we use - * them. - * - * Locks: none - */ - -static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) +static int generic_NCR5380_init_one(struct scsi_host_template *tpnt, + struct device *pdev, int base, int irq, int board) { - static int current_override; - int count; unsigned int *ports; u8 *magic = NULL; #ifndef SCSI_G_NCR5380_MEM @@ -232,272 +100,222 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) int port_idx = -1; unsigned long region_size; #endif - static unsigned int __initdata ncr_53c400a_ports[] = { + static unsigned int ncr_53c400a_ports[] = { 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0 }; - static unsigned int __initdata dtc_3181e_ports[] = { + static unsigned int dtc_3181e_ports[] = { 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0 }; - static u8 ncr_53c400a_magic[] __initdata = { /* 53C400A & DTC436 */ + static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */ 0x59, 0xb9, 0xc5, 0xae, 0xa6 }; - static u8 hp_c2502_magic[] __initdata = { /* HP C2502 */ + static u8 hp_c2502_magic[] = { /* HP C2502 */ 0x0f, 0x22, 0xf0, 0x20, 0x80 }; - int flags; + int flags, ret; struct Scsi_Host *instance; struct NCR5380_hostdata *hostdata; #ifdef SCSI_G_NCR5380_MEM - unsigned long base; void __iomem *iomem; resource_size_t iomem_size; #endif - if (ncr_irq) - overrides[0].irq = ncr_irq; - if (ncr_dma) - overrides[0].dma = ncr_dma; - if (ncr_addr) - overrides[0].NCR5380_map_name = (NCR5380_map_type) ncr_addr; - if (ncr_5380) - overrides[0].board = BOARD_NCR5380; - else if (ncr_53c400) - overrides[0].board = BOARD_NCR53C400; - else if (ncr_53c400a) - overrides[0].board = BOARD_NCR53C400A; - else if (dtc_3181e) - overrides[0].board = BOARD_DTC3181E; - else if (hp_c2502) - overrides[0].board = BOARD_HP_C2502; -#ifndef SCSI_G_NCR5380_MEM - if (!current_override && isapnp_present()) { - struct pnp_dev *dev = NULL; - count = 0; - while ((dev = pnp_find_dev(NULL, ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e), dev))) { - if (count >= NO_OVERRIDES) - break; - if (pnp_device_attach(dev) < 0) - continue; - if (pnp_activate_dev(dev) < 0) { - printk(KERN_ERR "dtc436e probe: activate failed\n"); - pnp_device_detach(dev); - continue; - } - if (!pnp_port_valid(dev, 0)) { - printk(KERN_ERR "dtc436e probe: no valid port\n"); - pnp_device_detach(dev); - continue; - } - if (pnp_irq_valid(dev, 0)) - overrides[count].irq = pnp_irq(dev, 0); - else - overrides[count].irq = NO_IRQ; - if (pnp_dma_valid(dev, 0)) - overrides[count].dma = pnp_dma(dev, 
0); - else - overrides[count].dma = DMA_NONE; - overrides[count].NCR5380_map_name = (NCR5380_map_type) pnp_port_start(dev, 0); - overrides[count].board = BOARD_DTC3181E; - count++; - } + ports = NULL; + flags = 0; + switch (board) { + case BOARD_NCR5380: + flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP; + break; + case BOARD_NCR53C400A: + ports = ncr_53c400a_ports; + magic = ncr_53c400a_magic; + break; + case BOARD_HP_C2502: + ports = ncr_53c400a_ports; + magic = hp_c2502_magic; + break; + case BOARD_DTC3181E: + ports = dtc_3181e_ports; + magic = ncr_53c400a_magic; + break; } -#endif - - for (count = 0; current_override < NO_OVERRIDES; ++current_override) { - if (!(overrides[current_override].NCR5380_map_name)) - continue; - - ports = NULL; - flags = 0; - switch (overrides[current_override].board) { - case BOARD_NCR5380: - flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP; - break; - case BOARD_NCR53C400A: - ports = ncr_53c400a_ports; - magic = ncr_53c400a_magic; - break; - case BOARD_HP_C2502: - ports = ncr_53c400a_ports; - magic = hp_c2502_magic; - break; - case BOARD_DTC3181E: - ports = dtc_3181e_ports; - magic = ncr_53c400a_magic; - break; - } #ifndef SCSI_G_NCR5380_MEM - if (ports && magic) { - /* wakeup sequence for the NCR53C400A and DTC3181E */ - - /* Disable the adapter and look for a free io port */ - magic_configure(-1, 0, magic); - - region_size = 16; - - if (overrides[current_override].NCR5380_map_name != PORT_AUTO) - for (i = 0; ports[i]; i++) { - if (!request_region(ports[i], region_size, "ncr53c80")) - continue; - if (overrides[current_override].NCR5380_map_name == ports[i]) - break; - release_region(ports[i], region_size); - } else - for (i = 0; ports[i]; i++) { - if (!request_region(ports[i], region_size, "ncr53c80")) - continue; - if (inb(ports[i]) == 0xff) - break; - release_region(ports[i], region_size); + if (ports && magic) { + /* wakeup sequence for the NCR53C400A and DTC3181E */ + + /* Disable the adapter and look for a free io port */ + magic_configure(-1, 0, magic); + + region_size = 16; + if (base) + for (i = 0; ports[i]; i++) { + if (base == ports[i]) { /* index found */ + if (!request_region(ports[i], + region_size, + "ncr53c80")) + return -EBUSY; + break; } - if (ports[i]) { - /* At this point we have our region reserved */ - magic_configure(i, 0, magic); /* no IRQ yet */ - outb(0xc0, ports[i] + 9); - if (inb(ports[i] + 9) != 0x80) - continue; - overrides[current_override].NCR5380_map_name = ports[i]; - port_idx = i; - } else - continue; - } + } else - { - /* Not a 53C400A style setup - just grab */ - region_size = 8; - if (!request_region(overrides[current_override].NCR5380_map_name, - region_size, "ncr5380")) - continue; - } + for (i = 0; ports[i]; i++) { + if (!request_region(ports[i], region_size, + "ncr53c80")) + continue; + if (inb(ports[i]) == 0xff) + break; + release_region(ports[i], region_size); + } + if (ports[i]) { + /* At this point we have our region reserved */ + magic_configure(i, 0, magic); /* no IRQ yet */ + outb(0xc0, ports[i] + 9); + if (inb(ports[i] + 9) != 0x80) { + ret = -ENODEV; + goto out_release; + } + base = ports[i]; + port_idx = i; + } else + return -EINVAL; + } + else + { + /* NCR5380 - no configuration, just grab */ + region_size = 8; + if (!base || !request_region(base, region_size, "ncr5380")) + return -EBUSY; + } #else - base = overrides[current_override].NCR5380_map_name; - iomem_size = NCR53C400_region_size; - if (!request_mem_region(base, iomem_size, "ncr5380")) - continue; - iomem = ioremap(base, iomem_size); - if (!iomem) { - 
release_mem_region(base, iomem_size); - continue; - } + iomem_size = NCR53C400_region_size; + if (!request_mem_region(base, iomem_size, "ncr5380")) + return -EBUSY; + iomem = ioremap(base, iomem_size); + if (!iomem) { + release_mem_region(base, iomem_size); + return -ENOMEM; + } #endif - instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata)); - if (instance == NULL) - goto out_release; - hostdata = shost_priv(instance); + instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata)); + if (instance == NULL) { + ret = -ENOMEM; + goto out_release; + } + hostdata = shost_priv(instance); #ifndef SCSI_G_NCR5380_MEM - instance->io_port = overrides[current_override].NCR5380_map_name; - instance->n_io_port = region_size; - hostdata->io_width = 1; /* 8-bit PDMA by default */ - - /* - * On NCR53C400 boards, NCR5380 registers are mapped 8 past - * the base address. - */ - switch (overrides[current_override].board) { - case BOARD_NCR53C400: - instance->io_port += 8; - hostdata->c400_ctl_status = 0; - hostdata->c400_blk_cnt = 1; - hostdata->c400_host_buf = 4; - break; - case BOARD_DTC3181E: - hostdata->io_width = 2; /* 16-bit PDMA */ - /* fall through */ - case BOARD_NCR53C400A: - case BOARD_HP_C2502: - hostdata->c400_ctl_status = 9; - hostdata->c400_blk_cnt = 10; - hostdata->c400_host_buf = 8; - break; - } + instance->io_port = base; + instance->n_io_port = region_size; + hostdata->io_width = 1; /* 8-bit PDMA by default */ + + /* + * On NCR53C400 boards, NCR5380 registers are mapped 8 past + * the base address. + */ + switch (board) { + case BOARD_NCR53C400: + instance->io_port += 8; + hostdata->c400_ctl_status = 0; + hostdata->c400_blk_cnt = 1; + hostdata->c400_host_buf = 4; + break; + case BOARD_DTC3181E: + hostdata->io_width = 2; /* 16-bit PDMA */ + /* fall through */ + case BOARD_NCR53C400A: + case BOARD_HP_C2502: + hostdata->c400_ctl_status = 9; + hostdata->c400_blk_cnt = 10; + hostdata->c400_host_buf = 8; + break; + } #else - instance->base = overrides[current_override].NCR5380_map_name; - hostdata->iomem = iomem; - hostdata->iomem_size = iomem_size; - switch (overrides[current_override].board) { - case BOARD_NCR53C400: - hostdata->c400_ctl_status = 0x100; - hostdata->c400_blk_cnt = 0x101; - hostdata->c400_host_buf = 0x104; - break; - case BOARD_DTC3181E: - case BOARD_NCR53C400A: - case BOARD_HP_C2502: - pr_err(DRV_MODULE_NAME ": unknown register offsets\n"); - goto out_unregister; - } + instance->base = base; + hostdata->iomem = iomem; + hostdata->iomem_size = iomem_size; + switch (board) { + case BOARD_NCR53C400: + hostdata->c400_ctl_status = 0x100; + hostdata->c400_blk_cnt = 0x101; + hostdata->c400_host_buf = 0x104; + break; + case BOARD_DTC3181E: + case BOARD_NCR53C400A: + case BOARD_HP_C2502: + pr_err(DRV_MODULE_NAME ": unknown register offsets\n"); + ret = -EINVAL; + goto out_unregister; + } #endif - if (NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP)) - goto out_unregister; + ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP); + if (ret) + goto out_unregister; - switch (overrides[current_override].board) { - case BOARD_NCR53C400: - case BOARD_DTC3181E: - case BOARD_NCR53C400A: - case BOARD_HP_C2502: - NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); - } + switch (board) { + case BOARD_NCR53C400: + case BOARD_DTC3181E: + case BOARD_NCR53C400A: + case BOARD_HP_C2502: + NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); + } - NCR5380_maybe_reset_bus(instance); + NCR5380_maybe_reset_bus(instance); - if (overrides[current_override].irq != IRQ_AUTO) - instance->irq = 
overrides[current_override].irq; - else - instance->irq = NCR5380_probe_irq(instance, 0xffff); + if (irq != IRQ_AUTO) + instance->irq = irq; + else + instance->irq = NCR5380_probe_irq(instance, 0xffff); - /* Compatibility with documented NCR5380 kernel parameters */ - if (instance->irq == 255) - instance->irq = NO_IRQ; + /* Compatibility with documented NCR5380 kernel parameters */ + if (instance->irq == 255) + instance->irq = NO_IRQ; - if (instance->irq != NO_IRQ) { + if (instance->irq != NO_IRQ) { #ifndef SCSI_G_NCR5380_MEM - /* set IRQ for HP C2502 */ - if (overrides[current_override].board == BOARD_HP_C2502) - magic_configure(port_idx, instance->irq, magic); + /* set IRQ for HP C2502 */ + if (board == BOARD_HP_C2502) + magic_configure(port_idx, instance->irq, magic); #endif - if (request_irq(instance->irq, generic_NCR5380_intr, - 0, "NCR5380", instance)) { - printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); - instance->irq = NO_IRQ; - } - } - - if (instance->irq == NO_IRQ) { - printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); - printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); + if (request_irq(instance->irq, generic_NCR5380_intr, + 0, "NCR5380", instance)) { + printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); + instance->irq = NO_IRQ; } + } - ++current_override; - ++count; + if (instance->irq == NO_IRQ) { + printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); + printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); } - return count; + ret = scsi_add_host(instance, pdev); + if (ret) + goto out_free_irq; + scsi_scan_host(instance); + dev_set_drvdata(pdev, instance); + return 0; + +out_free_irq: + if (instance->irq != NO_IRQ) + free_irq(instance->irq, instance); + NCR5380_exit(instance); out_unregister: - scsi_unregister(instance); + scsi_host_put(instance); out_release: #ifndef SCSI_G_NCR5380_MEM - release_region(overrides[current_override].NCR5380_map_name, region_size); + release_region(base, region_size); #else iounmap(iomem); release_mem_region(base, iomem_size); #endif - return count; + return ret; } -/** - * generic_NCR5380_release_resources - free resources - * @instance: host adapter to clean up - * - * Free the generic interface resources from this adapter. 
- * - * Locks: none - */ - -static int generic_NCR5380_release_resources(struct Scsi_Host *instance) +static void generic_NCR5380_release_resources(struct Scsi_Host *instance) { + scsi_remove_host(instance); if (instance->irq != NO_IRQ) free_irq(instance->irq, instance); NCR5380_exit(instance); @@ -511,7 +329,7 @@ static int generic_NCR5380_release_resources(struct Scsi_Host *instance) release_mem_region(instance->base, hostdata->iomem_size); } #endif - return 0; + scsi_host_put(instance); } /** @@ -701,10 +519,9 @@ static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance, #include "NCR5380.c" static struct scsi_host_template driver_template = { + .module = THIS_MODULE, .proc_name = DRV_MODULE_NAME, .name = "Generic NCR5380/NCR53C400 SCSI", - .detect = generic_NCR5380_detect, - .release = generic_NCR5380_release_resources, .info = generic_NCR5380_info, .queuecommand = generic_NCR5380_queue_command, .eh_abort_handler = generic_NCR5380_abort, @@ -718,31 +535,115 @@ static struct scsi_host_template driver_template = { .max_sectors = 128, }; -#include "scsi_module.c" -module_param(ncr_irq, int, 0); -module_param(ncr_dma, int, 0); -module_param(ncr_addr, int, 0); -module_param(ncr_5380, int, 0); -module_param(ncr_53c400, int, 0); -module_param(ncr_53c400a, int, 0); -module_param(dtc_3181e, int, 0); -module_param(hp_c2502, int, 0); -MODULE_LICENSE("GPL"); +static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev) +{ + int ret = generic_NCR5380_init_one(&driver_template, pdev, base[ndev], + irq[ndev], card[ndev]); + if (ret) { + if (base[ndev]) + printk(KERN_WARNING "Card not found at address 0x%03x\n", + base[ndev]); + return 0; + } -#if !defined(SCSI_G_NCR5380_MEM) && defined(MODULE) -static struct isapnp_device_id id_table[] = { - { - ISAPNP_ANY_ID, ISAPNP_ANY_ID, - ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e), - 0}, - {0} + return 1; +} + +static int generic_NCR5380_isa_remove(struct device *pdev, + unsigned int ndev) +{ + generic_NCR5380_release_resources(dev_get_drvdata(pdev)); + dev_set_drvdata(pdev, NULL); + return 0; +} + +static struct isa_driver generic_NCR5380_isa_driver = { + .match = generic_NCR5380_isa_match, + .remove = generic_NCR5380_isa_remove, + .driver = { + .name = DRV_MODULE_NAME + }, +}; + +#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) +static struct pnp_device_id generic_NCR5380_pnp_ids[] = { + { .id = "DTC436e", .driver_data = BOARD_DTC3181E }, + { .id = "" } +}; +MODULE_DEVICE_TABLE(pnp, generic_NCR5380_pnp_ids); + +static int generic_NCR5380_pnp_probe(struct pnp_dev *pdev, + const struct pnp_device_id *id) +{ + int base, irq; + + if (pnp_activate_dev(pdev) < 0) + return -EBUSY; + + base = pnp_port_start(pdev, 0); + irq = pnp_irq(pdev, 0); + + return generic_NCR5380_init_one(&driver_template, &pdev->dev, base, irq, + id->driver_data); +} + +static void generic_NCR5380_pnp_remove(struct pnp_dev *pdev) +{ + generic_NCR5380_release_resources(pnp_get_drvdata(pdev)); + pnp_set_drvdata(pdev, NULL); +} + +static struct pnp_driver generic_NCR5380_pnp_driver = { + .name = DRV_MODULE_NAME, + .id_table = generic_NCR5380_pnp_ids, + .probe = generic_NCR5380_pnp_probe, + .remove = generic_NCR5380_pnp_remove, }; +#endif /* !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) */ + +static int pnp_registered, isa_registered; + +static int __init generic_NCR5380_init(void) +{ + int ret = 0; + + /* compatibility with old-style parameters */ + if (irq[0] == 0 && base[0] == 0 && card[0] == -1) { + irq[0] = ncr_irq; + base[0] = ncr_addr; + if 
(ncr_5380) + card[0] = BOARD_NCR5380; + if (ncr_53c400) + card[0] = BOARD_NCR53C400; + if (ncr_53c400a) + card[0] = BOARD_NCR53C400A; + if (dtc_3181e) + card[0] = BOARD_DTC3181E; + if (hp_c2502) + card[0] = BOARD_HP_C2502; + } -MODULE_DEVICE_TABLE(isapnp, id_table); +#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) + if (!pnp_register_driver(&generic_NCR5380_pnp_driver)) + pnp_registered = 1; #endif + ret = isa_register_driver(&generic_NCR5380_isa_driver, MAX_CARDS); + if (!ret) + isa_registered = 1; + + return (pnp_registered || isa_registered) ? 0 : ret; +} + +static void __exit generic_NCR5380_exit(void) +{ +#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) + if (pnp_registered) + pnp_unregister_driver(&generic_NCR5380_pnp_driver); +#endif + if (isa_registered) + isa_unregister_driver(&generic_NCR5380_isa_driver); +} -__setup("ncr5380=", do_NCR5380_setup); -__setup("ncr53c400=", do_NCR53C400_setup); -__setup("ncr53c400a=", do_NCR53C400A_setup); -__setup("dtc3181e=", do_DTC3181E_setup); +module_init(generic_NCR5380_init); +module_exit(generic_NCR5380_exit); diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h index 595177428d76..b175b9234458 100644 --- a/drivers/scsi/g_NCR5380.h +++ b/drivers/scsi/g_NCR5380.h @@ -14,15 +14,9 @@ #ifndef GENERIC_NCR5380_H #define GENERIC_NCR5380_H -#define __STRVAL(x) #x -#define STRVAL(x) __STRVAL(x) - #ifndef SCSI_G_NCR5380_MEM #define DRV_MODULE_NAME "g_NCR5380" -#define NCR5380_map_type int -#define NCR5380_map_name port - #define NCR5380_read(reg) \ inb(instance->io_port + (reg)) #define NCR5380_write(reg, value) \ @@ -38,8 +32,6 @@ /* therefore SCSI_G_NCR5380_MEM */ #define DRV_MODULE_NAME "g_NCR5380_mmio" -#define NCR5380_map_type unsigned long -#define NCR5380_map_name base #define NCR53C400_mem_base 0x3880 #define NCR53C400_host_buffer 0x3900 #define NCR53C400_region_size 0x3a00 diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 47966909286d..e27b4d4e6ae2 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -63,7 +63,7 @@ config SCSI_UFSHCD_PCI config SCSI_UFS_DWC_TC_PCI tristate "DesignWare pci support using a G210 Test Chip" - depends on SCSI_UFSHCD && PCI + depends on SCSI_UFSHCD_PCI ---help--- Synopsys Test Chip is a PHY for prototyping purposes. 
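Stepping back to the g_NCR5380 conversion above: it drops the old detect()/release() host-template hooks in favour of the ISA bus driver model, where the core calls .match once per possible card index. A minimal stand-alone sketch of that same isa_register_driver() pattern (illustrative only; the toy_* names and TOY_MAX_DEVS are placeholders, not part of this patch):

#include <linux/device.h>
#include <linux/isa.h>
#include <linux/module.h>

#define TOY_MAX_DEVS 8	/* upper bound on probed slots, like MAX_CARDS */

/* Called once per slot in [0, TOY_MAX_DEVS); return 1 to claim it, 0 to skip. */
static int toy_isa_match(struct device *pdev, unsigned int ndev)
{
	/* probe the hardware for card 'ndev' here, set drvdata on success */
	return 0;
}

static int toy_isa_remove(struct device *pdev, unsigned int ndev)
{
	/* undo whatever toy_isa_match() claimed */
	return 0;
}

static struct isa_driver toy_isa_driver = {
	.match = toy_isa_match,
	.remove = toy_isa_remove,
	.driver = {
		.name = "toy_isa",
	},
};

static int __init toy_init(void)
{
	return isa_register_driver(&toy_isa_driver, TOY_MAX_DEVS);
}

static void __exit toy_exit(void)
{
	isa_unregister_driver(&toy_isa_driver);
}

module_init(toy_init);
module_exit(toy_exit);
MODULE_LICENSE("GPL");

As in the patch, all of the per-card setup can live in .match; returning 0 simply skips a slot, so absent cards cost nothing.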
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index ee4ab85e2801..22f881e9253a 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -25,6 +25,7 @@ #define UFS_VENDOR_TOSHIBA 0x198 #define UFS_VENDOR_SAMSUNG 0x1CE +#define UFS_VENDOR_SKHYNIX 0x1AD /** * ufs_device_info - ufs device details @@ -145,6 +146,7 @@ static struct ufs_dev_fix ufs_fixups[] = { UFS_DEVICE_QUIRK_PA_TACTIVATE), UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG", UFS_DEVICE_QUIRK_PA_TACTIVATE), + UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), END_FIX }; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 37f3c51e9d92..05c745663c10 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1266,9 +1266,12 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( 0, query->request.query_func, 0, 0); - /* Data segment length */ - ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD( - 0, 0, len >> 8, (u8)len); + /* Data segment length is only needed for WRITE_DESC */ + if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) + ucd_req_ptr->header.dword_2 = + UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len); + else + ucd_req_ptr->header.dword_2 = 0; /* Copy the Query Request buffer as is */ memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, @@ -6500,6 +6503,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) if (IS_ERR(hba->devfreq)) { dev_err(hba->dev, "Unable to register with devfreq %ld\n", PTR_ERR(hba->devfreq)); + err = PTR_ERR(hba->devfreq); goto out_remove_scsi_host; } /* Suspend devfreq until the UFS device is detected */ diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index fe42a2fdf351..e6e90e80519a 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -1,6 +1,7 @@ menu "SOC (System On Chip) specific Drivers" source "drivers/soc/bcm/Kconfig" +source "drivers/soc/fsl/qbman/Kconfig" source "drivers/soc/fsl/qe/Kconfig" source "drivers/soc/mediatek/Kconfig" source "drivers/soc/qcom/Kconfig" diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile index 203307fd92c1..75e1f5334821 100644 --- a/drivers/soc/fsl/Makefile +++ b/drivers/soc/fsl/Makefile @@ -2,5 +2,6 @@ # Makefile for the Linux Kernel SOC fsl specific device drivers # +obj-$(CONFIG_FSL_DPAA) += qbman/ obj-$(CONFIG_QUICC_ENGINE) += qe/ obj-$(CONFIG_CPM) += qe/ diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig new file mode 100644 index 000000000000..757033c0586c --- /dev/null +++ b/drivers/soc/fsl/qbman/Kconfig @@ -0,0 +1,67 @@ +menuconfig FSL_DPAA + bool "Freescale DPAA 1.x support" + depends on FSL_SOC_BOOKE + select GENERIC_ALLOCATOR + help + The Freescale Data Path Acceleration Architecture (DPAA) is a set of + hardware components on specific QorIQ multicore processors. + This architecture provides the infrastructure to support simplified + sharing of networking interfaces and accelerators by multiple CPUs. + The major h/w blocks composing DPAA are BMan and QMan. + + The Buffer Manager (BMan) is a hardware buffer pool management block + that allows software and accelerators on the datapath to acquire and + release buffers in order to build frames. + + The Queue Manager (QMan) is a hardware queue management block + that allows software and accelerators on the datapath to enqueue and + dequeue frames in order to communicate.
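To make the BMan half of that help text concrete, here is a rough round-trip through the pool API this series exports. This is a hedged sketch rather than driver code: it assumes the bman_new_pool()/bman_release()/bman_acquire()/bman_free_pool() helpers and the bm_buffer_set64() accessor that the series declares for client drivers, and it trims error handling:

#include <linux/types.h>
#include <soc/fsl/bman.h>

/* Seed a freshly allocated pool with one buffer, then take it back.
 * 'addr' must be a bus address the datapath hardware can reach. */
static int toy_bman_roundtrip(dma_addr_t addr)
{
	struct bman_pool *pool;
	struct bm_buffer buf;
	int ret;

	pool = bman_new_pool();		/* grabs a dynamically allocated BPID */
	if (!pool)
		return -ENODEV;

	bm_buffer_set64(&buf, addr);	/* stores the 48-bit buffer address */
	ret = bman_release(pool, &buf, 1);	/* hand the buffer to BMan */
	if (!ret)
		ret = bman_acquire(pool, &buf, 1); /* count acquired, or -errno */

	bman_free_pool(pool);
	return ret < 0 ? ret : 0;
}

QMan is the mirror image on the queue side: frames rather than bare buffers, enqueued to and dequeued from frame queues.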
+ +if FSL_DPAA + +config FSL_DPAA_CHECKING + bool "Additional driver checking" + help + Compiles in additional checks, to sanity-check the drivers and + any use of the exported API. Not recommended for performance. + +config FSL_BMAN_TEST + tristate "BMan self-tests" + help + Compile the BMan self-test code. These tests will + exercise the BMan APIs to confirm functionality + of both the software drivers and hardware device. + +config FSL_BMAN_TEST_API + bool "High-level API self-test" + depends on FSL_BMAN_TEST + default y + help + This requires the presence of cpu-affine portals, and performs + high-level API testing with them (whichever portal(s) are affine + to the cpu(s) the test executes on). + +config FSL_QMAN_TEST + tristate "QMan self-tests" + help + Compile self-test code for QMan. + +config FSL_QMAN_TEST_API + bool "QMan high-level self-test" + depends on FSL_QMAN_TEST + default y + help + This requires the presence of cpu-affine portals, and performs + high-level API testing with them (whichever portal(s) are affine to + the cpu(s) the test executes on). + +config FSL_QMAN_TEST_STASH + bool "QMan 'hot potato' data-stashing self-test" + depends on FSL_QMAN_TEST + default y + help + This performs a "hot potato" style test enqueuing/dequeuing a frame + across a series of FQs scheduled to different portals (and cpus), with + DQRR, data and context stashing always on. + +endif # FSL_DPAA diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile new file mode 100644 index 000000000000..7ae199f1664e --- /dev/null +++ b/drivers/soc/fsl/qbman/Makefile @@ -0,0 +1,12 @@ +obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o qman_ccsr.o \ + bman_portal.o qman_portal.o \ + bman.o qman.o + +obj-$(CONFIG_FSL_BMAN_TEST) += bman-test.o +bman-test-y = bman_test.o +bman-test-$(CONFIG_FSL_BMAN_TEST_API) += bman_test_api.o + +obj-$(CONFIG_FSL_QMAN_TEST) += qman-test.o +qman-test-y = qman_test.o +qman-test-$(CONFIG_FSL_QMAN_TEST_API) += qman_test_api.o +qman-test-$(CONFIG_FSL_QMAN_TEST_STASH) += qman_test_stash.o diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c new file mode 100644 index 000000000000..ffa48fdbb1a9 --- /dev/null +++ b/drivers/soc/fsl/qbman/bman.c @@ -0,0 +1,797 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "bman_priv.h" + +#define IRQNAME "BMan portal %d" +#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */ + +/* Portal register assists */ + +/* Cache-inhibited register offsets */ +#define BM_REG_RCR_PI_CINH 0x0000 +#define BM_REG_RCR_CI_CINH 0x0004 +#define BM_REG_RCR_ITR 0x0008 +#define BM_REG_CFG 0x0100 +#define BM_REG_SCN(n) (0x0200 + ((n) << 2)) +#define BM_REG_ISR 0x0e00 +#define BM_REG_IER 0x0e04 +#define BM_REG_ISDR 0x0e08 +#define BM_REG_IIR 0x0e0c + +/* Cache-enabled register offsets */ +#define BM_CL_CR 0x0000 +#define BM_CL_RR0 0x0100 +#define BM_CL_RR1 0x0140 +#define BM_CL_RCR 0x1000 +#define BM_CL_RCR_PI_CENA 0x3000 +#define BM_CL_RCR_CI_CENA 0x3100 + +/* + * Portal modes. + * Enum types; + * pmode == production mode + * cmode == consumption mode, + * Enum values use 3 letter codes. First letter matches the portal mode, + * remaining two letters indicate; + * ci == cache-inhibited portal register + * ce == cache-enabled portal register + * vb == in-band valid-bit (cache-enabled) + */ +enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */ + bm_rcr_pci = 0, /* PI index, cache-inhibited */ + bm_rcr_pce = 1, /* PI index, cache-enabled */ + bm_rcr_pvb = 2 /* valid-bit */ +}; +enum bm_rcr_cmode { /* s/w-only */ + bm_rcr_cci, /* CI index, cache-inhibited */ + bm_rcr_cce /* CI index, cache-enabled */ +}; + + +/* --- Portal structures --- */ + +#define BM_RCR_SIZE 8 + +/* Release Command */ +struct bm_rcr_entry { + union { + struct { + u8 _ncw_verb; /* writes to this are non-coherent */ + u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */ + u8 __reserved1[62]; + }; + struct bm_buffer bufs[8]; + }; +}; +#define BM_RCR_VERB_VBIT 0x80 +#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */ +#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20 +#define BM_RCR_VERB_CMD_BPID_MULTI 0x30 +#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */ + +struct bm_rcr { + struct bm_rcr_entry *ring, *cursor; + u8 ci, available, ithresh, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + u32 busy; + enum bm_rcr_pmode pmode; + enum bm_rcr_cmode cmode; +#endif +}; + +/* MC (Management Command) command */ +struct bm_mc_command { + u8 _ncw_verb; /* writes to this are non-coherent */ + u8 bpid; /* used by acquire command */ + u8 __reserved[62]; +}; +#define BM_MCC_VERB_VBIT 0x80 +#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */ +#define BM_MCC_VERB_CMD_ACQUIRE 0x10 +#define BM_MCC_VERB_CMD_QUERY 0x40 +#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */ + +/* MC result, Acquire and Query Response */ +union bm_mc_result { + struct { + u8 verb; + u8 bpid; + u8 __reserved[62]; + }; + struct bm_buffer bufs[8]; +}; +#define BM_MCR_VERB_VBIT 0x80 +#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK +#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE +#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY +#define BM_MCR_VERB_CMD_ERR_INVALID 0x60 +#define BM_MCR_VERB_CMD_ERR_ECC 0x70 +#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */ 
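The _ncw_verb byte and the *_VERB_VBIT masks above implement the in-band valid-bit convention named in the portal-modes comment: the producer writes an entry's payload first and its verb byte last, with a polarity bit that flips on every ring wrap, so the consumer can spot fresh entries without reading a hardware index register. A hypothetical stand-alone mock-up of the idea (not part of the driver):

#define TOY_RING_SIZE	8
#define TOY_VERB_VBIT	0x80	/* same role as BM_RCR_VERB_VBIT */

struct toy_entry {
	unsigned char verb;		/* command bits | valid bit, written last */
	unsigned char payload[63];
};

static struct toy_entry toy_ring[TOY_RING_SIZE];
static unsigned int toy_pi;			/* producer index */
static unsigned char toy_vbit = TOY_VERB_VBIT;	/* current polarity */

static void toy_produce(unsigned char cmd)
{
	/* in the real driver the payload is written and flushed first */
	toy_ring[toy_pi].verb = cmd | toy_vbit;
	if (++toy_pi == TOY_RING_SIZE) {
		toy_pi = 0;
		toy_vbit ^= TOY_VERB_VBIT;	/* flip polarity on wrap */
	}
}

/* A consumer tracking index 'ci' and expected polarity 'vbit' treats
 * toy_ring[ci] as new iff (toy_ring[ci].verb & TOY_VERB_VBIT) == vbit. */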
+#define BM_MCR_TIMEOUT 10000 /* us */ + +struct bm_mc { + struct bm_mc_command *cr; + union bm_mc_result *rr; + u8 rridx, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + enum { + /* Can only be _mc_start()ed */ + mc_idle, + /* Can only be _mc_commit()ed or _mc_abort()ed */ + mc_user, + /* Can only be _mc_retry()ed */ + mc_hw + } state; +#endif +}; + +struct bm_addr { + void __iomem *ce; /* cache-enabled */ + void __iomem *ci; /* cache-inhibited */ +}; + +struct bm_portal { + struct bm_addr addr; + struct bm_rcr rcr; + struct bm_mc mc; +} ____cacheline_aligned; + +/* Cache-inhibited register access. */ +static inline u32 bm_in(struct bm_portal *p, u32 offset) +{ + return __raw_readl(p->addr.ci + offset); +} + +static inline void bm_out(struct bm_portal *p, u32 offset, u32 val) +{ + __raw_writel(val, p->addr.ci + offset); +} + +/* Cache Enabled Portal Access */ +static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset) +{ + dpaa_invalidate(p->addr.ce + offset); +} + +static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset) +{ + dpaa_touch_ro(p->addr.ce + offset); +} + +static inline u32 bm_ce_in(struct bm_portal *p, u32 offset) +{ + return __raw_readl(p->addr.ce + offset); +} + +struct bman_portal { + struct bm_portal p; + /* interrupt sources processed by portal_isr(), configurable */ + unsigned long irq_sources; + /* probing time config params for cpu-affine portals */ + const struct bm_portal_config *config; + char irqname[MAX_IRQNAME]; +}; + +static cpumask_t affine_mask; +static DEFINE_SPINLOCK(affine_mask_lock); +static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal); + +static inline struct bman_portal *get_affine_portal(void) +{ + return &get_cpu_var(bman_affine_portal); +} + +static inline void put_affine_portal(void) +{ + put_cpu_var(bman_affine_portal); +} + +/* + * This object type refers to a pool, it isn't *the* pool. There may be + * more than one such object per BMan buffer pool, eg. if different users of the + * pool are operating via different portals. + */ +struct bman_pool { + /* index of the buffer pool to encapsulate (0-63) */ + u32 bpid; + /* Used for hash-table admin when using depletion notifications. 
*/ + struct bman_portal *portal; + struct bman_pool *next; +}; + +static u32 poll_portal_slow(struct bman_portal *p, u32 is); + +static irqreturn_t portal_isr(int irq, void *ptr) +{ + struct bman_portal *p = ptr; + struct bm_portal *portal = &p->p; + u32 clear = p->irq_sources; + u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources; + + if (unlikely(!is)) + return IRQ_NONE; + + clear |= poll_portal_slow(p, is); + bm_out(portal, BM_REG_ISR, clear); + return IRQ_HANDLED; +} + +/* --- RCR API --- */ + +#define RCR_SHIFT ilog2(sizeof(struct bm_rcr_entry)) +#define RCR_CARRY (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT) + +/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ +static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p) +{ + uintptr_t addr = (uintptr_t)p; + + addr &= ~RCR_CARRY; + + return (struct bm_rcr_entry *)addr; +} + +#ifdef CONFIG_FSL_DPAA_CHECKING +/* Bit-wise logic to convert a ring pointer to a ring index */ +static int rcr_ptr2idx(struct bm_rcr_entry *e) +{ + return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1); +} +#endif + +/* Increment the 'cursor' ring pointer, taking 'vbit' into account */ +static inline void rcr_inc(struct bm_rcr *rcr) +{ + /* increment to the next RCR pointer and handle overflow and 'vbit' */ + struct bm_rcr_entry *partial = rcr->cursor + 1; + + rcr->cursor = rcr_carryclear(partial); + if (partial != rcr->cursor) + rcr->vbit ^= BM_RCR_VERB_VBIT; +} + +static int bm_rcr_get_avail(struct bm_portal *portal) +{ + struct bm_rcr *rcr = &portal->rcr; + + return rcr->available; +} + +static int bm_rcr_get_fill(struct bm_portal *portal) +{ + struct bm_rcr *rcr = &portal->rcr; + + return BM_RCR_SIZE - 1 - rcr->available; +} + +static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh) +{ + struct bm_rcr *rcr = &portal->rcr; + + rcr->ithresh = ithresh; + bm_out(portal, BM_REG_RCR_ITR, ithresh); +} + +static void bm_rcr_cce_prefetch(struct bm_portal *portal) +{ + __maybe_unused struct bm_rcr *rcr = &portal->rcr; + + DPAA_ASSERT(rcr->cmode == bm_rcr_cce); + bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA); +} + +static u8 bm_rcr_cce_update(struct bm_portal *portal) +{ + struct bm_rcr *rcr = &portal->rcr; + u8 diff, old_ci = rcr->ci; + + DPAA_ASSERT(rcr->cmode == bm_rcr_cce); + rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1); + bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA); + diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci); + rcr->available += diff; + return diff; +} + +static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal) +{ + struct bm_rcr *rcr = &portal->rcr; + + DPAA_ASSERT(!rcr->busy); + if (!rcr->available) + return NULL; +#ifdef CONFIG_FSL_DPAA_CHECKING + rcr->busy = 1; +#endif + dpaa_zero(rcr->cursor); + return rcr->cursor; +} + +static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb) +{ + struct bm_rcr *rcr = &portal->rcr; + struct bm_rcr_entry *rcursor; + + DPAA_ASSERT(rcr->busy); + DPAA_ASSERT(rcr->pmode == bm_rcr_pvb); + DPAA_ASSERT(rcr->available >= 1); + dma_wmb(); + rcursor = rcr->cursor; + rcursor->_ncw_verb = myverb | rcr->vbit; + dpaa_flush(rcursor); + rcr_inc(rcr); + rcr->available--; +#ifdef CONFIG_FSL_DPAA_CHECKING + rcr->busy = 0; +#endif +} + +static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode, + enum bm_rcr_cmode cmode) +{ + struct bm_rcr *rcr = &portal->rcr; + u32 cfg; + u8 pi; + + rcr->ring = portal->addr.ce + BM_CL_RCR; + rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1); + pi = bm_in(portal, BM_REG_RCR_PI_CINH) & 
(BM_RCR_SIZE - 1);
+	rcr->cursor = rcr->ring + pi;
+	rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
+		BM_RCR_VERB_VBIT : 0;
+	rcr->available = BM_RCR_SIZE - 1
+		- dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+	rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	rcr->busy = 0;
+	rcr->pmode = pmode;
+	rcr->cmode = cmode;
+#endif
+	cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
+		| (pmode & 0x3); /* BCSP_CFG::RPM */
+	bm_out(portal, BM_REG_CFG, cfg);
+	return 0;
+}
+
+static void bm_rcr_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	struct bm_rcr *rcr = &portal->rcr;
+	int i;
+
+	DPAA_ASSERT(!rcr->busy);
+
+	i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+	if (i != rcr_ptr2idx(rcr->cursor))
+		pr_crit("losing uncommitted RCR entries\n");
+
+	i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+	if (i != rcr->ci)
+		pr_crit("missing existing RCR completions\n");
+	if (rcr->ci != rcr_ptr2idx(rcr->cursor))
+		pr_crit("RCR destroyed unquiesced\n");
+#endif
+}
+
+/* --- Management command API --- */
+static int bm_mc_init(struct bm_portal *portal)
+{
+	struct bm_mc *mc = &portal->mc;
+
+	mc->cr = portal->addr.ce + BM_CL_CR;
+	mc->rr = portal->addr.ce + BM_CL_RR0;
+	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
+		    0 : 1;
+	mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = mc_idle;
+#endif
+	return 0;
+}
+
+static void bm_mc_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	struct bm_mc *mc = &portal->mc;
+
+	DPAA_ASSERT(mc->state == mc_idle);
+	if (mc->state != mc_idle)
+		pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+	struct bm_mc *mc = &portal->mc;
+
+	DPAA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = mc_user;
+#endif
+	dpaa_zero(mc->cr);
+	return mc->cr;
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+	struct bm_mc *mc = &portal->mc;
+	union bm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPAA_ASSERT(mc->state == mc_user);
+	dma_wmb();
+	mc->cr->_ncw_verb = myverb | mc->vbit;
+	dpaa_flush(mc->cr);
+	dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = mc_hw;
+#endif
+}
+
+static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+	struct bm_mc *mc = &portal->mc;
+	union bm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPAA_ASSERT(mc->state == mc_hw);
+	/*
+	 * The inactive response register's verb byte always returns zero until
+	 * its command is submitted and completed. This includes the valid-bit,
+	 * in case you were wondering...
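+	 *
+	 * An illustrative (example-only) caller would therefore spin on the
+	 * verb:
+	 *
+	 *	while (!(rr = bm_mc_result(portal)))
+	 *		udelay(1);
+	 *
+	 * bm_mc_result_timeout() below is the bounded form of this loop.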
+ */ + if (!__raw_readb(&rr->verb)) { + dpaa_invalidate_touch_ro(rr); + return NULL; + } + mc->rridx ^= 1; + mc->vbit ^= BM_MCC_VERB_VBIT; +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = mc_idle; +#endif + return rr; +} + +static inline int bm_mc_result_timeout(struct bm_portal *portal, + union bm_mc_result **mcr) +{ + int timeout = BM_MCR_TIMEOUT; + + do { + *mcr = bm_mc_result(portal); + if (*mcr) + break; + udelay(1); + } while (--timeout); + + return timeout; +} + +/* Disable all BSCN interrupts for the portal */ +static void bm_isr_bscn_disable(struct bm_portal *portal) +{ + bm_out(portal, BM_REG_SCN(0), 0); + bm_out(portal, BM_REG_SCN(1), 0); +} + +static int bman_create_portal(struct bman_portal *portal, + const struct bm_portal_config *c) +{ + struct bm_portal *p; + int ret; + + p = &portal->p; + /* + * prep the low-level portal struct with the mapped addresses from the + * config, everything that follows depends on it and "config" is more + * for (de)reference... + */ + p->addr.ce = c->addr_virt[DPAA_PORTAL_CE]; + p->addr.ci = c->addr_virt[DPAA_PORTAL_CI]; + if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) { + dev_err(c->dev, "RCR initialisation failed\n"); + goto fail_rcr; + } + if (bm_mc_init(p)) { + dev_err(c->dev, "MC initialisation failed\n"); + goto fail_mc; + } + /* + * Default to all BPIDs disabled, we enable as required at + * run-time. + */ + bm_isr_bscn_disable(p); + + /* Write-to-clear any stale interrupt status bits */ + bm_out(p, BM_REG_ISDR, 0xffffffff); + portal->irq_sources = 0; + bm_out(p, BM_REG_IER, 0); + bm_out(p, BM_REG_ISR, 0xffffffff); + snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu); + if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) { + dev_err(c->dev, "request_irq() failed\n"); + goto fail_irq; + } + if (c->cpu != -1 && irq_can_set_affinity(c->irq) && + irq_set_affinity(c->irq, cpumask_of(c->cpu))) { + dev_err(c->dev, "irq_set_affinity() failed\n"); + goto fail_affinity; + } + + /* Need RCR to be empty before continuing */ + ret = bm_rcr_get_fill(p); + if (ret) { + dev_err(c->dev, "RCR unclean\n"); + goto fail_rcr_empty; + } + /* Success */ + portal->config = c; + + bm_out(p, BM_REG_ISDR, 0); + bm_out(p, BM_REG_IIR, 0); + + return 0; + +fail_rcr_empty: +fail_affinity: + free_irq(c->irq, portal); +fail_irq: + bm_mc_finish(p); +fail_mc: + bm_rcr_finish(p); +fail_rcr: + return -EIO; +} + +struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c) +{ + struct bman_portal *portal; + int err; + + portal = &per_cpu(bman_affine_portal, c->cpu); + err = bman_create_portal(portal, c); + if (err) + return NULL; + + spin_lock(&affine_mask_lock); + cpumask_set_cpu(c->cpu, &affine_mask); + spin_unlock(&affine_mask_lock); + + return portal; +} + +static u32 poll_portal_slow(struct bman_portal *p, u32 is) +{ + u32 ret = is; + + if (is & BM_PIRQ_RCRI) { + bm_rcr_cce_update(&p->p); + bm_rcr_set_ithresh(&p->p, 0); + bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI); + is &= ~BM_PIRQ_RCRI; + } + + /* There should be no status register bits left undefined */ + DPAA_ASSERT(!is); + return ret; +} + +int bman_p_irqsource_add(struct bman_portal *p, u32 bits) +{ + unsigned long irqflags; + + local_irq_save(irqflags); + set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources); + bm_out(&p->p, BM_REG_IER, p->irq_sources); + local_irq_restore(irqflags); + return 0; +} + +static int bm_shutdown_pool(u32 bpid) +{ + struct bm_mc_command *bm_cmd; + union bm_mc_result *bm_res; + + while (1) { + struct bman_portal *p = get_affine_portal(); + /* Acquire buffers until 
empty */ + bm_cmd = bm_mc_start(&p->p); + bm_cmd->bpid = bpid; + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1); + if (!bm_mc_result_timeout(&p->p, &bm_res)) { + put_affine_portal(); + pr_crit("BMan Acquire Command timedout\n"); + return -ETIMEDOUT; + } + if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) { + put_affine_portal(); + /* Pool is empty */ + return 0; + } + put_affine_portal(); + } + + return 0; +} + +struct gen_pool *bm_bpalloc; + +static int bm_alloc_bpid_range(u32 *result, u32 count) +{ + unsigned long addr; + + addr = gen_pool_alloc(bm_bpalloc, count); + if (!addr) + return -ENOMEM; + + *result = addr & ~DPAA_GENALLOC_OFF; + + return 0; +} + +static int bm_release_bpid(u32 bpid) +{ + int ret; + + ret = bm_shutdown_pool(bpid); + if (ret) { + pr_debug("BPID %d leaked\n", bpid); + return ret; + } + + gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1); + return 0; +} + +struct bman_pool *bman_new_pool(void) +{ + struct bman_pool *pool = NULL; + u32 bpid; + + if (bm_alloc_bpid_range(&bpid, 1)) + return NULL; + + pool = kmalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + goto err; + + pool->bpid = bpid; + + return pool; +err: + bm_release_bpid(bpid); + kfree(pool); + return NULL; +} +EXPORT_SYMBOL(bman_new_pool); + +void bman_free_pool(struct bman_pool *pool) +{ + bm_release_bpid(pool->bpid); + + kfree(pool); +} +EXPORT_SYMBOL(bman_free_pool); + +int bman_get_bpid(const struct bman_pool *pool) +{ + return pool->bpid; +} +EXPORT_SYMBOL(bman_get_bpid); + +static void update_rcr_ci(struct bman_portal *p, int avail) +{ + if (avail) + bm_rcr_cce_prefetch(&p->p); + else + bm_rcr_cce_update(&p->p); +} + +int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num) +{ + struct bman_portal *p; + struct bm_rcr_entry *r; + unsigned long irqflags; + int avail, timeout = 1000; /* 1ms */ + int i = num - 1; + + DPAA_ASSERT(num > 0 && num <= 8); + + do { + p = get_affine_portal(); + local_irq_save(irqflags); + avail = bm_rcr_get_avail(&p->p); + if (avail < 2) + update_rcr_ci(p, avail); + r = bm_rcr_start(&p->p); + local_irq_restore(irqflags); + put_affine_portal(); + if (likely(r)) + break; + + udelay(1); + } while (--timeout); + + if (unlikely(!timeout)) + return -ETIMEDOUT; + + p = get_affine_portal(); + local_irq_save(irqflags); + /* + * we can copy all but the first entry, as this can trigger badness + * with the valid-bit + */ + bm_buffer_set64(r->bufs, bm_buffer_get64(bufs)); + bm_buffer_set_bpid(r->bufs, pool->bpid); + if (i) + memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0])); + + bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE | + (num & BM_RCR_VERB_BUFCOUNT_MASK)); + + local_irq_restore(irqflags); + put_affine_portal(); + return 0; +} +EXPORT_SYMBOL(bman_release); + +int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num) +{ + struct bman_portal *p = get_affine_portal(); + struct bm_mc_command *mcc; + union bm_mc_result *mcr; + int ret; + + DPAA_ASSERT(num > 0 && num <= 8); + + mcc = bm_mc_start(&p->p); + mcc->bpid = pool->bpid; + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | + (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT)); + if (!bm_mc_result_timeout(&p->p, &mcr)) { + put_affine_portal(); + pr_crit("BMan Acquire Timeout\n"); + return -ETIMEDOUT; + } + ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT; + if (bufs) + memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0])); + + put_affine_portal(); + if (ret != num) + ret = -ENOMEM; + return ret; +} +EXPORT_SYMBOL(bman_acquire); + +const struct bm_portal_config * +bman_get_bm_portal_config(const struct 
bman_portal *portal) +{ + return portal->config; +} diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c new file mode 100644 index 000000000000..9deb0524543f --- /dev/null +++ b/drivers/soc/fsl/qbman/bman_ccsr.c @@ -0,0 +1,263 @@ +/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "bman_priv.h"
+
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+/* Register offsets */
+#define REG_FBPR_FPC		0x0800
+#define REG_ECSR		0x0a00
+#define REG_ECIR		0x0a04
+#define REG_EADR		0x0a08
+#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1		0x0bf8
+#define REG_IP_REV_2		0x0bfc
+#define REG_FBPR_BARE		0x0c00
+#define REG_FBPR_BAR		0x0c04
+#define REG_FBPR_AR		0x0c10
+#define REG_SRCIDR		0x0d04
+#define REG_LIODNR		0x0d08
+#define REG_ERR_ISR		0x0e00
+#define REG_ERR_IER		0x0e04
+#define REG_ERR_ISDR		0x0e08
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI	0x00000010	/* Invalid Command Verb */
+#define BM_EIRQ_FLWI	0x00000008	/* FBPR Low Watermark */
+#define BM_EIRQ_MBEI	0x00000004	/* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI	0x00000002	/* Single-bit ECC Error */
+#define BM_EIRQ_BSCN	0x00000001	/* Pool State Change Notification */
+
+struct bman_hwerr_txt {
+	u32 mask;
+	const char *txt;
+};
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+	{ BM_EIRQ_IVCI, "Invalid Command Verb" },
+	{ BM_EIRQ_FLWI, "FBPR Low Watermark" },
+	{ BM_EIRQ_MBEI, "Multi-bit ECC Error" },
+	{ BM_EIRQ_SBEI, "Single-bit ECC Error" },
+	{ BM_EIRQ_BSCN, "Pool State Change Notification" },
+};
+
+/* Trigger the low-watermark interrupt only once */
+#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
+
+/* Pointer to the start of the BMan's CCSR space */
+static u32 __iomem *bm_ccsr_start;
+
+static inline u32 bm_ccsr_in(u32 offset)
+{
+	return ioread32be(bm_ccsr_start + offset/4);
+}
+static inline void bm_ccsr_out(u32 offset, u32 val)
+{
+	iowrite32be(val, bm_ccsr_start + offset/4);
+}
+
+static void bm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+	u32 v = bm_ccsr_in(REG_IP_REV_1);
+	*id = (v >> 16);
+	*major = (v >> 8) & 0xff;
+	*minor = v & 0xff;
+}
+
+/* signal transactions for FBPRs with higher priority */
+#define FBPR_AR_RPRIO_HI BIT(30)
+
+static void bm_set_memory(u64 ba, u32 size)
+{
+	u32 exp = ilog2(size);
+	/* choke if size isn't within range */
+	DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
+		    is_power_of_2(size));
+	/* choke if '[e]ba' has lower-alignment than 'size' */
+	DPAA_ASSERT(!(ba & (size - 1)));
+	bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
+	bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
+	bm_ccsr_out(REG_FBPR_AR, exp - 1);
+}
+
+/*
+ * Location and size of BMan private memory
+ *
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved). Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
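+ *
+ * The reserved region below is therefore programmed straight into the
+ * hardware via bm_set_memory(fbpr_a, fbpr_sz) at probe time, e.g. a 16 MiB
+ * area must be 16 MiB aligned and is encoded as ilog2(size) - 1 = 23 in
+ * REG_FBPR_AR.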
+ */ +static dma_addr_t fbpr_a; +static size_t fbpr_sz; + +static int bman_fbpr(struct reserved_mem *rmem) +{ + fbpr_a = rmem->base; + fbpr_sz = rmem->size; + + WARN_ON(!(fbpr_a && fbpr_sz)); + + return 0; +} +RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr); + +static irqreturn_t bman_isr(int irq, void *ptr) +{ + u32 isr_val, ier_val, ecsr_val, isr_mask, i; + struct device *dev = ptr; + + ier_val = bm_ccsr_in(REG_ERR_IER); + isr_val = bm_ccsr_in(REG_ERR_ISR); + ecsr_val = bm_ccsr_in(REG_ECSR); + isr_mask = isr_val & ier_val; + + if (!isr_mask) + return IRQ_NONE; + + for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) { + if (bman_hwerr_txts[i].mask & isr_mask) { + dev_err_ratelimited(dev, "ErrInt: %s\n", + bman_hwerr_txts[i].txt); + if (bman_hwerr_txts[i].mask & ecsr_val) { + /* Re-arm error capture registers */ + bm_ccsr_out(REG_ECSR, ecsr_val); + } + if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) { + dev_dbg(dev, "Disabling error 0x%x\n", + bman_hwerr_txts[i].mask); + ier_val &= ~bman_hwerr_txts[i].mask; + bm_ccsr_out(REG_ERR_IER, ier_val); + } + } + } + bm_ccsr_out(REG_ERR_ISR, isr_val); + + return IRQ_HANDLED; +} + +static int fsl_bman_probe(struct platform_device *pdev) +{ + int ret, err_irq; + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource *res; + u16 id, bm_pool_cnt; + u8 major, minor; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n", + node->full_name); + return -ENXIO; + } + bm_ccsr_start = devm_ioremap(dev, res->start, + res->end - res->start + 1); + if (!bm_ccsr_start) + return -ENXIO; + + bm_get_version(&id, &major, &minor); + if (major == 1 && minor == 0) { + bman_ip_rev = BMAN_REV10; + bm_pool_cnt = BM_POOL_MAX; + } else if (major == 2 && minor == 0) { + bman_ip_rev = BMAN_REV20; + bm_pool_cnt = 8; + } else if (major == 2 && minor == 1) { + bman_ip_rev = BMAN_REV21; + bm_pool_cnt = BM_POOL_MAX; + } else { + dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n", + id, major, minor); + return -ENODEV; + } + + bm_set_memory(fbpr_a, fbpr_sz); + + err_irq = platform_get_irq(pdev, 0); + if (err_irq <= 0) { + dev_info(dev, "Can't get %s IRQ\n", node->full_name); + return -ENODEV; + } + ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err", + dev); + if (ret) { + dev_err(dev, "devm_request_irq() failed %d for '%s'\n", + ret, node->full_name); + return ret; + } + /* Disable Buffer Pool State Change */ + bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN); + /* + * Write-to-clear any stale bits, (eg. starvation being asserted prior + * to resource allocation during driver init). 
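+	 * The ISR is write-1-to-clear: the 0xffffffff write below clears
+	 * every latched bit at once, while a narrower mask (e.g. only
+	 * BM_EIRQ_FLWI) would clear just that bit.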
+	 */
+	bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+	/* Enable Error Interrupts */
+	bm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+	bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
+	if (IS_ERR(bm_bpalloc)) {
+		ret = PTR_ERR(bm_bpalloc);
+		dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
+		return ret;
+	}
+
+	/* seed BMan resource pool */
+	ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
+	if (ret) {
+		dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
+			0, bm_pool_cnt - 1, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id fsl_bman_ids[] = {
+	{
+		.compatible = "fsl,bman",
+	},
+	{}
+};
+
+static struct platform_driver fsl_bman_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = fsl_bman_ids,
+		.suppress_bind_attrs = true,
+	},
+	.probe = fsl_bman_probe,
+};
+
+builtin_platform_driver(fsl_bman_driver);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
new file mode 100644
index 000000000000..6579cc18811a
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -0,0 +1,219 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */ + +#include "bman_priv.h" + +static struct bman_portal *affine_bportals[NR_CPUS]; +static struct cpumask portal_cpus; +/* protect bman global registers and global data shared among portals */ +static DEFINE_SPINLOCK(bman_lock); + +static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg) +{ + struct bman_portal *p = bman_create_affine_portal(pcfg); + + if (!p) { + dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n", + __func__, pcfg->cpu); + return NULL; + } + + bman_p_irqsource_add(p, BM_PIRQ_RCRI); + affine_bportals[pcfg->cpu] = p; + + dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu); + + return p; +} + +static void bman_offline_cpu(unsigned int cpu) +{ + struct bman_portal *p = affine_bportals[cpu]; + const struct bm_portal_config *pcfg; + + if (!p) + return; + + pcfg = bman_get_bm_portal_config(p); + if (!pcfg) + return; + + irq_set_affinity(pcfg->irq, cpumask_of(0)); +} + +static void bman_online_cpu(unsigned int cpu) +{ + struct bman_portal *p = affine_bportals[cpu]; + const struct bm_portal_config *pcfg; + + if (!p) + return; + + pcfg = bman_get_bm_portal_config(p); + if (!pcfg) + return; + + irq_set_affinity(pcfg->irq, cpumask_of(cpu)); +} + +static int bman_hotplug_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + + switch (action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + bman_online_cpu(cpu); + break; + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + bman_offline_cpu(cpu); + } + + return NOTIFY_OK; +} + +static struct notifier_block bman_hotplug_cpu_notifier = { + .notifier_call = bman_hotplug_cpu_callback, +}; + +static int bman_portal_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct bm_portal_config *pcfg; + struct resource *addr_phys[2]; + void __iomem *va; + int irq, cpu; + + pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); + if (!pcfg) + return -ENOMEM; + + pcfg->dev = dev; + + addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM, + DPAA_PORTAL_CE); + if (!addr_phys[0]) { + dev_err(dev, "Can't get %s property 'reg::CE'\n", + node->full_name); + return -ENXIO; + } + + addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, + DPAA_PORTAL_CI); + if (!addr_phys[1]) { + dev_err(dev, "Can't get %s property 'reg::CI'\n", + node->full_name); + return -ENXIO; + } + + pcfg->cpu = -1; + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(dev, "Can't get %s IRQ'\n", node->full_name); + return -ENXIO; + } + pcfg->irq = irq; + + va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); + if (!va) + goto err_ioremap1; + + pcfg->addr_virt[DPAA_PORTAL_CE] = va; + + va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), + _PAGE_GUARDED | _PAGE_NO_CACHE); + if (!va) + goto err_ioremap2; + + pcfg->addr_virt[DPAA_PORTAL_CI] = va; + + spin_lock(&bman_lock); + cpu = cpumask_next_zero(-1, &portal_cpus); + if (cpu >= nr_cpu_ids) { + /* unassigned portal, skip init */ + spin_unlock(&bman_lock); + return 0; + } + + cpumask_set_cpu(cpu, &portal_cpus); + spin_unlock(&bman_lock); + pcfg->cpu = cpu; + + if (!init_pcfg(pcfg)) + goto err_ioremap2; + + /* clear irq affinity if assigned cpu is offline */ + if (!cpu_online(cpu)) + bman_offline_cpu(cpu); + + return 0; + +err_ioremap2: + iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); +err_ioremap1: + dev_err(dev, "ioremap failed\n"); + return -ENXIO; +} + +static const struct of_device_id bman_portal_ids[] = { + { + 
.compatible = "fsl,bman-portal", + }, + {} +}; +MODULE_DEVICE_TABLE(of, bman_portal_ids); + +static struct platform_driver bman_portal_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = bman_portal_ids, + }, + .probe = bman_portal_probe, +}; + +static int __init bman_portal_driver_register(struct platform_driver *drv) +{ + int ret; + + ret = platform_driver_register(drv); + if (ret < 0) + return ret; + + register_hotcpu_notifier(&bman_hotplug_cpu_notifier); + + return 0; +} + +module_driver(bman_portal_driver, + bman_portal_driver_register, platform_driver_unregister); diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h new file mode 100644 index 000000000000..f6896a2f6d90 --- /dev/null +++ b/drivers/soc/fsl/qbman/bman_priv.h @@ -0,0 +1,80 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "dpaa_sys.h" + +#include <soc/fsl/bman.h> + +/* Portal processing (interrupt) sources */ +#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */ + +/* Revision info (for errata and feature handling) */ +#define BMAN_REV10 0x0100 +#define BMAN_REV20 0x0200 +#define BMAN_REV21 0x0201 +extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */ + +extern struct gen_pool *bm_bpalloc; + +struct bm_portal_config { + /* + * Corenet portal addresses; + * [0]==cache-enabled, [1]==cache-inhibited. 
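+	 * They are indexed by the DPAA_PORTAL_CE/DPAA_PORTAL_CI constants
+	 * from dpaa_sys.h, e.g. p->addr.ce = c->addr_virt[DPAA_PORTAL_CE] in
+	 * bman_create_portal().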
+	 */
+	void __iomem *addr_virt[2];
+	/* Allow these to be joined in lists */
+	struct list_head list;
+	struct device *dev;
+	/* User-visible portal configuration settings */
+	/* portal is affined to this cpu */
+	int cpu;
+	/* portal interrupt line */
+	int irq;
+};
+
+struct bman_portal *bman_create_affine_portal(
+			const struct bm_portal_config *config);
+/*
+ * The bman_p_***() variant below might be called in a situation where the
+ * cpu to which the portal is affine is not online yet.
+ * @bman_portal specifies which portal the API will use.
+ */
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * This mask contains all the "irqsource" bits visible to API users
+ */
+#define BM_PIRQ_VISIBLE	BM_PIRQ_RCRI
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/bman_test.c b/drivers/soc/fsl/qbman/bman_test.c
new file mode 100644
index 000000000000..09b1c960b26a
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.c
@@ -0,0 +1,53 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "bman_priv.h" + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +void bman_test_api(void); diff --git a/drivers/soc/fsl/qbman/bman_test_api.c b/drivers/soc/fsl/qbman/bman_test_api.c new file mode 100644 index 000000000000..6f6bdd154fe3 --- /dev/null +++ b/drivers/soc/fsl/qbman/bman_test_api.c @@ -0,0 +1,151 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "bman_test.h" + +#define NUM_BUFS 93 +#define LOOPS 3 +#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU + +static struct bman_pool *pool; +static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned; +static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned; +static int bufs_received; + +static void bufs_init(void) +{ + int i; + + for (i = 0; i < NUM_BUFS; i++) + bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i); + bufs_received = 0; +} + +static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b) +{ + if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) { + + /* + * On SoCs with BMan revison 2.0, BMan only respects the 40 + * LS-bits of buffer addresses, masking off the upper 8-bits on + * release commands. The API provides for 48-bit addresses + * because some SoCs support all 48-bits. When generating + * garbage addresses for testing, we either need to zero the + * upper 8-bits when releasing to BMan (otherwise we'll be + * disappointed when the buffers we acquire back from BMan + * don't match), or we need to mask the upper 8-bits off when + * comparing. We do the latter. + */ + if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) < + (bm_buffer_get64(b) & BMAN_TOKEN_MASK)) + return -1; + if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) > + (bm_buffer_get64(b) & BMAN_TOKEN_MASK)) + return 1; + } else { + if (bm_buffer_get64(a) < bm_buffer_get64(b)) + return -1; + if (bm_buffer_get64(a) > bm_buffer_get64(b)) + return 1; + } + + return 0; +} + +static void bufs_confirm(void) +{ + int i, j; + + for (i = 0; i < NUM_BUFS; i++) { + int matches = 0; + + for (j = 0; j < NUM_BUFS; j++) + if (!bufs_cmp(&bufs_in[i], &bufs_out[j])) + matches++; + WARN_ON(matches != 1); + } +} + +/* test */ +void bman_test_api(void) +{ + int i, loops = LOOPS; + + bufs_init(); + + pr_info("%s(): Starting\n", __func__); + + pool = bman_new_pool(); + if (!pool) { + pr_crit("bman_new_pool() failed\n"); + goto failed; + } + + /* Release buffers */ +do_loop: + i = 0; + while (i < NUM_BUFS) { + int num = 8; + + if (i + num > NUM_BUFS) + num = NUM_BUFS - i; + if (bman_release(pool, bufs_in + i, num)) { + pr_crit("bman_release() failed\n"); + goto failed; + } + i += num; + } + + /* Acquire buffers */ + while (i > 0) { + int tmp, num = 8; + + if (num > i) + num = i; + tmp = bman_acquire(pool, bufs_out + i - num, num); + WARN_ON(tmp != num); + i -= num; + } + i = bman_acquire(pool, NULL, 1); + WARN_ON(i > 0); + + bufs_confirm(); + + if (--loops) + goto do_loop; + + /* Clean up */ + bman_free_pool(pool); + pr_info("%s(): Finished\n", __func__); + return; + +failed: + WARN_ON(1); +} diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h new file mode 100644 index 000000000000..b63fd72295c6 --- /dev/null +++ b/drivers/soc/fsl/qbman/dpaa_sys.h @@ -0,0 +1,103 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <linux/cpu.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/prefetch.h>
+#include <linux/genalloc.h>
+#include <asm/cacheflush.h>
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
+
+#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
+#error "Unsupported Cacheline Size"
+#endif
+
+static inline void dpaa_flush(void *p)
+{
+#ifdef CONFIG_PPC
+	flush_dcache_range((unsigned long)p, (unsigned long)p+64);
+#elif defined(CONFIG_ARM)
+	__cpuc_flush_dcache_area(p, 64);
+#elif defined(CONFIG_ARM64)
+	__flush_dcache_area(p, 64);
+#endif
+}
+
+#define dpaa_invalidate(p) dpaa_flush(p)
+
+#define dpaa_zero(p) memset(p, 0, 64)
+
+static inline void dpaa_touch_ro(void *p)
+{
+#if (L1_CACHE_BYTES == 32)
+	prefetch(p+32);
+#endif
+	prefetch(p);
+}
+
+/* Commonly used combo */
+static inline void dpaa_invalidate_touch_ro(void *p)
+{
+	dpaa_invalidate(p);
+	dpaa_touch_ro(p);
+}
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+#define DPAA_ASSERT(x) WARN_ON(!(x))
+#else
+#define DPAA_ASSERT(x)
+#endif
+
+/* cyclic helper for rings */
+static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+	/* 'first' is included, 'last' is excluded; e.g. size 8, first 6, last 2 => 4 */
+	if (first <= last)
+		return last - first;
+	return ringsize + last - first;
+}
+
+/* Offset applied to genalloc pools due to zero being an error return */
+#define DPAA_GENALLOC_OFF	0x80000000
+
+#endif /* __DPAA_SYS_H */
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
new file mode 100644
index 000000000000..119054bc922b
--- 
/dev/null +++ b/drivers/soc/fsl/qbman/qman.c @@ -0,0 +1,2881 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_priv.h" + +#define DQRR_MAXFILL 15 +#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */ +#define IRQNAME "QMan portal %d" +#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */ +#define QMAN_POLL_LIMIT 32 +#define QMAN_PIRQ_DQRR_ITHRESH 12 +#define QMAN_PIRQ_MR_ITHRESH 4 +#define QMAN_PIRQ_IPERIOD 100 + +/* Portal register assists */ + +/* Cache-inhibited register offsets */ +#define QM_REG_EQCR_PI_CINH 0x0000 +#define QM_REG_EQCR_CI_CINH 0x0004 +#define QM_REG_EQCR_ITR 0x0008 +#define QM_REG_DQRR_PI_CINH 0x0040 +#define QM_REG_DQRR_CI_CINH 0x0044 +#define QM_REG_DQRR_ITR 0x0048 +#define QM_REG_DQRR_DCAP 0x0050 +#define QM_REG_DQRR_SDQCR 0x0054 +#define QM_REG_DQRR_VDQCR 0x0058 +#define QM_REG_DQRR_PDQCR 0x005c +#define QM_REG_MR_PI_CINH 0x0080 +#define QM_REG_MR_CI_CINH 0x0084 +#define QM_REG_MR_ITR 0x0088 +#define QM_REG_CFG 0x0100 +#define QM_REG_ISR 0x0e00 +#define QM_REG_IER 0x0e04 +#define QM_REG_ISDR 0x0e08 +#define QM_REG_IIR 0x0e0c +#define QM_REG_ITPR 0x0e14 + +/* Cache-enabled register offsets */ +#define QM_CL_EQCR 0x0000 +#define QM_CL_DQRR 0x1000 +#define QM_CL_MR 0x2000 +#define QM_CL_EQCR_PI_CENA 0x3000 +#define QM_CL_EQCR_CI_CENA 0x3100 +#define QM_CL_DQRR_PI_CENA 0x3200 +#define QM_CL_DQRR_CI_CENA 0x3300 +#define QM_CL_MR_PI_CENA 0x3400 +#define QM_CL_MR_CI_CENA 0x3500 +#define QM_CL_CR 0x3800 +#define QM_CL_RR0 0x3900 +#define QM_CL_RR1 0x3940 + +/* + * BTW, the drivers (and h/w programming model) already obtain the required + * synchronisation for portal accesses and data-dependencies. Use of barrier()s + * or other order-preserving primitives simply degrade performance. 
Hence the + * use of the __raw_*() interfaces, which simply ensure that the compiler treats + * the portal registers as volatile + */ + +/* Cache-enabled ring access */ +#define qm_cl(base, idx) ((void *)base + ((idx) << 6)) + +/* + * Portal modes. + * Enum types; + * pmode == production mode + * cmode == consumption mode, + * dmode == h/w dequeue mode. + * Enum values use 3 letter codes. First letter matches the portal mode, + * remaining two letters indicate; + * ci == cache-inhibited portal register + * ce == cache-enabled portal register + * vb == in-band valid-bit (cache-enabled) + * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only + * As for "enum qm_dqrr_dmode", it should be self-explanatory. + */ +enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */ + qm_eqcr_pci = 0, /* PI index, cache-inhibited */ + qm_eqcr_pce = 1, /* PI index, cache-enabled */ + qm_eqcr_pvb = 2 /* valid-bit */ +}; +enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */ + qm_dqrr_dpush = 0, /* SDQCR + VDQCR */ + qm_dqrr_dpull = 1 /* PDQCR */ +}; +enum qm_dqrr_pmode { /* s/w-only */ + qm_dqrr_pci, /* reads DQRR_PI_CINH */ + qm_dqrr_pce, /* reads DQRR_PI_CENA */ + qm_dqrr_pvb /* reads valid-bit */ +}; +enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */ + qm_dqrr_cci = 0, /* CI index, cache-inhibited */ + qm_dqrr_cce = 1, /* CI index, cache-enabled */ + qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */ +}; +enum qm_mr_pmode { /* s/w-only */ + qm_mr_pci, /* reads MR_PI_CINH */ + qm_mr_pce, /* reads MR_PI_CENA */ + qm_mr_pvb /* reads valid-bit */ +}; +enum qm_mr_cmode { /* matches QCSP_CFG::MM */ + qm_mr_cci = 0, /* CI index, cache-inhibited */ + qm_mr_cce = 1 /* CI index, cache-enabled */ +}; + +/* --- Portal structures --- */ + +#define QM_EQCR_SIZE 8 +#define QM_DQRR_SIZE 16 +#define QM_MR_SIZE 8 + +/* "Enqueue Command" */ +struct qm_eqcr_entry { + u8 _ncw_verb; /* writes to this are non-coherent */ + u8 dca; + u16 seqnum; + u32 orp; /* 24-bit */ + u32 fqid; /* 24-bit */ + u32 tag; + struct qm_fd fd; + u8 __reserved3[32]; +} __packed; +#define QM_EQCR_VERB_VBIT 0x80 +#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */ +#define QM_EQCR_VERB_CMD_ENQUEUE 0x01 +#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */ +#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */ +#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */ + +struct qm_eqcr { + struct qm_eqcr_entry *ring, *cursor; + u8 ci, available, ithresh, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + u32 busy; + enum qm_eqcr_pmode pmode; +#endif +}; + +struct qm_dqrr { + const struct qm_dqrr_entry *ring, *cursor; + u8 pi, ci, fill, ithresh, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + enum qm_dqrr_dmode dmode; + enum qm_dqrr_pmode pmode; + enum qm_dqrr_cmode cmode; +#endif +}; + +struct qm_mr { + union qm_mr_entry *ring, *cursor; + u8 pi, ci, fill, ithresh, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + enum qm_mr_pmode pmode; + enum qm_mr_cmode cmode; +#endif +}; + +/* MC (Management Command) command */ +/* "Query FQ" */ +struct qm_mcc_queryfq { + u8 _ncw_verb; + u8 __reserved1[3]; + u32 fqid; /* 24-bit */ + u8 __reserved2[56]; +} __packed; +/* "Alter FQ State Commands " */ +struct qm_mcc_alterfq { + u8 _ncw_verb; + u8 __reserved1[3]; + u32 fqid; /* 24-bit */ + u8 __reserved2; + u8 count; /* number of consecutive FQID */ + u8 __reserved3[10]; + u32 context_b; /* frame queue context b */ + u8 __reserved4[40]; +} __packed; + +/* "Query CGR" */ +struct qm_mcc_querycgr { + u8 _ncw_verb; + u8 __reserved1[30]; + u8 cgid; + u8 __reserved2[32]; 
+}; + +struct qm_mcc_querywq { + u8 _ncw_verb; + u8 __reserved; + /* select channel if verb != QUERYWQ_DEDICATED */ + u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */ + u8 __reserved2[60]; +} __packed; + +#define QM_MCC_VERB_VBIT 0x80 +#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */ +#define QM_MCC_VERB_INITFQ_PARKED 0x40 +#define QM_MCC_VERB_INITFQ_SCHED 0x41 +#define QM_MCC_VERB_QUERYFQ 0x44 +#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */ +#define QM_MCC_VERB_QUERYWQ 0x46 +#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47 +#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */ +#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */ +#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */ +#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */ +#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */ +#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */ +#define QM_MCC_VERB_INITCGR 0x50 +#define QM_MCC_VERB_MODIFYCGR 0x51 +#define QM_MCC_VERB_CGRTESTWRITE 0x52 +#define QM_MCC_VERB_QUERYCGR 0x58 +#define QM_MCC_VERB_QUERYCONGESTION 0x59 +union qm_mc_command { + struct { + u8 _ncw_verb; /* writes to this are non-coherent */ + u8 __reserved[63]; + }; + struct qm_mcc_initfq initfq; + struct qm_mcc_queryfq queryfq; + struct qm_mcc_alterfq alterfq; + struct qm_mcc_initcgr initcgr; + struct qm_mcc_querycgr querycgr; + struct qm_mcc_querywq querywq; + struct qm_mcc_queryfq_np queryfq_np; +}; + +/* MC (Management Command) result */ +/* "Query FQ" */ +struct qm_mcr_queryfq { + u8 verb; + u8 result; + u8 __reserved1[8]; + struct qm_fqd fqd; /* the FQD fields are here */ + u8 __reserved2[30]; +} __packed; + +/* "Alter FQ State Commands" */ +struct qm_mcr_alterfq { + u8 verb; + u8 result; + u8 fqs; /* Frame Queue Status */ + u8 __reserved1[61]; +}; +#define QM_MCR_VERB_RRID 0x80 +#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK +#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED +#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED +#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ +#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP +#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ +#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED +#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED +#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE +#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE +#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS +#define QM_MCR_RESULT_NULL 0x00 +#define QM_MCR_RESULT_OK 0xf0 +#define QM_MCR_RESULT_ERR_FQID 0xf1 +#define QM_MCR_RESULT_ERR_FQSTATE 0xf2 +#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */ +#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4 +#define QM_MCR_RESULT_PENDING 0xf8 +#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff +#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */ +#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */ +#define QM_MCR_TIMEOUT 10000 /* us */ +union qm_mc_result { + struct { + u8 verb; + u8 result; + u8 __reserved1[62]; + }; + struct qm_mcr_queryfq queryfq; + struct qm_mcr_alterfq alterfq; + struct qm_mcr_querycgr querycgr; + struct qm_mcr_querycongestion querycongestion; + struct qm_mcr_querywq querywq; + struct qm_mcr_queryfq_np queryfq_np; +}; + +struct qm_mc { + union qm_mc_command *cr; + union qm_mc_result *rr; + u8 rridx, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + enum { + /* Can be _mc_start()ed */ + qman_mc_idle, + /* Can be _mc_commit()ed or _mc_abort()ed */ + qman_mc_user, + /* Can only be _mc_retry()ed */ + qman_mc_hw + } state; +#endif 
+};
+
+struct qm_addr {
+	void __iomem *ce;	/* cache-enabled */
+	void __iomem *ci;	/* cache-inhibited */
+};
+
+struct qm_portal {
+	/*
+	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
+	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
+	 * is setup-only, so isn't a cause for concern. In other words, don't
+	 * rearrange this structure on a whim, there be dragons ...
+	 */
+	struct qm_addr addr;
+	struct qm_eqcr eqcr;
+	struct qm_dqrr dqrr;
+	struct qm_mr mr;
+	struct qm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 qm_in(struct qm_portal *p, u32 offset)
+{
+	return __raw_readl(p->addr.ci + offset);
+}
+
+static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
+{
+	__raw_writel(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
+{
+	dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
+{
+	dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
+{
+	return __raw_readl(p->addr.ce + offset);
+}
+
+/* --- EQCR API --- */
+
+#define EQCR_SHIFT	ilog2(sizeof(struct qm_eqcr_entry))
+#define EQCR_CARRY	(uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
+{
+	uintptr_t addr = (uintptr_t)p;
+
+	addr &= ~EQCR_CARRY;
+
+	return (struct qm_eqcr_entry *)addr;
+}
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
+{
+	return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void eqcr_inc(struct qm_eqcr *eqcr)
+{
+	/* increment to the next EQCR pointer and handle overflow and 'vbit' */
+	struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+	eqcr->cursor = eqcr_carryclear(partial);
+	if (partial != eqcr->cursor)
+		eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+			       enum qm_eqcr_pmode pmode,
+			       unsigned int eq_stash_thresh,
+			       int eq_stash_prio)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u32 cfg;
+	u8 pi;
+
+	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+	eqcr->cursor = eqcr->ring + pi;
+	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+		     QM_EQCR_VERB_VBIT : 0;
+	eqcr->available = QM_EQCR_SIZE - 1 -
+			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 0;
+	eqcr->pmode = pmode;
+#endif
+	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
+	      (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+	      (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+	      ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+	qm_out(portal, QM_REG_CFG, cfg);
+	return 0;
+}
+
+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
+{
+	return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+	DPAA_ASSERT(!eqcr->busy);
+	if (pi != eqcr_ptr2idx(eqcr->cursor))
+		pr_crit("losing uncommitted EQCR entries\n");
+	if (ci != eqcr->ci)
+		pr_crit("missing existing EQCR completions\n");
+	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
+		pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+								 *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+
+	DPAA_ASSERT(!eqcr->busy);
+	if (!eqcr->available)
+		return NULL;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 1;
+#endif
+	dpaa_zero(eqcr->cursor);
+	return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+								*portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 diff, old_ci;
+
+	DPAA_ASSERT(!eqcr->busy);
+	if (!eqcr->available) {
+		old_ci = eqcr->ci;
+		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
+			   (QM_EQCR_SIZE - 1);
+		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+		eqcr->available += diff;
+		if (!diff)
+			return NULL;
+	}
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 1;
+#endif
+	dpaa_zero(eqcr->cursor);
+	return eqcr->cursor;
+}
+
+static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
+{
+	DPAA_ASSERT(eqcr->busy);
+	DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
+	DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
+	DPAA_ASSERT(eqcr->available >= 1);
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	struct qm_eqcr_entry *eqcursor;
+
+	eqcr_commit_checks(eqcr);
+	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+	dma_wmb();
+	eqcursor = eqcr->cursor;
+	eqcursor->_ncw_verb = myverb | eqcr->vbit;
+	dpaa_flush(eqcursor);
+	eqcr_inc(eqcr);
+	eqcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 diff, old_ci = eqcr->ci;
+
+	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
+	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+	eqcr->available += diff;
+	return diff;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+
+	eqcr->ithresh = ithresh;
+	qm_out(portal, QM_REG_EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+
+	return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct
qm_portal *portal) +{ + struct qm_eqcr *eqcr = &portal->eqcr; + + return QM_EQCR_SIZE - 1 - eqcr->available; +} + +/* --- DQRR API --- */ + +#define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry)) +#define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT) + +static const struct qm_dqrr_entry *dqrr_carryclear( + const struct qm_dqrr_entry *p) +{ + uintptr_t addr = (uintptr_t)p; + + addr &= ~DQRR_CARRY; + + return (const struct qm_dqrr_entry *)addr; +} + +static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e) +{ + return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1); +} + +static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e) +{ + return dqrr_carryclear(e + 1); +} + +static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf) +{ + qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) | + ((mf & (QM_DQRR_SIZE - 1)) << 20)); +} + +static inline int qm_dqrr_init(struct qm_portal *portal, + const struct qm_portal_config *config, + enum qm_dqrr_dmode dmode, + enum qm_dqrr_pmode pmode, + enum qm_dqrr_cmode cmode, u8 max_fill) +{ + struct qm_dqrr *dqrr = &portal->dqrr; + u32 cfg; + + /* Make sure the DQRR will be idle when we enable */ + qm_out(portal, QM_REG_DQRR_SDQCR, 0); + qm_out(portal, QM_REG_DQRR_VDQCR, 0); + qm_out(portal, QM_REG_DQRR_PDQCR, 0); + dqrr->ring = portal->addr.ce + QM_CL_DQRR; + dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1); + dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); + dqrr->cursor = dqrr->ring + dqrr->ci; + dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi); + dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ? + QM_DQRR_VERB_VBIT : 0; + dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR); +#ifdef CONFIG_FSL_DPAA_CHECKING + dqrr->dmode = dmode; + dqrr->pmode = pmode; + dqrr->cmode = cmode; +#endif + /* Invalidate every ring entry before beginning */ + for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++) + dpaa_invalidate(qm_cl(dqrr->ring, cfg)); + cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) | + ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */ + ((dmode & 1) << 18) | /* DP */ + ((cmode & 3) << 16) | /* DCM */ + 0xa0 | /* RE+SE */ + (0 ? 0x40 : 0) | /* Ignore RP */ + (0 ? 0x10 : 0); /* Ignore SP */ + qm_out(portal, QM_REG_CFG, cfg); + qm_dqrr_set_maxfill(portal, max_fill); + return 0; +} + +static inline void qm_dqrr_finish(struct qm_portal *portal) +{ +#ifdef CONFIG_FSL_DPAA_CHECKING + struct qm_dqrr *dqrr = &portal->dqrr; + + if (dqrr->cmode != qm_dqrr_cdc && + dqrr->ci != dqrr_ptr2idx(dqrr->cursor)) + pr_crit("Ignoring completed DQRR entries\n"); +#endif +} + +static inline const struct qm_dqrr_entry *qm_dqrr_current( + struct qm_portal *portal) +{ + struct qm_dqrr *dqrr = &portal->dqrr; + + if (!dqrr->fill) + return NULL; + return dqrr->cursor; +} + +static inline u8 qm_dqrr_next(struct qm_portal *portal) +{ + struct qm_dqrr *dqrr = &portal->dqrr; + + DPAA_ASSERT(dqrr->fill); + dqrr->cursor = dqrr_inc(dqrr->cursor); + return --dqrr->fill; +} + +static inline void qm_dqrr_pvb_update(struct qm_portal *portal) +{ + struct qm_dqrr *dqrr = &portal->dqrr; + struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi); + + DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb); +#ifndef CONFIG_FSL_PAMU + /* + * If PAMU is not available we need to invalidate the cache. 
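+	 * (The dpaa_invalidate_touch_ro() below appears to do both halves of
+	 * that: drop any stale copy of the line and re-request it read-only,
+	 * so the verb check that follows sees fresh data.)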
+ * When PAMU is available the cache is updated by stash + */ + dpaa_invalidate_touch_ro(res); +#endif + /* + * when accessing 'verb', use __raw_readb() to ensure that compiler + * inlining doesn't try to optimise out "excess reads". + */ + if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) { + dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1); + if (!dqrr->pi) + dqrr->vbit ^= QM_DQRR_VERB_VBIT; + dqrr->fill++; + } +} + +static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal, + const struct qm_dqrr_entry *dq, + int park) +{ + __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr; + int idx = dqrr_ptr2idx(dq); + + DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + DPAA_ASSERT((dqrr->ring + idx) == dq); + DPAA_ASSERT(idx < QM_DQRR_SIZE); + qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */ + ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */ + idx); /* DQRR_DCAP::DCAP_CI */ +} + +static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask) +{ + __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr; + + DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */ + (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */ +} + +static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr) +{ + qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr); +} + +static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr) +{ + qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr); +} + +static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh) +{ + qm_out(portal, QM_REG_DQRR_ITR, ithresh); +} + +/* --- MR API --- */ + +#define MR_SHIFT ilog2(sizeof(union qm_mr_entry)) +#define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT) + +static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p) +{ + uintptr_t addr = (uintptr_t)p; + + addr &= ~MR_CARRY; + + return (union qm_mr_entry *)addr; +} + +static inline int mr_ptr2idx(const union qm_mr_entry *e) +{ + return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1); +} + +static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e) +{ + return mr_carryclear(e + 1); +} + +static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode, + enum qm_mr_cmode cmode) +{ + struct qm_mr *mr = &portal->mr; + u32 cfg; + + mr->ring = portal->addr.ce + QM_CL_MR; + mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1); + mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1); + mr->cursor = mr->ring + mr->ci; + mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi); + mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE) + ? 
QM_MR_VERB_VBIT : 0; + mr->ithresh = qm_in(portal, QM_REG_MR_ITR); +#ifdef CONFIG_FSL_DPAA_CHECKING + mr->pmode = pmode; + mr->cmode = cmode; +#endif + cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) | + ((cmode & 1) << 8); /* QCSP_CFG:MM */ + qm_out(portal, QM_REG_CFG, cfg); + return 0; +} + +static inline void qm_mr_finish(struct qm_portal *portal) +{ + struct qm_mr *mr = &portal->mr; + + if (mr->ci != mr_ptr2idx(mr->cursor)) + pr_crit("Ignoring completed MR entries\n"); +} + +static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal) +{ + struct qm_mr *mr = &portal->mr; + + if (!mr->fill) + return NULL; + return mr->cursor; +} + +static inline int qm_mr_next(struct qm_portal *portal) +{ + struct qm_mr *mr = &portal->mr; + + DPAA_ASSERT(mr->fill); + mr->cursor = mr_inc(mr->cursor); + return --mr->fill; +} + +static inline void qm_mr_pvb_update(struct qm_portal *portal) +{ + struct qm_mr *mr = &portal->mr; + union qm_mr_entry *res = qm_cl(mr->ring, mr->pi); + + DPAA_ASSERT(mr->pmode == qm_mr_pvb); + /* + * when accessing 'verb', use __raw_readb() to ensure that compiler + * inlining doesn't try to optimise out "excess reads". + */ + if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) { + mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); + if (!mr->pi) + mr->vbit ^= QM_MR_VERB_VBIT; + mr->fill++; + res = mr_inc(res); + } + dpaa_invalidate_touch_ro(res); +} + +static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num) +{ + struct qm_mr *mr = &portal->mr; + + DPAA_ASSERT(mr->cmode == qm_mr_cci); + mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); + qm_out(portal, QM_REG_MR_CI_CINH, mr->ci); +} + +static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal) +{ + struct qm_mr *mr = &portal->mr; + + DPAA_ASSERT(mr->cmode == qm_mr_cci); + mr->ci = mr_ptr2idx(mr->cursor); + qm_out(portal, QM_REG_MR_CI_CINH, mr->ci); +} + +static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh) +{ + qm_out(portal, QM_REG_MR_ITR, ithresh); +} + +/* --- Management command API --- */ + +static inline int qm_mc_init(struct qm_portal *portal) +{ + struct qm_mc *mc = &portal->mc; + + mc->cr = portal->addr.ce + QM_CL_CR; + mc->rr = portal->addr.ce + QM_CL_RR0; + mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT) + ? 0 : 1; + mc->vbit = mc->rridx ? 
QM_MCC_VERB_VBIT : 0; +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = qman_mc_idle; +#endif + return 0; +} + +static inline void qm_mc_finish(struct qm_portal *portal) +{ +#ifdef CONFIG_FSL_DPAA_CHECKING + struct qm_mc *mc = &portal->mc; + + DPAA_ASSERT(mc->state == qman_mc_idle); + if (mc->state != qman_mc_idle) + pr_crit("Losing incomplete MC command\n"); +#endif +} + +static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal) +{ + struct qm_mc *mc = &portal->mc; + + DPAA_ASSERT(mc->state == qman_mc_idle); +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = qman_mc_user; +#endif + dpaa_zero(mc->cr); + return mc->cr; +} + +static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb) +{ + struct qm_mc *mc = &portal->mc; + union qm_mc_result *rr = mc->rr + mc->rridx; + + DPAA_ASSERT(mc->state == qman_mc_user); + dma_wmb(); + mc->cr->_ncw_verb = myverb | mc->vbit; + dpaa_flush(mc->cr); + dpaa_invalidate_touch_ro(rr); +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = qman_mc_hw; +#endif +} + +static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal) +{ + struct qm_mc *mc = &portal->mc; + union qm_mc_result *rr = mc->rr + mc->rridx; + + DPAA_ASSERT(mc->state == qman_mc_hw); + /* + * The inactive response register's verb byte always returns zero until + * its command is submitted and completed. This includes the valid-bit, + * in case you were wondering... + */ + if (!__raw_readb(&rr->verb)) { + dpaa_invalidate_touch_ro(rr); + return NULL; + } + mc->rridx ^= 1; + mc->vbit ^= QM_MCC_VERB_VBIT; +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = qman_mc_idle; +#endif + return rr; +} + +static inline int qm_mc_result_timeout(struct qm_portal *portal, + union qm_mc_result **mcr) +{ + int timeout = QM_MCR_TIMEOUT; + + do { + *mcr = qm_mc_result(portal); + if (*mcr) + break; + udelay(1); + } while (--timeout); + + return timeout; +} + +static inline void fq_set(struct qman_fq *fq, u32 mask) +{ + set_bits(mask, &fq->flags); +} + +static inline void fq_clear(struct qman_fq *fq, u32 mask) +{ + clear_bits(mask, &fq->flags); +} + +static inline int fq_isset(struct qman_fq *fq, u32 mask) +{ + return fq->flags & mask; +} + +static inline int fq_isclear(struct qman_fq *fq, u32 mask) +{ + return !(fq->flags & mask); +} + +struct qman_portal { + struct qm_portal p; + /* PORTAL_BITS_*** - dynamic, strictly internal */ + unsigned long bits; + /* interrupt sources processed by portal_isr(), configurable */ + unsigned long irq_sources; + u32 use_eqcr_ci_stashing; + /* only 1 volatile dequeue at a time */ + struct qman_fq *vdqcr_owned; + u32 sdqcr; + /* probing time config params for cpu-affine portals */ + const struct qm_portal_config *config; + /* needed for providing a non-NULL device to dma_map_***() */ + struct platform_device *pdev; + /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */ + struct qman_cgrs *cgrs; + /* linked-list of CSCN handlers. 
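+	 * Each entry is a qman_cgr whose ->cb is invoked from
+	 * qm_congestion_task() when that CGR's congestion state changes.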
*/ + struct list_head cgr_cbs; + /* list lock */ + spinlock_t cgr_lock; + struct work_struct congestion_work; + struct work_struct mr_work; + char irqname[MAX_IRQNAME]; +}; + +static cpumask_t affine_mask; +static DEFINE_SPINLOCK(affine_mask_lock); +static u16 affine_channels[NR_CPUS]; +static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal); +struct qman_portal *affine_portals[NR_CPUS]; + +static inline struct qman_portal *get_affine_portal(void) +{ + return &get_cpu_var(qman_affine_portal); +} + +static inline void put_affine_portal(void) +{ + put_cpu_var(qman_affine_portal); +} + +static struct workqueue_struct *qm_portal_wq; + +int qman_wq_alloc(void) +{ + qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1); + if (!qm_portal_wq) + return -ENOMEM; + return 0; +} + +/* + * This is what everything can wait on, even if it migrates to a different cpu + * to the one whose affine portal it is waiting on. + */ +static DECLARE_WAIT_QUEUE_HEAD(affine_queue); + +static struct qman_fq **fq_table; +static u32 num_fqids; + +int qman_alloc_fq_table(u32 _num_fqids) +{ + num_fqids = _num_fqids; + + fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *)); + if (!fq_table) + return -ENOMEM; + + pr_debug("Allocated fq lookup table at %p, entry count %u\n", + fq_table, num_fqids * 2); + return 0; +} + +static struct qman_fq *idx_to_fq(u32 idx) +{ + struct qman_fq *fq; + +#ifdef CONFIG_FSL_DPAA_CHECKING + if (WARN_ON(idx >= num_fqids * 2)) + return NULL; +#endif + fq = fq_table[idx]; + DPAA_ASSERT(!fq || idx == fq->idx); + + return fq; +} + +/* + * Only returns full-service fq objects, not enqueue-only + * references (QMAN_FQ_FLAG_NO_MODIFY). + */ +static struct qman_fq *fqid_to_fq(u32 fqid) +{ + return idx_to_fq(fqid * 2); +} + +static struct qman_fq *tag_to_fq(u32 tag) +{ +#if BITS_PER_LONG == 64 + return idx_to_fq(tag); +#else + return (struct qman_fq *)tag; +#endif +} + +static u32 fq_to_tag(struct qman_fq *fq) +{ +#if BITS_PER_LONG == 64 + return fq->idx; +#else + return (u32)fq; +#endif +} + +static u32 __poll_portal_slow(struct qman_portal *p, u32 is); +static inline unsigned int __poll_portal_fast(struct qman_portal *p, + unsigned int poll_limit); +static void qm_congestion_task(struct work_struct *work); +static void qm_mr_process_task(struct work_struct *work); + +static irqreturn_t portal_isr(int irq, void *ptr) +{ + struct qman_portal *p = ptr; + + u32 clear = QM_DQAVAIL_MASK | p->irq_sources; + u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; + + if (unlikely(!is)) + return IRQ_NONE; + + /* DQRR-handling if it's interrupt-driven */ + if (is & QM_PIRQ_DQRI) + __poll_portal_fast(p, QMAN_POLL_LIMIT); + /* Handling of anything else that's interrupt-driven */ + clear |= __poll_portal_slow(p, is); + qm_out(&p->p, QM_REG_ISR, clear); + return IRQ_HANDLED; +} + +static int drain_mr_fqrni(struct qm_portal *p) +{ + const union qm_mr_entry *msg; +loop: + msg = qm_mr_current(p); + if (!msg) { + /* + * if MR was full and h/w had other FQRNI entries to produce, we + * need to allow it time to produce those entries once the + * existing entries are consumed. A worst-case situation + * (fully-loaded system) means h/w sequencers may have to do 3-4 + * other things before servicing the portal's MR pump, each of + * which (if slow) may take ~50 qman cycles (which is ~200 + * processor cycles). So rounding up and then multiplying this + * worst-case estimate by a factor of 10, just to be + * ultra-paranoid, goes as high as 10,000 cycles. 
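+		 * (Worked through with the numbers above: ~4 tasks at ~200
+		 * processor cycles each is ~800 cycles, and the factor of 10
+		 * brings that to ~8,000, rounded up to 10,000.)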
NB, we consume
+		 * one entry at a time, so h/w has an opportunity to produce new
+		 * entries well before the ring has been fully consumed, so
+		 * we're being *really* paranoid here.
+		 */
+		u64 now, then = jiffies;
+
+		do {
+			now = jiffies;
+		} while ((then + 10000) > now);
+		msg = qm_mr_current(p);
+		if (!msg)
+			return 0;
+	}
+	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+		/* We aren't draining anything but FQRNIs */
+		pr_err("Found verb 0x%x in MR\n", msg->verb);
+		return -1;
+	}
+	qm_mr_next(p);
+	qm_mr_cci_consume(p, 1);
+	goto loop;
+}
+
+static int qman_create_portal(struct qman_portal *portal,
+			      const struct qm_portal_config *c,
+			      const struct qman_cgrs *cgrs)
+{
+	struct qm_portal *p;
+	char buf[16];
+	int ret;
+	u32 isdr;
+
+	p = &portal->p;
+
+#ifdef CONFIG_FSL_PAMU
+	/* PAMU is required for stashing */
+	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+#else
+	portal->use_eqcr_ci_stashing = 0;
+#endif
+	/*
+	 * prep the low-level portal struct with the mapped addresses from the
+	 * config, everything that follows depends on it and "config" is more
+	 * for (de)reference
+	 */
+	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+	/*
+	 * If CI-stashing is used, the current defaults use a threshold of 3,
+	 * and stash with higher-than-DQRR priority.
+	 */
+	if (qm_eqcr_init(p, qm_eqcr_pvb,
+			 portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+		dev_err(c->dev, "EQCR initialisation failed\n");
+		goto fail_eqcr;
+	}
+	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+			 qm_dqrr_cdc, DQRR_MAXFILL)) {
+		dev_err(c->dev, "DQRR initialisation failed\n");
+		goto fail_dqrr;
+	}
+	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+		dev_err(c->dev, "MR initialisation failed\n");
+		goto fail_mr;
+	}
+	if (qm_mc_init(p)) {
+		dev_err(c->dev, "MC initialisation failed\n");
+		goto fail_mc;
+	}
+	/* static interrupt-gating controls */
+	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
+	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
+	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
+	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+	if (!portal->cgrs)
+		goto fail_cgrs;
+	/* initial snapshot is no-depletion */
+	qman_cgrs_init(&portal->cgrs[1]);
+	if (cgrs)
+		portal->cgrs[0] = *cgrs;
+	else
+		/* if the given mask is NULL, assume all CGRs can be seen */
+		qman_cgrs_fill(&portal->cgrs[0]);
+	INIT_LIST_HEAD(&portal->cgr_cbs);
+	spin_lock_init(&portal->cgr_lock);
+	INIT_WORK(&portal->congestion_work, qm_congestion_task);
+	INIT_WORK(&portal->mr_work, qm_mr_process_task);
+	portal->bits = 0;
+	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+	sprintf(buf, "qportal-%d", c->channel);
+	portal->pdev = platform_device_alloc(buf, -1);
+	if (!portal->pdev)
+		goto fail_devalloc;
+	if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
+		goto fail_devadd;
+	ret = platform_device_add(portal->pdev);
+	if (ret)
+		goto fail_devadd;
+	isdr = 0xffffffff;
+	qm_out(p, QM_REG_ISDR, isdr);
+	portal->irq_sources = 0;
+	qm_out(p, QM_REG_IER, 0);
+	qm_out(p, QM_REG_ISR, 0xffffffff);
+	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+		dev_err(c->dev, "request_irq() failed\n");
+		goto fail_irq;
+	}
+	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+		dev_err(c->dev, "irq_set_affinity() failed\n");
+		goto fail_affinity;
+	}
+
+	/* Need EQCR to be empty before continuing */
+	isdr &= ~QM_PIRQ_EQCI;
+	qm_out(p, QM_REG_ISDR, isdr);
+	ret = qm_eqcr_get_fill(p);
+	if (ret) {
+		dev_err(c->dev, "EQCR unclean\n");
+		goto fail_eqcr_empty;
+	}
+	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+	qm_out(p, QM_REG_ISDR, isdr);
+	if (qm_dqrr_current(p)) {
+		dev_err(c->dev, "DQRR unclean\n");
+		qm_dqrr_cdc_consume_n(p, 0xffff);
+	}
+	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
+		/* special handling, drain just in case it's a few FQRNIs */
+		const union qm_mr_entry *e = qm_mr_current(p);
+
+		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%x\n",
+			e->verb, e->ern.rc, e->ern.fd.addr_lo);
+		goto fail_dqrr_mr_empty;
+	}
+	/* Success */
+	portal->config = c;
+	qm_out(p, QM_REG_ISDR, 0);
+	qm_out(p, QM_REG_IIR, 0);
+	/* Write a sane SDQCR */
+	qm_dqrr_sdqcr_set(p, portal->sdqcr);
+	return 0;
+
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+fail_affinity:
+	free_irq(c->irq, portal);
+fail_irq:
+	platform_device_del(portal->pdev);
+fail_devadd:
+	platform_device_put(portal->pdev);
+fail_devalloc:
+	kfree(portal->cgrs);
+fail_cgrs:
+	qm_mc_finish(p);
+fail_mc:
+	qm_mr_finish(p);
+fail_mr:
+	qm_dqrr_finish(p);
+fail_dqrr:
+	qm_eqcr_finish(p);
+fail_eqcr:
+	return -EIO;
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+					      const struct qman_cgrs *cgrs)
+{
+	struct qman_portal *portal;
+	int err;
+
+	portal = &per_cpu(qman_affine_portal, c->cpu);
+	err = qman_create_portal(portal, c, cgrs);
+	if (err)
+		return NULL;
+
+	spin_lock(&affine_mask_lock);
+	cpumask_set_cpu(c->cpu, &affine_mask);
+	affine_channels[c->cpu] = c->channel;
+	affine_portals[c->cpu] = portal;
+	spin_unlock(&affine_mask_lock);
+
+	return portal;
+}
+
+static void qman_destroy_portal(struct qman_portal *qm)
+{
+	const struct qm_portal_config *pcfg;
+
+	/* Stop dequeues on the portal */
+	qm_dqrr_sdqcr_set(&qm->p, 0);
+
+	/*
+	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+	 * something related to QM_PIRQ_EQCI, this may need fixing.
+	 * Also, due to the prefetching model used for CI updates in the enqueue
+	 * path, this update will only invalidate the CI cacheline *after*
+	 * working on it, so we need to call this twice to ensure a full update
+	 * irrespective of where the enqueue processing was at when the teardown
+	 * began.
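+	 * (That is: the first call consumes whatever line was prefetched
+	 * before teardown started, and the second fetches and acts on the
+	 * now-current CI value.)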
+ */ + qm_eqcr_cce_update(&qm->p); + qm_eqcr_cce_update(&qm->p); + pcfg = qm->config; + + free_irq(pcfg->irq, qm); + + kfree(qm->cgrs); + qm_mc_finish(&qm->p); + qm_mr_finish(&qm->p); + qm_dqrr_finish(&qm->p); + qm_eqcr_finish(&qm->p); + + platform_device_del(qm->pdev); + platform_device_put(qm->pdev); + + qm->config = NULL; +} + +const struct qm_portal_config *qman_destroy_affine_portal(void) +{ + struct qman_portal *qm = get_affine_portal(); + const struct qm_portal_config *pcfg; + int cpu; + + pcfg = qm->config; + cpu = pcfg->cpu; + + qman_destroy_portal(qm); + + spin_lock(&affine_mask_lock); + cpumask_clear_cpu(cpu, &affine_mask); + spin_unlock(&affine_mask_lock); + put_affine_portal(); + return pcfg; +} + +/* Inline helper to reduce nesting in __poll_portal_slow() */ +static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq, + const union qm_mr_entry *msg, u8 verb) +{ + switch (verb) { + case QM_MR_VERB_FQRL: + DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL)); + fq_clear(fq, QMAN_FQ_STATE_ORL); + break; + case QM_MR_VERB_FQRN: + DPAA_ASSERT(fq->state == qman_fq_state_parked || + fq->state == qman_fq_state_sched); + DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING)); + fq_clear(fq, QMAN_FQ_STATE_CHANGING); + if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY) + fq_set(fq, QMAN_FQ_STATE_NE); + if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT) + fq_set(fq, QMAN_FQ_STATE_ORL); + fq->state = qman_fq_state_retired; + break; + case QM_MR_VERB_FQPN: + DPAA_ASSERT(fq->state == qman_fq_state_sched); + DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING)); + fq->state = qman_fq_state_parked; + } +} + +static void qm_congestion_task(struct work_struct *work) +{ + struct qman_portal *p = container_of(work, struct qman_portal, + congestion_work); + struct qman_cgrs rr, c; + union qm_mc_result *mcr; + struct qman_cgr *cgr; + + spin_lock(&p->cgr_lock); + qm_mc_start(&p->p); + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + spin_unlock(&p->cgr_lock); + dev_crit(p->config->dev, "QUERYCONGESTION timeout\n"); + return; + } + /* mask out the ones I'm not interested in */ + qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state, + &p->cgrs[0]); + /* check previous snapshot for delta, enter/exit congestion */ + qman_cgrs_xor(&c, &rr, &p->cgrs[1]); + /* update snapshot */ + qman_cgrs_cp(&p->cgrs[1], &rr); + /* Invoke callback */ + list_for_each_entry(cgr, &p->cgr_cbs, node) + if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid)) + cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid)); + spin_unlock(&p->cgr_lock); +} + +static void qm_mr_process_task(struct work_struct *work) +{ + struct qman_portal *p = container_of(work, struct qman_portal, + mr_work); + const union qm_mr_entry *msg; + struct qman_fq *fq; + u8 verb, num = 0; + + preempt_disable(); + + while (1) { + qm_mr_pvb_update(&p->p); + msg = qm_mr_current(&p->p); + if (!msg) + break; + + verb = msg->verb & QM_MR_VERB_TYPE_MASK; + /* The message is a software ERN iff the 0x20 bit is clear */ + if (verb & 0x20) { + switch (verb) { + case QM_MR_VERB_FQRNI: + /* nada, we drop FQRNIs on the floor */ + break; + case QM_MR_VERB_FQRN: + case QM_MR_VERB_FQRL: + /* Lookup in the retirement table */ + fq = fqid_to_fq(msg->fq.fqid); + if (WARN_ON(!fq)) + break; + fq_state_change(p, fq, msg, verb); + if (fq->cb.fqs) + fq->cb.fqs(p, fq, msg); + break; + case QM_MR_VERB_FQPN: + /* Parked */ + fq = tag_to_fq(msg->fq.contextB); + fq_state_change(p, fq, msg, verb); + if (fq->cb.fqs) + fq->cb.fqs(p, fq, msg); + break; + case 
QM_MR_VERB_DC_ERN:
+				/* DCP ERN */
+				pr_crit_once("Leaking DCP ERNs!\n");
+				break;
+			default:
+				pr_crit("Invalid MR verb 0x%02x\n", verb);
+			}
+		} else {
+			/* It's a software ERN */
+			fq = tag_to_fq(msg->ern.tag);
+			fq->cb.ern(p, fq, msg);
+		}
+		num++;
+		qm_mr_next(&p->p);
+	}
+
+	qm_mr_cci_consume(&p->p, num);
+	preempt_enable();
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+	if (is & QM_PIRQ_CSCI) {
+		queue_work_on(smp_processor_id(), qm_portal_wq,
+			      &p->congestion_work);
+	}
+
+	if (is & QM_PIRQ_EQRI) {
+		qm_eqcr_cce_update(&p->p);
+		qm_eqcr_set_ithresh(&p->p, 0);
+		wake_up(&affine_queue);
+	}
+
+	if (is & QM_PIRQ_MRI) {
+		queue_work_on(smp_processor_id(), qm_portal_wq,
+			      &p->mr_work);
+	}
+
+	return is;
+}
+
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+	p->vdqcr_owned = NULL;
+	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+	wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ * (i) setting/clearing vdqcr_owned, and
+ * (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe, because:
+ *
+ * (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ *     vdqcr_owned field (which it does before setting VDQCR), and
+ *     qman_volatile_dequeue() blocks interrupts and preemption while this is
+ *     done so that we can't interfere.
+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ *     with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against
+ * the above potential conflicts, but that this function itself is not
+ * re-entrant (this is because the function tracks one end of each FIFO in the
+ * portal and we do *not* want to lock that). So the consequence is that it is
+ * safe for user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+					      unsigned int poll_limit)
+{
+	const struct qm_dqrr_entry *dq;
+	struct qman_fq *fq;
+	enum qman_cb_dqrr_result res;
+	unsigned int limit = 0;
+
+	do {
+		qm_dqrr_pvb_update(&p->p);
+		dq = qm_dqrr_current(&p->p);
+		if (!dq)
+			break;
+
+		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+			/*
+			 * VDQCR: don't trust contextB as the FQ may have
+			 * been configured for h/w consumption and we're
+			 * draining it post-retirement.
+			 */
+			fq = p->vdqcr_owned;
+			/*
+			 * We only set QMAN_FQ_STATE_NE when retiring, so we
+			 * only need to check for clearing it when doing
+			 * volatile dequeues. It's one less thing to check
+			 * in the critical path (SDQCR).
+			 */
+			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+				fq_clear(fq, QMAN_FQ_STATE_NE);
+			/*
+			 * This is duplicated from the SDQCR code, but we
+			 * have stuff to do before *and* after this callback,
+			 * and we don't want multiple if()s in the critical
+			 * path (SDQCR).
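+			 * The "stuff to do after" is the DQCR_EXPIRED check
+			 * below, which releases VDQCR ownership.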
+ */ + res = fq->cb.dqrr(p, fq, dq); + if (res == qman_cb_dqrr_stop) + break; + /* Check for VDQCR completion */ + if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) + clear_vdqcr(p, fq); + } else { + /* SDQCR: contextB points to the FQ */ + fq = tag_to_fq(dq->contextB); + /* Now let the callback do its stuff */ + res = fq->cb.dqrr(p, fq, dq); + /* + * The callback can request that we exit without + * consuming this entry nor advancing; + */ + if (res == qman_cb_dqrr_stop) + break; + } + /* Interpret 'dq' from a driver perspective. */ + /* + * Parking isn't possible unless HELDACTIVE was set. NB, + * FORCEELIGIBLE implies HELDACTIVE, so we only need to + * check for HELDACTIVE to cover both. + */ + DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || + (res != qman_cb_dqrr_park)); + /* just means "skip it, I'll consume it myself later on" */ + if (res != qman_cb_dqrr_defer) + qm_dqrr_cdc_consume_1ptr(&p->p, dq, + res == qman_cb_dqrr_park); + /* Move forward */ + qm_dqrr_next(&p->p); + /* + * Entry processed and consumed, increment our counter. The + * callback can request that we exit after consuming the + * entry, and we also exit if we reach our processing limit, + * so loop back only if neither of these conditions is met. + */ + } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop); + + return limit; +} + +void qman_p_irqsource_add(struct qman_portal *p, u32 bits) +{ + unsigned long irqflags; + + local_irq_save(irqflags); + set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources); + qm_out(&p->p, QM_REG_IER, p->irq_sources); + local_irq_restore(irqflags); +} +EXPORT_SYMBOL(qman_p_irqsource_add); + +void qman_p_irqsource_remove(struct qman_portal *p, u32 bits) +{ + unsigned long irqflags; + u32 ier; + + /* + * Our interrupt handler only processes+clears status register bits that + * are in p->irq_sources. As we're trimming that mask, if one of them + * were to assert in the status register just before we remove it from + * the enable register, there would be an interrupt-storm when we + * release the IRQ lock. So we wait for the enable register update to + * take effect in h/w (by reading it back) and then clear all other bits + * in the status register. Ie. we clear them from ISR once it's certain + * IER won't allow them to reassert. + */ + local_irq_save(irqflags); + bits &= QM_PIRQ_VISIBLE; + clear_bits(bits, &p->irq_sources); + qm_out(&p->p, QM_REG_IER, p->irq_sources); + ier = qm_in(&p->p, QM_REG_IER); + /* + * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a + * data-dependency, ie. to protect against re-ordering. 
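+	 * In other words, the ISR write below consumes the value just read
+	 * back from IER, so it cannot be reordered ahead of that read.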
+ */ + qm_out(&p->p, QM_REG_ISR, ~ier); + local_irq_restore(irqflags); +} +EXPORT_SYMBOL(qman_p_irqsource_remove); + +const cpumask_t *qman_affine_cpus(void) +{ + return &affine_mask; +} +EXPORT_SYMBOL(qman_affine_cpus); + +u16 qman_affine_channel(int cpu) +{ + if (cpu < 0) { + struct qman_portal *portal = get_affine_portal(); + + cpu = portal->config->cpu; + put_affine_portal(); + } + WARN_ON(!cpumask_test_cpu(cpu, &affine_mask)); + return affine_channels[cpu]; +} +EXPORT_SYMBOL(qman_affine_channel); + +struct qman_portal *qman_get_affine_portal(int cpu) +{ + return affine_portals[cpu]; +} +EXPORT_SYMBOL(qman_get_affine_portal); + +int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit) +{ + return __poll_portal_fast(p, limit); +} +EXPORT_SYMBOL(qman_p_poll_dqrr); + +void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools) +{ + unsigned long irqflags; + + local_irq_save(irqflags); + pools &= p->config->pools; + p->sdqcr |= pools; + qm_dqrr_sdqcr_set(&p->p, p->sdqcr); + local_irq_restore(irqflags); +} +EXPORT_SYMBOL(qman_p_static_dequeue_add); + +/* Frame queue API */ + +static const char *mcr_result_str(u8 result) +{ + switch (result) { + case QM_MCR_RESULT_NULL: + return "QM_MCR_RESULT_NULL"; + case QM_MCR_RESULT_OK: + return "QM_MCR_RESULT_OK"; + case QM_MCR_RESULT_ERR_FQID: + return "QM_MCR_RESULT_ERR_FQID"; + case QM_MCR_RESULT_ERR_FQSTATE: + return "QM_MCR_RESULT_ERR_FQSTATE"; + case QM_MCR_RESULT_ERR_NOTEMPTY: + return "QM_MCR_RESULT_ERR_NOTEMPTY"; + case QM_MCR_RESULT_PENDING: + return "QM_MCR_RESULT_PENDING"; + case QM_MCR_RESULT_ERR_BADCOMMAND: + return "QM_MCR_RESULT_ERR_BADCOMMAND"; + } + return "<unknown MCR result>"; +} + +int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq) +{ + if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) { + int ret = qman_alloc_fqid(&fqid); + + if (ret) + return ret; + } + fq->fqid = fqid; + fq->flags = flags; + fq->state = qman_fq_state_oos; + fq->cgr_groupid = 0; + + /* A context_b of 0 is allegedly special, so don't use that fqid */ + if (fqid == 0 || fqid >= num_fqids) { + WARN(1, "bad fqid %d\n", fqid); + return -EINVAL; + } + + fq->idx = fqid * 2; + if (flags & QMAN_FQ_FLAG_NO_MODIFY) + fq->idx++; + + WARN_ON(fq_table[fq->idx]); + fq_table[fq->idx] = fq; + + return 0; +} +EXPORT_SYMBOL(qman_create_fq); + +void qman_destroy_fq(struct qman_fq *fq) +{ + /* + * We don't need to lock the FQ as it is a pre-condition that the FQ be + * quiesced. Instead, run some checks. + */ + switch (fq->state) { + case qman_fq_state_parked: + case qman_fq_state_oos: + if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) + qman_release_fqid(fq->fqid); + + DPAA_ASSERT(fq_table[fq->idx]); + fq_table[fq->idx] = NULL; + return; + default: + break; + } + DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!"); +} +EXPORT_SYMBOL(qman_destroy_fq); + +u32 qman_fq_fqid(struct qman_fq *fq) +{ + return fq->fqid; +} +EXPORT_SYMBOL(qman_fq_fqid); + +int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p; + u8 res, myverb; + int ret = 0; + + myverb = (flags & QMAN_INITFQ_FLAG_SCHED) + ? 
QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED; + + if (fq->state != qman_fq_state_oos && + fq->state != qman_fq_state_parked) + return -EINVAL; +#ifdef CONFIG_FSL_DPAA_CHECKING + if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) + return -EINVAL; +#endif + if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) { + /* And can't be set at the same time as TDTHRESH */ + if (opts->we_mask & QM_INITFQ_WE_TDTHRESH) + return -EINVAL; + } + /* Issue an INITFQ_[PARKED|SCHED] management command */ + p = get_affine_portal(); + if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || + (fq->state != qman_fq_state_oos && + fq->state != qman_fq_state_parked)) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + if (opts) + mcc->initfq = *opts; + mcc->initfq.fqid = fq->fqid; + mcc->initfq.count = 0; + /* + * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a + * demux pointer. Otherwise, the caller-provided value is allowed to + * stand, don't overwrite it. + */ + if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { + dma_addr_t phys_fq; + + mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB; + mcc->initfq.fqd.context_b = fq_to_tag(fq); + /* + * and the physical address - NB, if the user wasn't trying to + * set CONTEXTA, clear the stashing settings. + */ + if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) { + mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; + memset(&mcc->initfq.fqd.context_a, 0, + sizeof(mcc->initfq.fqd.context_a)); + } else { + phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq), + DMA_TO_DEVICE); + qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); + } + } + if (flags & QMAN_INITFQ_FLAG_LOCAL) { + int wq = 0; + + if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) { + mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ; + wq = 4; + } + qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq); + } + qm_mc_commit(&p->p, myverb); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_err(p->config->dev, "MCR timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); + res = mcr->result; + if (res != QM_MCR_RESULT_OK) { + ret = -EIO; + goto out; + } + if (opts) { + if (opts->we_mask & QM_INITFQ_WE_FQCTRL) { + if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE) + fq_set(fq, QMAN_FQ_STATE_CGR_EN); + else + fq_clear(fq, QMAN_FQ_STATE_CGR_EN); + } + if (opts->we_mask & QM_INITFQ_WE_CGID) + fq->cgr_groupid = opts->fqd.cgid; + } + fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
+ qman_fq_state_sched : qman_fq_state_parked; + +out: + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_init_fq); + +int qman_schedule_fq(struct qman_fq *fq) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p; + int ret = 0; + + if (fq->state != qman_fq_state_parked) + return -EINVAL; +#ifdef CONFIG_FSL_DPAA_CHECKING + if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) + return -EINVAL; +#endif + /* Issue a ALTERFQ_SCHED management command */ + p = get_affine_portal(); + if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || + fq->state != qman_fq_state_parked) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_err(p->config->dev, "ALTER_SCHED timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED); + if (mcr->result != QM_MCR_RESULT_OK) { + ret = -EIO; + goto out; + } + fq->state = qman_fq_state_sched; +out: + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_schedule_fq); + +int qman_retire_fq(struct qman_fq *fq, u32 *flags) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p; + int ret; + u8 res; + + if (fq->state != qman_fq_state_parked && + fq->state != qman_fq_state_sched) + return -EINVAL; +#ifdef CONFIG_FSL_DPAA_CHECKING + if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) + return -EINVAL; +#endif + p = get_affine_portal(); + if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || + fq->state == qman_fq_state_retired || + fq->state == qman_fq_state_oos) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_crit(p->config->dev, "ALTER_RETIRE timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE); + res = mcr->result; + /* + * "Elegant" would be to treat OK/PENDING the same way; set CHANGING, + * and defer the flags until FQRNI or FQRN (respectively) show up. But + * "Friendly" is to process OK immediately, and not set CHANGING. We do + * friendly, otherwise the caller doesn't necessarily have a fully + * "retired" FQ on return even if the retirement was immediate. However + * this does mean some code duplication between here and + * fq_state_change(). + */ + if (res == QM_MCR_RESULT_OK) { + ret = 0; + /* Process 'fq' right away, we'll ignore FQRNI */ + if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) + fq_set(fq, QMAN_FQ_STATE_NE); + if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT) + fq_set(fq, QMAN_FQ_STATE_ORL); + if (flags) + *flags = fq->flags; + fq->state = qman_fq_state_retired; + if (fq->cb.fqs) { + /* + * Another issue with supporting "immediate" retirement + * is that we're forced to drop FQRNIs, because by the + * time they're seen it may already be "too late" (the + * fq may have been OOS'd and free()'d already). But if + * the upper layer wants a callback whether it's + * immediate or not, we have to fake a "MR" entry to + * look like an FQRNI... 
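+			 * (Only the fields an FQRNI handler can reasonably
+			 * look at - verb, fqs, fqid and the contextB tag -
+			 * are filled in below.)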
+ */ + union qm_mr_entry msg; + + msg.verb = QM_MR_VERB_FQRNI; + msg.fq.fqs = mcr->alterfq.fqs; + msg.fq.fqid = fq->fqid; + msg.fq.contextB = fq_to_tag(fq); + fq->cb.fqs(p, fq, &msg); + } + } else if (res == QM_MCR_RESULT_PENDING) { + ret = 1; + fq_set(fq, QMAN_FQ_STATE_CHANGING); + } else { + ret = -EIO; + } +out: + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_retire_fq); + +int qman_oos_fq(struct qman_fq *fq) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p; + int ret = 0; + + if (fq->state != qman_fq_state_retired) + return -EINVAL; +#ifdef CONFIG_FSL_DPAA_CHECKING + if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) + return -EINVAL; +#endif + p = get_affine_portal(); + if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) || + fq->state != qman_fq_state_retired) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); + if (mcr->result != QM_MCR_RESULT_OK) { + ret = -EIO; + goto out; + } + fq->state = qman_fq_state_oos; +out: + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_oos_fq); + +int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + int ret = 0; + + mcc = qm_mc_start(&p->p); + mcc->queryfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); + if (mcr->result == QM_MCR_RESULT_OK) + *fqd = mcr->queryfq.fqd; + else + ret = -EIO; +out: + put_affine_portal(); + return ret; +} + +static int qman_query_fq_np(struct qman_fq *fq, + struct qm_mcr_queryfq_np *np) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + int ret = 0; + + mcc = qm_mc_start(&p->p); + mcc->queryfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); + if (mcr->result == QM_MCR_RESULT_OK) + *np = mcr->queryfq_np; + else if (mcr->result == QM_MCR_RESULT_ERR_FQID) + ret = -ERANGE; + else + ret = -EIO; +out: + put_affine_portal(); + return ret; +} + +static int qman_query_cgr(struct qman_cgr *cgr, + struct qm_mcr_querycgr *cgrd) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + int ret = 0; + + mcc = qm_mc_start(&p->p); + mcc->querycgr.cgid = cgr->cgrid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); + if (mcr->result == QM_MCR_RESULT_OK) + *cgrd = mcr->querycgr; + else { + dev_err(p->config->dev, "QUERY_CGR failed: %s\n", + mcr_result_str(mcr->result)); + ret = -EIO; + } +out: + put_affine_portal(); + return ret; +} + +int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result) +{ + struct qm_mcr_querycgr query_cgr; + int err; + + err = qman_query_cgr(cgr, &query_cgr); + if (err) + return err; + + *result = !!query_cgr.cgr.cs; + return 0; +} +EXPORT_SYMBOL(qman_query_cgr_congested); + +/* internal function used as a wait_event() expression */ +static int 
set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr) +{ + unsigned long irqflags; + int ret = -EBUSY; + + local_irq_save(irqflags); + if (p->vdqcr_owned) + goto out; + if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) + goto out; + + fq_set(fq, QMAN_FQ_STATE_VDQCR); + p->vdqcr_owned = fq; + qm_dqrr_vdqcr_set(&p->p, vdqcr); + ret = 0; +out: + local_irq_restore(irqflags); + return ret; +} + +static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr) +{ + int ret; + + *p = get_affine_portal(); + ret = set_p_vdqcr(*p, fq, vdqcr); + put_affine_portal(); + return ret; +} + +static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq, + u32 vdqcr, u32 flags) +{ + int ret = 0; + + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) + ret = wait_event_interruptible(affine_queue, + !set_vdqcr(p, fq, vdqcr)); + else + wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr)); + return ret; +} + +int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr) +{ + struct qman_portal *p; + int ret; + + if (fq->state != qman_fq_state_parked && + fq->state != qman_fq_state_retired) + return -EINVAL; + if (vdqcr & QM_VDQCR_FQID_MASK) + return -EINVAL; + if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) + return -EBUSY; + vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; + if (flags & QMAN_VOLATILE_FLAG_WAIT) + ret = wait_vdqcr_start(&p, fq, vdqcr, flags); + else + ret = set_vdqcr(&p, fq, vdqcr); + if (ret) + return ret; + /* VDQCR is set */ + if (flags & QMAN_VOLATILE_FLAG_FINISH) { + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) + /* + * NB: don't propagate any error - the caller wouldn't + * know whether the VDQCR was issued or not. A signal + * could arrive after returning anyway, so the caller + * can check signal_pending() if that's an issue. + */ + wait_event_interruptible(affine_queue, + !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); + else + wait_event(affine_queue, + !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); + } + return 0; +} +EXPORT_SYMBOL(qman_volatile_dequeue); + +static void update_eqcr_ci(struct qman_portal *p, u8 avail) +{ + if (avail) + qm_eqcr_cce_prefetch(&p->p); + else + qm_eqcr_cce_update(&p->p); +} + +int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd) +{ + struct qman_portal *p; + struct qm_eqcr_entry *eq; + unsigned long irqflags; + u8 avail; + + p = get_affine_portal(); + local_irq_save(irqflags); + + if (p->use_eqcr_ci_stashing) { + /* + * The stashing case is easy, only update if we need to in + * order to try and liberate ring entries. + */ + eq = qm_eqcr_start_stash(&p->p); + } else { + /* + * The non-stashing case is harder, need to prefetch ahead of + * time. 
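+		 * The "avail < 2" test below seems intended to refresh CI
+		 * while at least one free entry is still known to be in hand,
+		 * so the update cost is paid before the ring appears full.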
+ */ + avail = qm_eqcr_get_avail(&p->p); + if (avail < 2) + update_eqcr_ci(p, avail); + eq = qm_eqcr_start_no_stash(&p->p); + } + + if (unlikely(!eq)) + goto out; + + eq->fqid = fq->fqid; + eq->tag = fq_to_tag(fq); + eq->fd = *fd; + + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE); +out: + local_irq_restore(irqflags); + put_affine_portal(); + return 0; +} +EXPORT_SYMBOL(qman_enqueue); + +static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags, + struct qm_mcc_initcgr *opts) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + u8 verb = QM_MCC_VERB_MODIFYCGR; + int ret = 0; + + mcc = qm_mc_start(&p->p); + if (opts) + mcc->initcgr = *opts; + mcc->initcgr.cgid = cgr->cgrid; + if (flags & QMAN_CGR_FLAG_USE_INIT) + verb = QM_MCC_VERB_INITCGR; + qm_mc_commit(&p->p, verb); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb); + if (mcr->result != QM_MCR_RESULT_OK) + ret = -EIO; + +out: + put_affine_portal(); + return ret; +} + +#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0) +#define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n)) + +static u8 qman_cgr_cpus[CGR_NUM]; + +void qman_init_cgr_all(void) +{ + struct qman_cgr cgr; + int err_cnt = 0; + + for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) { + if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL)) + err_cnt++; + } + + if (err_cnt) + pr_err("Warning: %d error%s while initialising CGR h/w\n", + err_cnt, (err_cnt > 1) ? "s" : ""); +} + +int qman_create_cgr(struct qman_cgr *cgr, u32 flags, + struct qm_mcc_initcgr *opts) +{ + struct qm_mcr_querycgr cgr_state; + struct qm_mcc_initcgr local_opts = {}; + int ret; + struct qman_portal *p; + + /* + * We have to check that the provided CGRID is within the limits of the + * data-structures, for obvious reasons. However we'll let h/w take + * care of determining whether it's within the limits of what exists on + * the SoC. 
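+	 * (CGR_NUM bounds the driver's own tracking structures, e.g. the
+	 * qman_cgr_cpus[] table indexed by cgrid below.)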
+ */ + if (cgr->cgrid >= CGR_NUM) + return -EINVAL; + + preempt_disable(); + p = get_affine_portal(); + qman_cgr_cpus[cgr->cgrid] = smp_processor_id(); + preempt_enable(); + + cgr->chan = p->config->channel; + spin_lock(&p->cgr_lock); + + if (opts) { + ret = qman_query_cgr(cgr, &cgr_state); + if (ret) + goto out; + if (opts) + local_opts = *opts; + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + local_opts.cgr.cscn_targ_upd_ctrl = + QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p); + else + /* Overwrite TARG */ + local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ | + TARG_MASK(p); + local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; + + /* send init if flags indicate so */ + if (opts && (flags & QMAN_CGR_FLAG_USE_INIT)) + ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, + &local_opts); + else + ret = qm_modify_cgr(cgr, 0, &local_opts); + if (ret) + goto out; + } + + list_add(&cgr->node, &p->cgr_cbs); + + /* Determine if newly added object requires its callback to be called */ + ret = qman_query_cgr(cgr, &cgr_state); + if (ret) { + /* we can't go back, so proceed and return success */ + dev_err(p->config->dev, "CGR HW state partially modified\n"); + ret = 0; + goto out; + } + if (cgr->cb && cgr_state.cgr.cscn_en && + qman_cgrs_get(&p->cgrs[1], cgr->cgrid)) + cgr->cb(p, cgr, 1); +out: + spin_unlock(&p->cgr_lock); + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_create_cgr); + +int qman_delete_cgr(struct qman_cgr *cgr) +{ + unsigned long irqflags; + struct qm_mcr_querycgr cgr_state; + struct qm_mcc_initcgr local_opts; + int ret = 0; + struct qman_cgr *i; + struct qman_portal *p = get_affine_portal(); + + if (cgr->chan != p->config->channel) { + /* attempt to delete from other portal than creator */ + dev_err(p->config->dev, "CGR not owned by current portal"); + dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n", + cgr->chan, p->config->channel); + + ret = -EINVAL; + goto put_portal; + } + memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); + spin_lock_irqsave(&p->cgr_lock, irqflags); + list_del(&cgr->node); + /* + * If there are no other CGR objects for this CGRID in the list, + * update CSCN_TARG accordingly + */ + list_for_each_entry(i, &p->cgr_cbs, node) + if (i->cgrid == cgr->cgrid && i->cb) + goto release_lock; + ret = qman_query_cgr(cgr, &cgr_state); + if (ret) { + /* add back to the list */ + list_add(&cgr->node, &p->cgr_cbs); + goto release_lock; + } + /* Overwrite TARG */ + local_opts.we_mask = QM_CGR_WE_CSCN_TARG; + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p); + else + local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ & + ~(TARG_MASK(p)); + ret = qm_modify_cgr(cgr, 0, &local_opts); + if (ret) + /* add back to the list */ + list_add(&cgr->node, &p->cgr_cbs); +release_lock: + spin_unlock_irqrestore(&p->cgr_lock, irqflags); +put_portal: + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_delete_cgr); + +struct cgr_comp { + struct qman_cgr *cgr; + struct completion completion; +}; + +static int qman_delete_cgr_thread(void *p) +{ + struct cgr_comp *cgr_comp = (struct cgr_comp *)p; + int ret; + + ret = qman_delete_cgr(cgr_comp->cgr); + complete(&cgr_comp->completion); + + return ret; +} + +void qman_delete_cgr_safe(struct qman_cgr *cgr) +{ + struct task_struct *thread; + struct cgr_comp cgr_comp; + + preempt_disable(); + if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { + init_completion(&cgr_comp.completion); + cgr_comp.cgr = cgr; + thread = kthread_create(qman_delete_cgr_thread, &cgr_comp, + "cgr_del"); + + if 
(IS_ERR(thread)) + goto out; + + kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]); + wake_up_process(thread); + wait_for_completion(&cgr_comp.completion); + preempt_enable(); + return; + } +out: + qman_delete_cgr(cgr); + preempt_enable(); +} +EXPORT_SYMBOL(qman_delete_cgr_safe); + +/* Cleanup FQs */ + +static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v) +{ + const union qm_mr_entry *msg; + int found = 0; + + qm_mr_pvb_update(p); + msg = qm_mr_current(p); + while (msg) { + if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v) + found = 1; + qm_mr_next(p); + qm_mr_cci_consume_to_current(p); + qm_mr_pvb_update(p); + msg = qm_mr_current(p); + } + return found; +} + +static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s, + bool wait) +{ + const struct qm_dqrr_entry *dqrr; + int found = 0; + + do { + qm_dqrr_pvb_update(p); + dqrr = qm_dqrr_current(p); + if (!dqrr) + cpu_relax(); + } while (wait && !dqrr); + + while (dqrr) { + if (dqrr->fqid == fqid && (dqrr->stat & s)) + found = 1; + qm_dqrr_cdc_consume_1ptr(p, dqrr, 0); + qm_dqrr_pvb_update(p); + qm_dqrr_next(p); + dqrr = qm_dqrr_current(p); + } + return found; +} + +#define qm_mr_drain(p, V) \ + _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V) + +#define qm_dqrr_drain(p, f, S) \ + _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false) + +#define qm_dqrr_drain_wait(p, f, S) \ + _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true) + +#define qm_dqrr_drain_nomatch(p) \ + _qm_dqrr_consume_and_match(p, 0, 0, false) + +static int qman_shutdown_fq(u32 fqid) +{ + struct qman_portal *p; + struct device *dev; + union qm_mc_command *mcc; + union qm_mc_result *mcr; + int orl_empty, drain = 0, ret = 0; + u32 channel, wq, res; + u8 state; + + p = get_affine_portal(); + dev = p->config->dev; + /* Determine the state of the FQID */ + mcc = qm_mc_start(&p->p); + mcc->queryfq_np.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_err(dev, "QUERYFQ_NP timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); + state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK; + if (state == QM_MCR_NP_STATE_OOS) + goto out; /* Already OOS, no need to do anymore checks */ + + /* Query which channel the FQ is using */ + mcc = qm_mc_start(&p->p); + mcc->queryfq.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_err(dev, "QUERYFQ timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); + /* Need to store these since the MCR gets reused */ + channel = qm_fqd_get_chan(&mcr->queryfq.fqd); + wq = qm_fqd_get_wq(&mcr->queryfq.fqd); + + switch (state) { + case QM_MCR_NP_STATE_TEN_SCHED: + case QM_MCR_NP_STATE_TRU_SCHED: + case QM_MCR_NP_STATE_ACTIVE: + case QM_MCR_NP_STATE_PARKED: + orl_empty = 0; + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_err(dev, "QUERYFQ_NP timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_MCR_VERB_ALTER_RETIRE); + res = mcr->result; /* Make a copy as we reuse MCR below */ + + if (res == QM_MCR_RESULT_PENDING) { + /* + * Need to wait for the FQRN in the message ring, which + * will only occur once the FQ has been drained. 
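+			 * (e.g. assuming the FQ sits on the first pool
+			 * channel at work queue 3, the dequeue_wq computed
+			 * below comes out as (0 + 1) << 4 | 3 = 0x13.)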
In + * order for the FQ to drain the portal needs to be set + * to dequeue from the channel the FQ is scheduled on + */ + int found_fqrn = 0; + u16 dequeue_wq = 0; + + /* Flag that we need to drain FQ */ + drain = 1; + + if (channel >= qm_channel_pool1 && + channel < qm_channel_pool1 + 15) { + /* Pool channel, enable the bit in the portal */ + dequeue_wq = (channel - + qm_channel_pool1 + 1)<<4 | wq; + } else if (channel < qm_channel_pool1) { + /* Dedicated channel */ + dequeue_wq = wq; + } else { + dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x", + fqid, channel); + ret = -EBUSY; + goto out; + } + /* Set the sdqcr to drain this channel */ + if (channel < qm_channel_pool1) + qm_dqrr_sdqcr_set(&p->p, + QM_SDQCR_TYPE_ACTIVE | + QM_SDQCR_CHANNELS_DEDICATED); + else + qm_dqrr_sdqcr_set(&p->p, + QM_SDQCR_TYPE_ACTIVE | + QM_SDQCR_CHANNELS_POOL_CONV + (channel)); + do { + /* Keep draining DQRR while checking the MR*/ + qm_dqrr_drain_nomatch(&p->p); + /* Process message ring too */ + found_fqrn = qm_mr_drain(&p->p, FQRN); + cpu_relax(); + } while (!found_fqrn); + + } + if (res != QM_MCR_RESULT_OK && + res != QM_MCR_RESULT_PENDING) { + dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n", + fqid, res); + ret = -EIO; + goto out; + } + if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) { + /* + * ORL had no entries, no need to wait until the + * ERNs come in + */ + orl_empty = 1; + } + /* + * Retirement succeeded, check to see if FQ needs + * to be drained + */ + if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) { + /* FQ is Not Empty, drain using volatile DQ commands */ + do { + u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3); + + qm_dqrr_vdqcr_set(&p->p, vdqcr); + /* + * Wait for a dequeue and process the dequeues, + * making sure to empty the ring completely + */ + } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY)); + } + qm_dqrr_sdqcr_set(&p->p, 0); + + while (!orl_empty) { + /* Wait for the ORL to have been completely drained */ + orl_empty = qm_mr_drain(&p->p, FQRL); + cpu_relax(); + } + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_MCR_VERB_ALTER_OOS); + if (mcr->result != QM_MCR_RESULT_OK) { + dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n", + fqid, mcr->result); + ret = -EIO; + goto out; + } + break; + + case QM_MCR_NP_STATE_RETIRED: + /* Send OOS Command */ + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_MCR_VERB_ALTER_OOS); + if (mcr->result) { + dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n", + fqid, mcr->result); + ret = -EIO; + goto out; + } + break; + + case QM_MCR_NP_STATE_OOS: + /* Done */ + break; + + default: + ret = -EIO; + } + +out: + put_affine_portal(); + return ret; +} + +const struct qm_portal_config *qman_get_qm_portal_config( + struct qman_portal *portal) +{ + return portal->config; +} + +struct gen_pool *qm_fqalloc; /* FQID allocator */ +struct gen_pool *qm_qpalloc; /* pool-channel allocator */ +struct gen_pool *qm_cgralloc; /* CGR ID allocator */ + +static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt) +{ + unsigned long addr; + + addr = gen_pool_alloc(p, cnt); + if (!addr) + return -ENOMEM; + + *result = addr & ~DPAA_GENALLOC_OFF; + + return 0; +} + +int qman_alloc_fqid_range(u32 
*result, u32 count) +{ + return qman_alloc_range(qm_fqalloc, result, count); +} +EXPORT_SYMBOL(qman_alloc_fqid_range); + +int qman_alloc_pool_range(u32 *result, u32 count) +{ + return qman_alloc_range(qm_qpalloc, result, count); +} +EXPORT_SYMBOL(qman_alloc_pool_range); + +int qman_alloc_cgrid_range(u32 *result, u32 count) +{ + return qman_alloc_range(qm_cgralloc, result, count); +} +EXPORT_SYMBOL(qman_alloc_cgrid_range); + +int qman_release_fqid(u32 fqid) +{ + int ret = qman_shutdown_fq(fqid); + + if (ret) { + pr_debug("FQID %d leaked\n", fqid); + return ret; + } + + gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1); + return 0; +} +EXPORT_SYMBOL(qman_release_fqid); + +static int qpool_cleanup(u32 qp) +{ + /* + * We query all FQDs starting from + * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs + * whose destination channel is the pool-channel being released. + * When a non-OOS FQD is found we attempt to clean it up + */ + struct qman_fq fq = { + .fqid = QM_FQID_RANGE_START + }; + int err; + + do { + struct qm_mcr_queryfq_np np; + + err = qman_query_fq_np(&fq, &np); + if (err) + /* FQID range exceeded, found no problems */ + return 0; + if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { + struct qm_fqd fqd; + + err = qman_query_fq(&fq, &fqd); + if (WARN_ON(err)) + return 0; + if (qm_fqd_get_chan(&fqd) == qp) { + /* The channel is the FQ's target, clean it */ + err = qman_shutdown_fq(fq.fqid); + if (err) + /* + * Couldn't shut down the FQ + * so the pool must be leaked + */ + return err; + } + } + /* Move to the next FQID */ + fq.fqid++; + } while (1); +} + +int qman_release_pool(u32 qp) +{ + int ret; + + ret = qpool_cleanup(qp); + if (ret) { + pr_debug("CHID %d leaked\n", qp); + return ret; + } + + gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1); + return 0; +} +EXPORT_SYMBOL(qman_release_pool); + +static int cgr_cleanup(u32 cgrid) +{ + /* + * query all FQDs starting from FQID 1 until we get an "invalid FQID" + * error, looking for non-OOS FQDs whose CGR is the CGR being released + */ + struct qman_fq fq = { + .fqid = 1 + }; + int err; + + do { + struct qm_mcr_queryfq_np np; + + err = qman_query_fq_np(&fq, &np); + if (err) + /* FQID range exceeded, found no problems */ + return 0; + if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { + struct qm_fqd fqd; + + err = qman_query_fq(&fq, &fqd); + if (WARN_ON(err)) + return 0; + if ((fqd.fq_ctrl & QM_FQCTRL_CGE) && + fqd.cgid == cgrid) { + pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n", + cgrid, fq.fqid); + return -EIO; + } + } + /* Move to the next FQID */ + fq.fqid++; + } while (1); +} + +int qman_release_cgrid(u32 cgrid) +{ + int ret; + + ret = cgr_cleanup(cgrid); + if (ret) { + pr_debug("CGRID %d leaked\n", cgrid); + return ret; + } + + gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1); + return 0; +} +EXPORT_SYMBOL(qman_release_cgrid); diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c new file mode 100644 index 000000000000..0cace9e0077e --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_ccsr.c @@ -0,0 +1,808 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
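/*
 * Illustrative sketch, not part of this patch: drawing a contiguous block of
 * FQIDs from the range seeded at probe time and handing one back. Note that
 * qman_release_fqid() above shuts the FQ down first and deliberately leaks
 * the ID if that fails.
 */
static int my_fqid_demo(void)
{
	u32 base;
	int ret;

	ret = qman_alloc_fqid_range(&base, 4); /* FQIDs base .. base + 3 */
	if (ret)
		return ret;
	/* ... create and use frame queues on these FQIDs ... */
	return qman_release_fqid(base);
}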
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_priv.h" + +u16 qman_ip_rev; +EXPORT_SYMBOL(qman_ip_rev); +u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1; +EXPORT_SYMBOL(qm_channel_pool1); + +/* Register offsets */ +#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10)) +#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10)) +#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10)) +#define REG_DD_CFG 0x0200 +#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10)) +#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10)) +#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10)) +#define REG_PFDR_FPC 0x0400 +#define REG_PFDR_FP_HEAD 0x0404 +#define REG_PFDR_FP_TAIL 0x0408 +#define REG_PFDR_FP_LWIT 0x0410 +#define REG_PFDR_CFG 0x0414 +#define REG_SFDR_CFG 0x0500 +#define REG_SFDR_IN_USE 0x0504 +#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04)) +#define REG_WQ_DEF_ENC_WQID 0x0630 +#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04)) +#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04)) +#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04)) +#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04)) +#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */ +#define REG_CM_CFG 0x0800 +#define REG_ECSR 0x0a00 +#define REG_ECIR 0x0a04 +#define REG_EADR 0x0a08 +#define REG_ECIR2 0x0a0c +#define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) +#define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) +#define REG_MCR 0x0b00 +#define REG_MCP(n) (0x0b04 + ((n) * 0x04)) +#define REG_MISC_CFG 0x0be0 +#define REG_HID_CFG 0x0bf0 +#define REG_IDLE_STAT 0x0bf4 +#define REG_IP_REV_1 0x0bf8 +#define REG_IP_REV_2 0x0bfc +#define REG_FQD_BARE 0x0c00 +#define REG_PFDR_BARE 0x0c20 +#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */ +#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */ +#define REG_QCSP_BARE 0x0c80 +#define REG_QCSP_BAR 0x0c84 +#define REG_CI_SCHED_CFG 0x0d00 +#define REG_SRCIDR 0x0d04 +#define REG_LIODNR 0x0d08 +#define REG_CI_RLM_AVG 0x0d14 +#define REG_ERR_ISR 0x0e00 +#define REG_ERR_IER 0x0e04 +#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10)) +#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10)) +#define REG_REV3_QCSP_DD_CFG(n) (0x100c 
+ ((n) * 0x10)) + +/* Assists for QMAN_MCR */ +#define MCR_INIT_PFDR 0x01000000 +#define MCR_get_rslt(v) (u8)((v) >> 24) +#define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0)) +#define MCR_rslt_ok(r) ((r) == 0xf0) +#define MCR_rslt_eaccess(r) ((r) == 0xf8) +#define MCR_rslt_inval(r) ((r) == 0xff) + +/* + * Corenet initiator settings. Stash request queues are 4-deep to match cores + * ability to snarf. Stash priority is 3, other priorities are 2. + */ +#define QM_CI_SCHED_CFG_SRCCIV 4 +#define QM_CI_SCHED_CFG_SRQ_W 3 +#define QM_CI_SCHED_CFG_RW_W 2 +#define QM_CI_SCHED_CFG_BMAN_W 2 +/* write SRCCIV enable */ +#define QM_CI_SCHED_CFG_SRCCIV_EN BIT(31) + +/* Follows WQ_CS_CFG0-5 */ +enum qm_wq_class { + qm_wq_portal = 0, + qm_wq_pool = 1, + qm_wq_fman0 = 2, + qm_wq_fman1 = 3, + qm_wq_caam = 4, + qm_wq_pme = 5, + qm_wq_first = qm_wq_portal, + qm_wq_last = qm_wq_pme +}; + +/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */ +enum qm_memory { + qm_memory_fqd, + qm_memory_pfdr +}; + +/* Used by all error interrupt registers except 'inhibit' */ +#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */ +#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */ +#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */ +#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */ +#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */ +#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */ +#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */ +#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */ +#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */ +#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */ +#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */ +#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */ +#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */ +#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */ +#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */ +#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */ +#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */ +#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */ + +/* QMAN_ECIR valid error bit */ +#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \ + QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \ + QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI) +#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \ + QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \ + QM_EIRQ_IFSI) + +struct qm_ecir { + u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */ +}; + +static bool qm_ecir_is_dcp(const struct qm_ecir *p) +{ + return p->info & BIT(29); +} + +static int qm_ecir_get_pnum(const struct qm_ecir *p) +{ + return (p->info >> 24) & 0x1f; +} + +static int qm_ecir_get_fqid(const struct qm_ecir *p) +{ + return p->info & (BIT(24) - 1); +} + +struct qm_ecir2 { + u32 info; /* ptyp[31], res[10-30], pnum[0-9] */ +}; + +static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p) +{ + return p->info & BIT(31); +} + +static int qm_ecir2_get_pnum(const struct qm_ecir2 *p) +{ + return p->info & (BIT(10) - 1); +} + +struct qm_eadr { + u32 info; /* memid[24-27], eadr[0-11] */ + /* v3: memid[24-28], eadr[0-15] */ +}; + +static int qm_eadr_get_memid(const struct qm_eadr *p) +{ + return (p->info >> 24) & 0xf; +} + +static int qm_eadr_get_eadr(const struct qm_eadr *p) +{ + return p->info & (BIT(12) - 1); +} + +static int qm_eadr_v3_get_memid(const struct qm_eadr *p) +{ + return (p->info 
>> 24) & 0x1f; +} + +static int qm_eadr_v3_get_eadr(const struct qm_eadr *p) +{ + return p->info & (BIT(16) - 1); +} + +struct qman_hwerr_txt { + u32 mask; + const char *txt; +}; + + +static const struct qman_hwerr_txt qman_hwerr_txts[] = { + { QM_EIRQ_CIDE, "Corenet Initiator Data Error" }, + { QM_EIRQ_CTDE, "Corenet Target Data Error" }, + { QM_EIRQ_CITT, "Corenet Invalid Target Transaction" }, + { QM_EIRQ_PLWI, "PFDR Low Watermark" }, + { QM_EIRQ_MBEI, "Multi-bit ECC Error" }, + { QM_EIRQ_SBEI, "Single-bit ECC Error" }, + { QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" }, + { QM_EIRQ_ICVI, "Invalid Command Verb" }, + { QM_EIRQ_IFSI, "Invalid Flow Control State" }, + { QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" }, + { QM_EIRQ_IDFI, "Invalid Dequeue FQ" }, + { QM_EIRQ_IDSI, "Invalid Dequeue Source" }, + { QM_EIRQ_IDQI, "Invalid Dequeue Queue" }, + { QM_EIRQ_IECE, "Invalid Enqueue Configuration" }, + { QM_EIRQ_IEOI, "Invalid Enqueue Overflow" }, + { QM_EIRQ_IESI, "Invalid Enqueue State" }, + { QM_EIRQ_IECI, "Invalid Enqueue Channel" }, + { QM_EIRQ_IEQI, "Invalid Enqueue Queue" }, +}; + +struct qman_error_info_mdata { + u16 addr_mask; + u16 bits; + const char *txt; +}; + +static const struct qman_error_info_mdata error_mdata[] = { + { 0x01FF, 24, "FQD cache tag memory 0" }, + { 0x01FF, 24, "FQD cache tag memory 1" }, + { 0x01FF, 24, "FQD cache tag memory 2" }, + { 0x01FF, 24, "FQD cache tag memory 3" }, + { 0x0FFF, 512, "FQD cache memory" }, + { 0x07FF, 128, "SFDR memory" }, + { 0x01FF, 72, "WQ context memory" }, + { 0x00FF, 240, "CGR memory" }, + { 0x00FF, 302, "Internal Order Restoration List memory" }, + { 0x01FF, 256, "SW portal ring memory" }, +}; + +#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI) + +/* + * TODO: unimplemented registers + * + * Keeping a list here of QMan registers I have not yet covered; + * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR, + * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG, + * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12 + */ + +/* Pointer to the start of the QMan's CCSR space */ +static u32 __iomem *qm_ccsr_start; +/* A SDQCR mask comprising all the available/visible pool channels */ +static u32 qm_pools_sdqcr; + +static inline u32 qm_ccsr_in(u32 offset) +{ + return ioread32be(qm_ccsr_start + offset/4); +} + +static inline void qm_ccsr_out(u32 offset, u32 val) +{ + iowrite32be(val, qm_ccsr_start + offset/4); +} + +u32 qm_get_pools_sdqcr(void) +{ + return qm_pools_sdqcr; +} + +enum qm_dc_portal { + qm_dc_portal_fman0 = 0, + qm_dc_portal_fman1 = 1 +}; + +static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd) +{ + DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 || + portal == qm_dc_portal_fman1); + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + qm_ccsr_out(REG_DCP_CFG(portal), + (ed ? 0x1000 : 0) | (sernd & 0x3ff)); + else + qm_ccsr_out(REG_DCP_CFG(portal), + (ed ? 
0x100 : 0) | (sernd & 0x1f)); +} + +static void qm_set_wq_scheduling(enum qm_wq_class wq_class, + u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, + u8 csw5, u8 csw6, u8 csw7) +{ + qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) | + ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) | + ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) | + ((csw6 & 0x7) << 4) | (csw7 & 0x7)); +} + +static void qm_set_hid(void) +{ + qm_ccsr_out(REG_HID_CFG, 0); +} + +static void qm_set_corenet_initiator(void) +{ + qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN | + (QM_CI_SCHED_CFG_SRCCIV << 24) | + (QM_CI_SCHED_CFG_SRQ_W << 8) | + (QM_CI_SCHED_CFG_RW_W << 4) | + QM_CI_SCHED_CFG_BMAN_W); +} + +static void qm_get_version(u16 *id, u8 *major, u8 *minor) +{ + u32 v = qm_ccsr_in(REG_IP_REV_1); + *id = (v >> 16); + *major = (v >> 8) & 0xff; + *minor = v & 0xff; +} + +#define PFDR_AR_EN BIT(31) +static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size) +{ + u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE; + u32 exp = ilog2(size); + + /* choke if size isn't within range */ + DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) && + is_power_of_2(size)); + /* choke if 'ba' has lower-alignment than 'size' */ + DPAA_ASSERT(!(ba & (size - 1))); + qm_ccsr_out(offset, upper_32_bits(ba)); + qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba)); + qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1)); +} + +static void qm_set_pfdr_threshold(u32 th, u8 k) +{ + qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff); + qm_ccsr_out(REG_PFDR_CFG, k); +} + +static void qm_set_sfdr_threshold(u16 th) +{ + qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff); +} + +static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num) +{ + u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR)); + + DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num); + /* Make sure the command interface is 'idle' */ + if (!MCR_rslt_idle(rslt)) { + dev_crit(dev, "QMAN_MCR isn't idle"); + WARN_ON(1); + } + + /* Write the MCR command params then the verb */ + qm_ccsr_out(REG_MCP(0), pfdr_start); + /* + * TODO: remove this - it's a workaround for a model bug that is + * corrected in more recent versions. We use the workaround until + * everyone has upgraded. + */ + qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16); + dma_wmb(); + qm_ccsr_out(REG_MCR, MCR_INIT_PFDR); + /* Poll for the result */ + do { + rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR)); + } while (!MCR_rslt_idle(rslt)); + if (MCR_rslt_ok(rslt)) + return 0; + if (MCR_rslt_eaccess(rslt)) + return -EACCES; + if (MCR_rslt_inval(rslt)) + return -EINVAL; + dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt); + return -ENODEV; +} + +/* + * Ideally we would use the DMA API to turn rmem->base into a DMA address + * (especially if iommu translations ever get involved). Unfortunately, the + * DMA API currently does not allow mapping anything that is not backed with + * a struct page. 
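/*
 * Worked example, illustrative only: for a 16 MiB FQD reservation based at
 * physical address 0x1_0000_0000, qm_set_memory() above ends up writing
 *
 *	FQD_BARE = 0x00000001			(upper 32 bits of base)
 *	FQD_BAR  = 0x00000000			(lower 32 bits of base)
 *	FQD_AR   = PFDR_AR_EN | (ilog2(16 MiB) - 1) = BIT(31) | 23
 *
 * and the DPAA_ASSERTs reject any size that is not a power of two in
 * [4 KiB, 1 GiB], or a base not naturally aligned to that size.
 */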
+ */ +static dma_addr_t fqd_a, pfdr_a; +static size_t fqd_sz, pfdr_sz; + +static int qman_fqd(struct reserved_mem *rmem) +{ + fqd_a = rmem->base; + fqd_sz = rmem->size; + + WARN_ON(!(fqd_a && fqd_sz)); + + return 0; +} +RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd); + +static int qman_pfdr(struct reserved_mem *rmem) +{ + pfdr_a = rmem->base; + pfdr_sz = rmem->size; + + WARN_ON(!(pfdr_a && pfdr_sz)); + + return 0; +} +RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr); + +static unsigned int qm_get_fqid_maxcnt(void) +{ + return fqd_sz / 64; +} + +/* + * Flush this memory range from data cache so that QMAN originated + * transactions for this memory region could be marked non-coherent. + */ +static int zero_priv_mem(struct device *dev, struct device_node *node, + phys_addr_t addr, size_t sz) +{ + /* map as cacheable, non-guarded */ + void __iomem *tmpp = ioremap_prot(addr, sz, 0); + + memset_io(tmpp, 0, sz); + flush_dcache_range((unsigned long)tmpp, + (unsigned long)tmpp + sz); + iounmap(tmpp); + + return 0; +} + +static void log_edata_bits(struct device *dev, u32 bit_count) +{ + u32 i, j, mask = 0xffffffff; + + dev_warn(dev, "ErrInt, EDATA:\n"); + i = bit_count / 32; + if (bit_count % 32) { + i++; + mask = ~(mask << bit_count % 32); + } + j = 16 - i; + dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask); + j++; + for (; j < 16; j++) + dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j))); +} + +static void log_additional_error_info(struct device *dev, u32 isr_val, + u32 ecsr_val) +{ + struct qm_ecir ecir_val; + struct qm_eadr eadr_val; + int memid; + + ecir_val.info = qm_ccsr_in(REG_ECIR); + /* Is portal info valid */ + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { + struct qm_ecir2 ecir2_val; + + ecir2_val.info = qm_ccsr_in(REG_ECIR2); + if (ecsr_val & PORTAL_ECSR_ERR) { + dev_warn(dev, "ErrInt: %s id %d\n", + qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP", + qm_ecir2_get_pnum(&ecir2_val)); + } + if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE)) + dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n", + qm_ecir_get_fqid(&ecir_val)); + + if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { + eadr_val.info = qm_ccsr_in(REG_EADR); + memid = qm_eadr_v3_get_memid(&eadr_val); + dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n", + error_mdata[memid].txt, + error_mdata[memid].addr_mask + & qm_eadr_v3_get_eadr(&eadr_val)); + log_edata_bits(dev, error_mdata[memid].bits); + } + } else { + if (ecsr_val & PORTAL_ECSR_ERR) { + dev_warn(dev, "ErrInt: %s id %d\n", + qm_ecir_is_dcp(&ecir_val) ? 
"DCP" : "SWP", + qm_ecir_get_pnum(&ecir_val)); + } + if (ecsr_val & FQID_ECSR_ERR) + dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n", + qm_ecir_get_fqid(&ecir_val)); + + if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { + eadr_val.info = qm_ccsr_in(REG_EADR); + memid = qm_eadr_get_memid(&eadr_val); + dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n", + error_mdata[memid].txt, + error_mdata[memid].addr_mask + & qm_eadr_get_eadr(&eadr_val)); + log_edata_bits(dev, error_mdata[memid].bits); + } + } +} + +static irqreturn_t qman_isr(int irq, void *ptr) +{ + u32 isr_val, ier_val, ecsr_val, isr_mask, i; + struct device *dev = ptr; + + ier_val = qm_ccsr_in(REG_ERR_IER); + isr_val = qm_ccsr_in(REG_ERR_ISR); + ecsr_val = qm_ccsr_in(REG_ECSR); + isr_mask = isr_val & ier_val; + + if (!isr_mask) + return IRQ_NONE; + + for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) { + if (qman_hwerr_txts[i].mask & isr_mask) { + dev_err_ratelimited(dev, "ErrInt: %s\n", + qman_hwerr_txts[i].txt); + if (qman_hwerr_txts[i].mask & ecsr_val) { + log_additional_error_info(dev, isr_mask, + ecsr_val); + /* Re-arm error capture registers */ + qm_ccsr_out(REG_ECSR, ecsr_val); + } + if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) { + dev_dbg(dev, "Disabling error 0x%x\n", + qman_hwerr_txts[i].mask); + ier_val &= ~qman_hwerr_txts[i].mask; + qm_ccsr_out(REG_ERR_IER, ier_val); + } + } + } + qm_ccsr_out(REG_ERR_ISR, isr_val); + + return IRQ_HANDLED; +} + +static int qman_init_ccsr(struct device *dev) +{ + int i, err; + + /* FQD memory */ + qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz); + /* PFDR memory */ + qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz); + err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8); + if (err) + return err; + /* thresholds */ + qm_set_pfdr_threshold(512, 64); + qm_set_sfdr_threshold(128); + /* clear stale PEBI bit from interrupt status register */ + qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI); + /* corenet initiator settings */ + qm_set_corenet_initiator(); + /* HID settings */ + qm_set_hid(); + /* Set scheduling weights to defaults */ + for (i = qm_wq_first; i <= qm_wq_last; i++) + qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0); + /* We are not prepared to accept ERNs for hardware enqueues */ + qm_set_dc(qm_dc_portal_fman0, 1, 0); + qm_set_dc(qm_dc_portal_fman1, 1, 0); + return 0; +} + +#define LIO_CFG_LIODN_MASK 0x0fff0000 +void qman_liodn_fixup(u16 channel) +{ + static int done; + static u32 liodn_offset; + u32 before, after; + int idx = channel - QM_CHANNEL_SWPORTAL0; + + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx)); + else + before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx)); + if (!done) { + liodn_offset = before & LIO_CFG_LIODN_MASK; + done = 1; + return; + } + after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset; + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after); + else + qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after); +} + +#define IO_CFG_SDEST_MASK 0x00ff0000 +void qman_set_sdest(u16 channel, unsigned int cpu_idx) +{ + int idx = channel - QM_CHANNEL_SWPORTAL0; + u32 before, after; + + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { + before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx)); + /* Each pair of vcpu share the same SRQ(SDEST) */ + cpu_idx /= 2; + after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16); + qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after); + } else { + before = qm_ccsr_in(REG_QCSP_IO_CFG(idx)); + after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16); + qm_ccsr_out(REG_QCSP_IO_CFG(idx), after); + } +} + +static int 
qman_resource_init(struct device *dev) +{ + int pool_chan_num, cgrid_num; + int ret, i; + + switch (qman_ip_rev >> 8) { + case 1: + pool_chan_num = 15; + cgrid_num = 256; + break; + case 2: + pool_chan_num = 3; + cgrid_num = 64; + break; + case 3: + pool_chan_num = 15; + cgrid_num = 256; + break; + default: + return -ENODEV; + } + + ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF, + pool_chan_num, -1); + if (ret) { + dev_err(dev, "Failed to seed pool channels (%d)\n", ret); + return ret; + } + + ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1); + if (ret) { + dev_err(dev, "Failed to seed CGRID range (%d)\n", ret); + return ret; + } + + /* parse pool channels into the SDQCR mask */ + for (i = 0; i < cgrid_num; i++) + qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i); + + ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF, + qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1); + if (ret) { + dev_err(dev, "Failed to seed FQID range (%d)\n", ret); + return ret; + } + + return 0; +} + +static int fsl_qman_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource *res; + int ret, err_irq; + u16 id; + u8 major, minor; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n", + node->full_name); + return -ENXIO; + } + qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res)); + if (!qm_ccsr_start) + return -ENXIO; + + qm_get_version(&id, &major, &minor); + if (major == 1 && minor == 0) { + dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n"); + return -ENODEV; + } else if (major == 1 && minor == 1) + qman_ip_rev = QMAN_REV11; + else if (major == 1 && minor == 2) + qman_ip_rev = QMAN_REV12; + else if (major == 2 && minor == 0) + qman_ip_rev = QMAN_REV20; + else if (major == 3 && minor == 0) + qman_ip_rev = QMAN_REV30; + else if (major == 3 && minor == 1) + qman_ip_rev = QMAN_REV31; + else { + dev_err(dev, "Unknown QMan version\n"); + return -ENODEV; + } + + if ((qman_ip_rev & 0xff00) >= QMAN_REV30) + qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3; + + ret = zero_priv_mem(dev, node, fqd_a, fqd_sz); + WARN_ON(ret); + if (ret) + return -ENODEV; + + ret = qman_init_ccsr(dev); + if (ret) { + dev_err(dev, "CCSR setup failed\n"); + return ret; + } + + err_irq = platform_get_irq(pdev, 0); + if (err_irq <= 0) { + dev_info(dev, "Can't get %s property 'interrupts'\n", + node->full_name); + return -ENODEV; + } + ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err", + dev); + if (ret) { + dev_err(dev, "devm_request_irq() failed %d for '%s'\n", + ret, node->full_name); + return ret; + } + + /* + * Write-to-clear any stale bits, (eg. starvation being asserted prior + * to resource allocation during driver init). 
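/*
 * Illustrative sketch, not part of this patch: the genalloc convention used
 * in the seeding above. gen_pool_alloc() returns 0 on failure, so a valid
 * ID 0 could not otherwise be told apart from "no ID"; each range is
 * therefore seeded at (id | DPAA_GENALLOC_OFF), the offset is masked off
 * again in qman_alloc_range() and re-applied on free. Callers only ever see
 * plain IDs:
 */
static int my_cgrid_demo(void)
{
	u32 cgrid;
	int ret;

	ret = qman_alloc_cgrid_range(&cgrid, 1);
	if (ret)
		return ret;
	/* ... attach the CGR to frame queues ... */
	return qman_release_cgrid(cgrid);
}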
+ */ + qm_ccsr_out(REG_ERR_ISR, 0xffffffff); + /* Enable Error Interrupts */ + qm_ccsr_out(REG_ERR_IER, 0xffffffff); + + qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc"); + if (IS_ERR(qm_fqalloc)) { + ret = PTR_ERR(qm_fqalloc); + dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret); + return ret; + } + + qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc"); + if (IS_ERR(qm_qpalloc)) { + ret = PTR_ERR(qm_qpalloc); + dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret); + return ret; + } + + qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc"); + if (IS_ERR(qm_cgralloc)) { + ret = PTR_ERR(qm_cgralloc); + dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret); + return ret; + } + + ret = qman_resource_init(dev); + if (ret) + return ret; + + ret = qman_alloc_fq_table(qm_get_fqid_maxcnt()); + if (ret) + return ret; + + ret = qman_wq_alloc(); + if (ret) + return ret; + + return 0; +} + +static const struct of_device_id fsl_qman_ids[] = { + { + .compatible = "fsl,qman", + }, + {} +}; + +static struct platform_driver fsl_qman_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = fsl_qman_ids, + .suppress_bind_attrs = true, + }, + .probe = fsl_qman_probe, +}; + +builtin_platform_driver(fsl_qman_driver); diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c new file mode 100644 index 000000000000..148614388fca --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -0,0 +1,355 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
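/*
 * Worked example, illustrative only: qman_alloc_fq_table() above is sized
 * from the FQD reservation at 64 bytes per frame-queue descriptor (hence
 * the division in qm_get_fqid_maxcnt()). A 1 MiB "fsl,qman-fqd" region
 * therefore supports 0x100000 / 64 = 16384 FQIDs.
 */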
+ */ + +#include "qman_priv.h" + +/* Enable portal interupts (as opposed to polling mode) */ +#define CONFIG_FSL_DPA_PIRQ_SLOW 1 +#define CONFIG_FSL_DPA_PIRQ_FAST 1 + +static struct cpumask portal_cpus; +/* protect qman global registers and global data shared among portals */ +static DEFINE_SPINLOCK(qman_lock); + +static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) +{ +#ifdef CONFIG_FSL_PAMU + struct device *dev = pcfg->dev; + int window_count = 1; + struct iommu_domain_geometry geom_attr; + struct pamu_stash_attribute stash_attr; + int ret; + + pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type); + if (!pcfg->iommu_domain) { + dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__); + goto no_iommu; + } + geom_attr.aperture_start = 0; + geom_attr.aperture_end = + ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1; + geom_attr.force_aperture = true; + ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY, + &geom_attr); + if (ret < 0) { + dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, + ret); + goto out_domain_free; + } + ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS, + &window_count); + if (ret < 0) { + dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, + ret); + goto out_domain_free; + } + stash_attr.cpu = cpu; + stash_attr.cache = PAMU_ATTR_CACHE_L1; + ret = iommu_domain_set_attr(pcfg->iommu_domain, + DOMAIN_ATTR_FSL_PAMU_STASH, + &stash_attr); + if (ret < 0) { + dev_err(dev, "%s(): iommu_domain_set_attr() = %d", + __func__, ret); + goto out_domain_free; + } + ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36, + IOMMU_READ | IOMMU_WRITE); + if (ret < 0) { + dev_err(dev, "%s(): iommu_domain_window_enable() = %d", + __func__, ret); + goto out_domain_free; + } + ret = iommu_attach_device(pcfg->iommu_domain, dev); + if (ret < 0) { + dev_err(dev, "%s(): iommu_device_attach() = %d", __func__, + ret); + goto out_domain_free; + } + ret = iommu_domain_set_attr(pcfg->iommu_domain, + DOMAIN_ATTR_FSL_PAMU_ENABLE, + &window_count); + if (ret < 0) { + dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, + ret); + goto out_detach_device; + } + +no_iommu: +#endif + qman_set_sdest(pcfg->channel, cpu); + + return; + +#ifdef CONFIG_FSL_PAMU +out_detach_device: + iommu_detach_device(pcfg->iommu_domain, NULL); +out_domain_free: + iommu_domain_free(pcfg->iommu_domain); + pcfg->iommu_domain = NULL; +#endif +} + +static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg) +{ + struct qman_portal *p; + u32 irq_sources = 0; + + /* We need the same LIODN offset for all portals */ + qman_liodn_fixup(pcfg->channel); + + pcfg->iommu_domain = NULL; + portal_set_cpu(pcfg, pcfg->cpu); + + p = qman_create_affine_portal(pcfg, NULL); + if (!p) { + dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n", + __func__, pcfg->cpu); + return NULL; + } + + /* Determine what should be interrupt-vs-poll driven */ +#ifdef CONFIG_FSL_DPA_PIRQ_SLOW + irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI | + QM_PIRQ_CSCI; +#endif +#ifdef CONFIG_FSL_DPA_PIRQ_FAST + irq_sources |= QM_PIRQ_DQRI; +#endif + qman_p_irqsource_add(p, irq_sources); + + spin_lock(&qman_lock); + if (cpumask_equal(&portal_cpus, cpu_possible_mask)) { + /* all assigned portals are initialized now */ + qman_init_cgr_all(); + } + spin_unlock(&qman_lock); + + dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu); + + return p; +} + +static void qman_portal_update_sdest(const struct qm_portal_config *pcfg, + unsigned int cpu) 
+{ +#ifdef CONFIG_FSL_PAMU /* TODO */ + struct pamu_stash_attribute stash_attr; + int ret; + + if (pcfg->iommu_domain) { + stash_attr.cpu = cpu; + stash_attr.cache = PAMU_ATTR_CACHE_L1; + ret = iommu_domain_set_attr(pcfg->iommu_domain, + DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr); + if (ret < 0) { + dev_err(pcfg->dev, + "Failed to update pamu stash setting\n"); + return; + } + } +#endif + qman_set_sdest(pcfg->channel, cpu); +} + +static void qman_offline_cpu(unsigned int cpu) +{ + struct qman_portal *p; + const struct qm_portal_config *pcfg; + + p = affine_portals[cpu]; + if (p) { + pcfg = qman_get_qm_portal_config(p); + if (pcfg) { + irq_set_affinity(pcfg->irq, cpumask_of(0)); + qman_portal_update_sdest(pcfg, 0); + } + } +} + +static void qman_online_cpu(unsigned int cpu) +{ + struct qman_portal *p; + const struct qm_portal_config *pcfg; + + p = affine_portals[cpu]; + if (p) { + pcfg = qman_get_qm_portal_config(p); + if (pcfg) { + irq_set_affinity(pcfg->irq, cpumask_of(cpu)); + qman_portal_update_sdest(pcfg, cpu); + } + } +} + +static int qman_hotplug_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + + switch (action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + qman_online_cpu(cpu); + break; + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + qman_offline_cpu(cpu); + default: + break; + } + return NOTIFY_OK; +} + +static struct notifier_block qman_hotplug_cpu_notifier = { + .notifier_call = qman_hotplug_cpu_callback, +}; + +static int qman_portal_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct qm_portal_config *pcfg; + struct resource *addr_phys[2]; + const u32 *channel; + void __iomem *va; + int irq, len, cpu; + + pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); + if (!pcfg) + return -ENOMEM; + + pcfg->dev = dev; + + addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM, + DPAA_PORTAL_CE); + if (!addr_phys[0]) { + dev_err(dev, "Can't get %s property 'reg::CE'\n", + node->full_name); + return -ENXIO; + } + + addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, + DPAA_PORTAL_CI); + if (!addr_phys[1]) { + dev_err(dev, "Can't get %s property 'reg::CI'\n", + node->full_name); + return -ENXIO; + } + + channel = of_get_property(node, "cell-index", &len); + if (!channel || (len != 4)) { + dev_err(dev, "Can't get %s property 'cell-index'\n", + node->full_name); + return -ENXIO; + } + pcfg->channel = *channel; + pcfg->cpu = -1; + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(dev, "Can't get %s IRQ\n", node->full_name); + return -ENXIO; + } + pcfg->irq = irq; + + va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); + if (!va) + goto err_ioremap1; + + pcfg->addr_virt[DPAA_PORTAL_CE] = va; + + va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), + _PAGE_GUARDED | _PAGE_NO_CACHE); + if (!va) + goto err_ioremap2; + + pcfg->addr_virt[DPAA_PORTAL_CI] = va; + + pcfg->pools = qm_get_pools_sdqcr(); + + spin_lock(&qman_lock); + cpu = cpumask_next_zero(-1, &portal_cpus); + if (cpu >= nr_cpu_ids) { + /* unassigned portal, skip init */ + spin_unlock(&qman_lock); + return 0; + } + + cpumask_set_cpu(cpu, &portal_cpus); + spin_unlock(&qman_lock); + pcfg->cpu = cpu; + + if (!init_pcfg(pcfg)) + goto err_ioremap2; + + /* clear irq affinity if assigned cpu is offline */ + if (!cpu_online(cpu)) + qman_offline_cpu(cpu); + + return 0; + +err_ioremap2: + iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); 
+err_ioremap1: + dev_err(dev, "ioremap failed\n"); + return -ENXIO; +} + +static const struct of_device_id qman_portal_ids[] = { + { + .compatible = "fsl,qman-portal", + }, + {} +}; +MODULE_DEVICE_TABLE(of, qman_portal_ids); + +static struct platform_driver qman_portal_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = qman_portal_ids, + }, + .probe = qman_portal_probe, +}; + +static int __init qman_portal_driver_register(struct platform_driver *drv) +{ + int ret; + + ret = platform_driver_register(drv); + if (ret < 0) + return ret; + + register_hotcpu_notifier(&qman_hotplug_cpu_notifier); + + return 0; +} + +module_driver(qman_portal_driver, + qman_portal_driver_register, platform_driver_unregister); diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h new file mode 100644 index 000000000000..5cf821e623a9 --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_priv.h @@ -0,0 +1,371 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
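/*
 * Sketch of the device-tree shape the portal probe above expects; the
 * compatible string and property names come from the code, but the example
 * values are invented. "reg" supplies the cache-enabled (CE) then
 * cache-inhibited (CI) regions, and "cell-index" is read straight into
 * pcfg->channel:
 *
 *	qman-portal@0 {
 *		compatible = "fsl,qman-portal";
 *		reg = <0x0 0x4000>, <0x100000 0x1000>;
 *		interrupts = <104 2 0 0>;
 *		cell-index = <0x0>;
 *	};
 */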
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "dpaa_sys.h" + +#include <soc/fsl/qman.h> +#include <linux/iommu.h> + +#if defined(CONFIG_FSL_PAMU) +#include <asm/fsl_pamu_stash.h> +#endif + +struct qm_mcr_querywq { + u8 verb; + u8 result; + u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */ + u8 __reserved[28]; + u32 wq_len[8]; +} __packed; + +static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq) +{ + return wq->channel_wq >> 3; +} + +struct __qm_mcr_querycongestion { + u32 state[8]; +}; + +/* "Query Congestion Group State" */ +struct qm_mcr_querycongestion { + u8 verb; + u8 result; + u8 __reserved[30]; + /* Access this struct using qman_cgrs_get() */ + struct __qm_mcr_querycongestion state; +} __packed; + +/* "Query CGR" */ +struct qm_mcr_querycgr { + u8 verb; + u8 result; + u16 __reserved1; + struct __qm_mc_cgr cgr; /* CGR fields */ + u8 __reserved2[6]; + u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */ + u32 i_bcnt_lo; /* low 32-bits of 40-bit */ + u8 __reserved3[3]; + u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */ + u32 a_bcnt_lo; /* low 32-bits of 40-bit */ + u32 cscn_targ_swp[4]; +} __packed; + +static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q) +{ + return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo; +} +static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q) +{ + return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo; +} + +/* "Query FQ Non-Programmable Fields" */ +struct qm_mcc_queryfq_np { + u8 _ncw_verb; + u8 __reserved1[3]; + u32 fqid; /* 24-bit */ + u8 __reserved2[56]; +} __packed; + +struct qm_mcr_queryfq_np { + u8 verb; + u8 result; + u8 __reserved1; + u8 state; /* QM_MCR_NP_STATE_*** */ + u32 fqd_link; /* 24-bit, _res2[24-31] */ + u16 odp_seq; /* 14-bit, _res3[14-15] */ + u16 orp_nesn; /* 14-bit, _res4[14-15] */ + u16 orp_ea_hseq; /* 15-bit, _res5[15] */ + u16 orp_ea_tseq; /* 15-bit, _res6[15] */ + u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */ + u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */ + u32 pfdr_hptr; /* 24-bit, _res9[24-31] */ + u32 pfdr_tptr; /* 24-bit, _res10[24-31] */ + u8 __reserved2[5]; + u8 is; /* 1-bit, _res12[1-7] */ + u16 ics_surp; + u32 byte_cnt; + u32 frm_cnt; /* 24-bit, _res13[24-31] */ + u32 __reserved3; + u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */ + u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */ + u16 __reserved4; + u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */ + u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */ + u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */ +} __packed; + +#define QM_MCR_NP_STATE_FE 0x10 +#define QM_MCR_NP_STATE_R 0x08 +#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */ +#define QM_MCR_NP_STATE_OOS 0x00 +#define QM_MCR_NP_STATE_RETIRED 0x01 +#define QM_MCR_NP_STATE_TEN_SCHED 0x02 +#define QM_MCR_NP_STATE_TRU_SCHED 0x03 +#define QM_MCR_NP_STATE_PARKED 0x04 +#define QM_MCR_NP_STATE_ACTIVE 0x05 +#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */ +#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */ +#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */ +#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */ +#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */ + +enum qm_mcr_queryfq_np_masks { + qm_mcr_fqd_link_mask = BIT(24)-1, + qm_mcr_odp_seq_mask = BIT(14)-1, + qm_mcr_orp_nesn_mask = BIT(14)-1, + qm_mcr_orp_ea_hseq_mask = BIT(15)-1, + qm_mcr_orp_ea_tseq_mask = BIT(15)-1, + qm_mcr_orp_ea_hptr_mask = BIT(24)-1, + qm_mcr_orp_ea_tptr_mask = BIT(24)-1, + qm_mcr_pfdr_hptr_mask = BIT(24)-1, + qm_mcr_pfdr_tptr_mask = BIT(24)-1, + 
qm_mcr_is_mask = BIT(1)-1, + qm_mcr_frm_cnt_mask = BIT(24)-1, +}; +#define qm_mcr_np_get(np, field) \ + ((np)->field & (qm_mcr_##field##_mask)) + +/* Congestion Groups */ + +/* + * This wrapper represents a bit-array for the state of the 256 QMan congestion + * groups. Is also used as a *mask* for congestion groups, eg. so we ignore + * those that don't concern us. We harness the structure and accessor details + * already used in the management command to query congestion groups. + */ +#define CGR_BITS_PER_WORD 5 +#define CGR_WORD(x) ((x) >> CGR_BITS_PER_WORD) +#define CGR_BIT(x) (BIT(31) >> ((x) & 0x1f)) +#define CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3) + +struct qman_cgrs { + struct __qm_mcr_querycongestion q; +}; + +static inline void qman_cgrs_init(struct qman_cgrs *c) +{ + memset(c, 0, sizeof(*c)); +} + +static inline void qman_cgrs_fill(struct qman_cgrs *c) +{ + memset(c, 0xff, sizeof(*c)); +} + +static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr) +{ + return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr); +} + +static inline void qman_cgrs_cp(struct qman_cgrs *dest, + const struct qman_cgrs *src) +{ + *dest = *src; +} + +static inline void qman_cgrs_and(struct qman_cgrs *dest, + const struct qman_cgrs *a, const struct qman_cgrs *b) +{ + int ret; + u32 *_d = dest->q.state; + const u32 *_a = a->q.state; + const u32 *_b = b->q.state; + + for (ret = 0; ret < 8; ret++) + *_d++ = *_a++ & *_b++; +} + +static inline void qman_cgrs_xor(struct qman_cgrs *dest, + const struct qman_cgrs *a, const struct qman_cgrs *b) +{ + int ret; + u32 *_d = dest->q.state; + const u32 *_a = a->q.state; + const u32 *_b = b->q.state; + + for (ret = 0; ret < 8; ret++) + *_d++ = *_a++ ^ *_b++; +} + +void qman_init_cgr_all(void); + +struct qm_portal_config { + /* + * Corenet portal addresses; + * [0]==cache-enabled, [1]==cache-inhibited. 
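/*
 * Worked example, illustrative only: the congestion-state words use
 * big-endian bit numbering, so CGR id 37 lives in state[37 >> 5] = state[1]
 * under the mask CGR_BIT(37) = BIT(31) >> (37 & 0x1f) = BIT(26).
 */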
+ */ + void __iomem *addr_virt[2]; + struct device *dev; + struct iommu_domain *iommu_domain; + /* Allow these to be joined in lists */ + struct list_head list; + /* User-visible portal configuration settings */ + /* portal is affined to this cpu */ + int cpu; + /* portal interrupt line */ + int irq; + /* + * the portal's dedicated channel id, used initialising + * frame queues to target this portal when scheduled + */ + u16 channel; + /* + * mask of pool channels this portal has dequeue access to + * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask) + */ + u32 pools; +}; + +/* Revision info (for errata and feature handling) */ +#define QMAN_REV11 0x0101 +#define QMAN_REV12 0x0102 +#define QMAN_REV20 0x0200 +#define QMAN_REV30 0x0300 +#define QMAN_REV31 0x0301 +extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */ + +#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */ +extern struct gen_pool *qm_fqalloc; /* FQID allocator */ +extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */ +extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */ +u32 qm_get_pools_sdqcr(void); + +int qman_wq_alloc(void); +void qman_liodn_fixup(u16 channel); +void qman_set_sdest(u16 channel, unsigned int cpu_idx); + +struct qman_portal *qman_create_affine_portal( + const struct qm_portal_config *config, + const struct qman_cgrs *cgrs); +const struct qm_portal_config *qman_destroy_affine_portal(void); + +/* + * qman_query_fq - Queries FQD fields (via h/w query command) + * @fq: the frame queue object to be queried + * @fqd: storage for the queried FQD fields + */ +int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd); + +/* + * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use + * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use + * FQID(n) to fill in the frame queue ID. + */ +#define QM_VDQCR_PRECEDENCE_VDQCR 0x0 +#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000 +#define QM_VDQCR_EXACT 0x40000000 +#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000 +#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24) +#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f) +#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0) + +#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */ +#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */ +#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */ + +/* + * qman_volatile_dequeue - Issue a volatile dequeue command + * @fq: the frame queue object to dequeue from + * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options + * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set() + * + * Attempts to lock access to the portal's VDQCR volatile dequeue functionality. + * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and + * the VDQCR is already in use, otherwise returns non-zero for failure. If + * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once + * the VDQCR command has finished executing (ie. once the callback for the last + * DQRR entry resulting from the VDQCR command has been called). If not using + * the FINISH flag, completion can be determined either by detecting the + * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits + * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting + * for the QMAN_FQ_STATE_VDQCR bit to disappear. 
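/*
 * Illustrative sketch, not part of this patch: composing a VDQCR word from
 * the fields above and issuing it. Assumes the caller may sleep and that
 * the FQ is in a state that accepts volatile dequeues.
 */
static int my_drain_fq(struct qman_fq *fq, u32 fqid)
{
	u32 vdqcr = QM_VDQCR_PRECEDENCE_VDQCR |
		    QM_VDQCR_NUMFRAMES_TILLEMPTY |
		    QM_VDQCR_FQID(fqid);

	return qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
				     QMAN_VOLATILE_FLAG_FINISH, vdqcr);
}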
+ */ +int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr); + +int qman_alloc_fq_table(u32 num_fqids); + +/* QMan s/w corenet portal, low-level i/face */ + +/* + * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one + * dequeue TYPE. Choose TOKEN (8-bit). + * If SOURCE == CHANNELS, + * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n). + * You can choose DEDICATED_PRECEDENCE if the portal channel should have + * priority. + * If SOURCE == SPECIFICWQ, + * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the + * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the + * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the + * same value. + */ +#define QM_SDQCR_SOURCE_CHANNELS 0x0 +#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000 +#define QM_SDQCR_COUNT_EXACT1 0x0 +#define QM_SDQCR_COUNT_UPTO3 0x20000000 +#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000 +#define QM_SDQCR_TYPE_MASK 0x03000000 +#define QM_SDQCR_TYPE_NULL 0x0 +#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000 +#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000 +#define QM_SDQCR_TYPE_ACTIVE 0x03000000 +#define QM_SDQCR_TOKEN_MASK 0x00ff0000 +#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16) +#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff) +#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000 +#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7 +#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000 +#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4) +#define QM_SDQCR_SPECIFICWQ_WQ(n) (n) + +/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */ +#define QM_VDQCR_FQID_MASK 0x00ffffff +#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK) + +/* + * Used by all portal interrupt registers except 'inhibit' + * Channels with frame availability + */ +#define QM_PIRQ_DQAVAIL 0x0000ffff + +/* The DQAVAIL interrupt fields break down into these bits; */ +#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */ +#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */ +#define QM_DQAVAIL_MASK 0xffff +/* This mask contains all the "irqsource" bits visible to API users */ +#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI) + +extern struct qman_portal *affine_portals[NR_CPUS]; +const struct qm_portal_config *qman_get_qm_portal_config( + struct qman_portal *portal); diff --git a/drivers/soc/fsl/qbman/qman_test.c b/drivers/soc/fsl/qbman/qman_test.c new file mode 100644 index 000000000000..18f7f0202fa7 --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_test.c @@ -0,0 +1,62 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
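/*
 * Illustrative sketch, not part of this patch: a static-dequeue command
 * built from the fields above -- channel-sourced, up to three frames per
 * command, priority-QoS type, dequeuing from the dedicated channel plus
 * pool channel 2. The PORTAL_SDQCR in qman_test_api.c below is built the
 * same way.
 */
#define MY_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
		  QM_SDQCR_COUNT_UPTO3 | \
		  QM_SDQCR_TYPE_PRIO_QOS | \
		  QM_SDQCR_TOKEN_SET(0x98) | \
		  QM_SDQCR_CHANNELS_DEDICATED | \
		  QM_SDQCR_CHANNELS_POOL(2))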
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_test.h" + +MODULE_AUTHOR("Geoff Thorpe"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("QMan testing"); + +static int test_init(void) +{ + int loop = 1; + int err = 0; + + while (loop--) { +#ifdef CONFIG_FSL_QMAN_TEST_STASH + err = qman_test_stash(); + if (err) + break; +#endif +#ifdef CONFIG_FSL_QMAN_TEST_API + err = qman_test_api(); + if (err) + break; +#endif + } + return err; +} + +static void test_exit(void) +{ +} + +module_init(test_init); +module_exit(test_exit); diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h new file mode 100644 index 000000000000..d5f8cb2260dc --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_test.h @@ -0,0 +1,36 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+/* define pr_fmt() before any kernel headers get pulled in, so that the
+ * module-name prefix applies to all pr_*() messages in the test code
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "qman_priv.h"
+
+int qman_test_stash(void);
+int qman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
new file mode 100644
index 000000000000..6880ff17f45e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -0,0 +1,252 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */ + +#include "qman_test.h" + +#define CGR_ID 27 +#define POOL_ID 2 +#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID +#define NUM_ENQUEUES 10 +#define NUM_PARTIAL 4 +#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \ + QM_SDQCR_TYPE_PRIO_QOS | \ + QM_SDQCR_TOKEN_SET(0x98) | \ + QM_SDQCR_CHANNELS_DEDICATED | \ + QM_SDQCR_CHANNELS_POOL(POOL_ID)) +#define PORTAL_OPAQUE ((void *)0xf00dbeef) +#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH) + +static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *, + struct qman_fq *, + const struct qm_dqrr_entry *); +static void cb_ern(struct qman_portal *, struct qman_fq *, + const union qm_mr_entry *); +static void cb_fqs(struct qman_portal *, struct qman_fq *, + const union qm_mr_entry *); + +static struct qm_fd fd, fd_dq; +static struct qman_fq fq_base = { + .cb.dqrr = cb_dqrr, + .cb.ern = cb_ern, + .cb.fqs = cb_fqs +}; +static DECLARE_WAIT_QUEUE_HEAD(waitqueue); +static int retire_complete, sdqcr_complete; + +/* Helpers for initialising and "incrementing" a frame descriptor */ +static void fd_init(struct qm_fd *fd) +{ + qm_fd_addr_set64(fd, 0xabdeadbeefLLU); + qm_fd_set_contig_big(fd, 0x0000ffff); + fd->cmd = 0xfeedf00d; +} + +static void fd_inc(struct qm_fd *fd) +{ + u64 t = qm_fd_addr_get64(fd); + int z = t >> 40; + unsigned int len, off; + enum qm_fd_format fmt; + + t <<= 1; + if (z) + t |= 1; + qm_fd_addr_set64(fd, t); + + fmt = qm_fd_get_format(fd); + off = qm_fd_get_offset(fd); + len = qm_fd_get_length(fd); + len--; + qm_fd_set_param(fd, fmt, off, len); + + fd->cmd++; +} + +/* The only part of the 'fd' we can't memcmp() is the ppid */ +static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b) +{ + int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1; + + if (!r) { + enum qm_fd_format fmt_a, fmt_b; + + fmt_a = qm_fd_get_format(a); + fmt_b = qm_fd_get_format(b); + r = fmt_a - fmt_b; + } + if (!r) + r = a->cfg - b->cfg; + if (!r) + r = a->cmd - b->cmd; + return r; +} + +/* test */ +static int do_enqueues(struct qman_fq *fq) +{ + unsigned int loop; + int err = 0; + + for (loop = 0; loop < NUM_ENQUEUES; loop++) { + if (qman_enqueue(fq, &fd)) { + pr_crit("qman_enqueue() failed\n"); + err = -EIO; + } + fd_inc(&fd); + } + + return err; +} + +int qman_test_api(void) +{ + unsigned int flags, frmcnt; + int err; + struct qman_fq *fq = &fq_base; + + pr_info("%s(): Starting\n", __func__); + fd_init(&fd); + fd_init(&fd_dq); + + /* Initialise (parked) FQ */ + err = qman_create_fq(0, FQ_FLAGS, fq); + if (err) { + pr_crit("qman_create_fq() failed\n"); + goto failed; + } + err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL); + if (err) { + pr_crit("qman_init_fq() failed\n"); + goto failed; + } + /* Do enqueues + VDQCR, twice. 
(Parked FQ) */ + err = do_enqueues(fq); + if (err) + goto failed; + pr_info("VDQCR (till-empty);\n"); + frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY; + err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); + if (err) { + pr_crit("qman_volatile_dequeue() failed\n"); + goto failed; + } + err = do_enqueues(fq); + if (err) + goto failed; + pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES); + frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL); + err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); + if (err) { + pr_crit("qman_volatile_dequeue() failed\n"); + goto failed; + } + pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL, + NUM_ENQUEUES); + frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL); + err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); + if (err) { + pr_err("qman_volatile_dequeue() failed\n"); + goto failed; + } + + err = do_enqueues(fq); + if (err) + goto failed; + pr_info("scheduled dequeue (till-empty)\n"); + err = qman_schedule_fq(fq); + if (err) { + pr_crit("qman_schedule_fq() failed\n"); + goto failed; + } + wait_event(waitqueue, sdqcr_complete); + + /* Retire and OOS the FQ */ + err = qman_retire_fq(fq, &flags); + if (err < 0) { + pr_crit("qman_retire_fq() failed\n"); + goto failed; + } + wait_event(waitqueue, retire_complete); + if (flags & QMAN_FQ_STATE_BLOCKOOS) { + err = -EIO; + pr_crit("leaking frames\n"); + goto failed; + } + err = qman_oos_fq(fq); + if (err) { + pr_crit("qman_oos_fq() failed\n"); + goto failed; + } + qman_destroy_fq(fq); + pr_info("%s(): Finished\n", __func__); + return 0; + +failed: + WARN_ON(1); + return err; +} + +static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p, + struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) { + pr_err("BADNESS: dequeued frame doesn't match;\n"); + return qman_cb_dqrr_consume; + } + fd_inc(&fd_dq); + if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) { + sdqcr_complete = 1; + wake_up(&waitqueue); + } + return qman_cb_dqrr_consume; +} + +static void cb_ern(struct qman_portal *p, struct qman_fq *fq, + const union qm_mr_entry *msg) +{ + pr_crit("cb_ern() unimplemented"); + WARN_ON(1); +} + +static void cb_fqs(struct qman_portal *p, struct qman_fq *fq, + const union qm_mr_entry *msg) +{ + u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK); + + if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) { + pr_crit("unexpected FQS message"); + WARN_ON(1); + return; + } + pr_info("Retirement message received\n"); + retire_complete = 1; + wake_up(&waitqueue); +} diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c new file mode 100644 index 000000000000..43cf66ba42f5 --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_test_stash.c @@ -0,0 +1,617 @@ +/* Copyright 2009 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+/*
+ * Algorithm:
+ *
+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
+ * shuttle a "hot potato" frame around them such that every forwarding action
+ * moves it from one cpu to another. (The use of more than one handler per cpu
+ * is to allow enough handlers/FQs to truly test the significance of caching -
+ * ie. when cache-expiries are occurring.)
+ *
+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and all
+ * words of the frame data will undergo a transformation step on each
+ * forwarding action. To achieve this, each handler will be assigned a
+ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
+ * received by a handler, the mixer of the expected sender is XOR'd into all
+ * words of the entire frame, which is then validated against the original
+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
+ * the current handler. Apart from validating that the frame is taking the
+ * expected path, this also provides some quasi-realistic overheads to each
+ * forwarding action - dereferencing *all* the frame data, computation, and
+ * conditional branching. There is a "special" handler designated to act as the
+ * instigator of the test by creating and enqueuing the "hot potato" frame, and
+ * to determine when the test has completed by counting HP_LOOPS iterations.
+ *
+ * Init phases:
+ *
+ * 1. prepare each cpu's 'hp_cpu' struct using on_all_cpus() and link them
+ *    into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
+ *    handlers and link-list them (but do no other handler setup).
+ *
+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time setting each
+ *    hp_cpu's 'iterator' to point to its first handler. With each loop,
+ *    allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
+ *    and advance the iterator for the next loop. This includes a final fixup,
+ *    which connects the last handler to the first (and which is why phases 2
+ *    and 3 are separate).
+ *
+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time setting each
+ *    hp_cpu's 'iterator' to point to its first handler. With each loop,
+ *    initialise FQ objects and advance the iterator for the next loop.
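+ *    As a rough sketch (init_handler() below is the authoritative version),
+ *    the per-handler FQ setup in this phase boils down to:
+ *
+ *        qman_create_fq(handler->fqid_rx, 0, &handler->rx);
+ *        qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+ *                     QMAN_INITFQ_FLAG_LOCAL, &opts);
+ *
+ *    with context- and data-stashing enabled in 'opts'.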
+ * Moreover, do this initialisation on the cpu it applies to so that Rx FQ + * initialisation targets the correct cpu. + */ + +/* + * helper to run something on all cpus (can't use on_each_cpu(), as that invokes + * the fn from irq context, which is too restrictive). + */ +struct bstrap { + int (*fn)(void); + atomic_t started; +}; +static int bstrap_fn(void *bs) +{ + struct bstrap *bstrap = bs; + int err; + + atomic_inc(&bstrap->started); + err = bstrap->fn(); + if (err) + return err; + while (!kthread_should_stop()) + msleep(20); + return 0; +} +static int on_all_cpus(int (*fn)(void)) +{ + int cpu; + + for_each_cpu(cpu, cpu_online_mask) { + struct bstrap bstrap = { + .fn = fn, + .started = ATOMIC_INIT(0) + }; + struct task_struct *k = kthread_create(bstrap_fn, &bstrap, + "hotpotato%d", cpu); + int ret; + + if (IS_ERR(k)) + return -ENOMEM; + kthread_bind(k, cpu); + wake_up_process(k); + /* + * If we call kthread_stop() before the "wake up" has had an + * effect, then the thread may exit with -EINTR without ever + * running the function. So poll until it's started before + * requesting it to stop. + */ + while (!atomic_read(&bstrap.started)) + msleep(20); + ret = kthread_stop(k); + if (ret) + return ret; + } + return 0; +} + +struct hp_handler { + + /* The following data is stashed when 'rx' is dequeued; */ + /* -------------- */ + /* The Rx FQ, dequeues of which will stash the entire hp_handler */ + struct qman_fq rx; + /* The Tx FQ we should forward to */ + struct qman_fq tx; + /* The value we XOR post-dequeue, prior to validating */ + u32 rx_mixer; + /* The value we XOR pre-enqueue, after validating */ + u32 tx_mixer; + /* what the hotpotato address should be on dequeue */ + dma_addr_t addr; + u32 *frame_ptr; + + /* The following data isn't (necessarily) stashed on dequeue; */ + /* -------------- */ + u32 fqid_rx, fqid_tx; + /* list node for linking us into 'hp_cpu' */ + struct list_head node; + /* Just to check ... */ + unsigned int processor_id; +} ____cacheline_aligned; + +struct hp_cpu { + /* identify the cpu we run on; */ + unsigned int processor_id; + /* root node for the per-cpu list of handlers */ + struct list_head handlers; + /* list node for linking us into 'hp_cpu_list' */ + struct list_head node; + /* + * when repeatedly scanning 'hp_list', each time linking the n'th + * handlers together, this is used as per-cpu iterator state + */ + struct hp_handler *iterator; +}; + +/* Each cpu has one of these */ +static DEFINE_PER_CPU(struct hp_cpu, hp_cpus); + +/* links together the hp_cpu structs, in first-come first-serve order. */ +static LIST_HEAD(hp_cpu_list); +static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock); + +static unsigned int hp_cpu_list_length; + +/* the "special" handler, that starts and terminates the test. */ +static struct hp_handler *special_handler; +static int loop_counter; + +/* handlers are allocated out of this, so they're properly aligned. 
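+ * (Alignment matters because dequeue stashing transfers whole cachelines;
+ * the cache is created with L1_CACHE_BYTES alignment in qman_test_stash().)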
*/
+static struct kmem_cache *hp_handler_slab;
+
+/* this is the frame data */
+static void *__frame_ptr;
+static u32 *frame_ptr;
+static dma_addr_t frame_dma;
+
+/* the main function waits on this */
+static DECLARE_WAIT_QUEUE_HEAD(queue);
+
+#define HP_PER_CPU	2
+#define HP_LOOPS	8
+/* 80 words (320 bytes) of frame data, like a small ethernet frame, spanning
+ * several cachelines
+ */
+#define HP_NUM_WORDS	80
+/* First word of the LFSR-based frame data */
+#define HP_FIRST_WORD	0xabbaf00d
+
+static inline u32 do_lfsr(u32 prev)
+{
+	return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
+}
+
+static int allocate_frame_data(void)
+{
+	u32 lfsr = HP_FIRST_WORD;
+	int loop;
+	struct platform_device *pdev = platform_device_alloc("foobar", -1);
+
+	if (!pdev) {
+		pr_crit("platform_device_alloc() failed");
+		return -EIO;
+	}
+	if (platform_device_add(pdev)) {
+		pr_crit("platform_device_add() failed");
+		platform_device_put(pdev);
+		return -EIO;
+	}
+	/* over-allocate so the 64-byte alignment below cannot run past the
+	 * end of the buffer
+	 */
+	__frame_ptr = kmalloc(4 * HP_NUM_WORDS + 64, GFP_KERNEL);
+	if (!__frame_ptr) {
+		platform_device_del(pdev);
+		platform_device_put(pdev);
+		return -ENOMEM;
+	}
+
+	frame_ptr = PTR_ALIGN(__frame_ptr, 64);
+	for (loop = 0; loop < HP_NUM_WORDS; loop++) {
+		frame_ptr[loop] = lfsr;
+		lfsr = do_lfsr(lfsr);
+	}
+	frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+				   DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&pdev->dev, frame_dma)) {
+		pr_crit("dma mapping failure");
+		kfree(__frame_ptr);
+		platform_device_del(pdev);
+		platform_device_put(pdev);
+		return -EIO;
+	}
+	platform_device_del(pdev);
+	platform_device_put(pdev);
+	return 0;
+}
+
+static void deallocate_frame_data(void)
+{
+	kfree(__frame_ptr);
+}
+
+static inline int process_frame_data(struct hp_handler *handler,
+				     const struct qm_fd *fd)
+{
+	u32 *p = handler->frame_ptr;
+	u32 lfsr = HP_FIRST_WORD;
+	int loop;
+
+	if (qm_fd_addr_get64(fd) != handler->addr) {
+		pr_crit("bad frame address");
+		return -EIO;
+	}
+	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+		*p ^= handler->rx_mixer;
+		if (*p != lfsr) {
+			pr_crit("corrupt frame data");
+			return -EIO;
+		}
+		*p ^= handler->tx_mixer;
+		lfsr = do_lfsr(lfsr);
+	}
+	return 0;
+}
+
+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
+					    struct qman_fq *fq,
+					    const struct qm_dqrr_entry *dqrr)
+{
+	struct hp_handler *handler = (struct hp_handler *)fq;
+
+	if (process_frame_data(handler, &dqrr->fd)) {
+		WARN_ON(1);
+		goto skip;
+	}
+	if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+		pr_crit("qman_enqueue() failed");
+		WARN_ON(1);
+	}
+skip:
+	return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
+					     struct qman_fq *fq,
+					     const struct qm_dqrr_entry *dqrr)
+{
+	struct hp_handler *handler = (struct hp_handler *)fq;
+
+	process_frame_data(handler, &dqrr->fd);
+	if (++loop_counter < HP_LOOPS) {
+		if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+			pr_crit("qman_enqueue() failed");
+			WARN_ON(1);
+			goto skip;
+		}
+	} else {
+		pr_info("Received final (%dth) frame\n", loop_counter);
+		wake_up(&queue);
+	}
+skip:
+	return qman_cb_dqrr_consume;
+}
+
+static int create_per_cpu_handlers(void)
+{
+	struct hp_handler *handler;
+	int loop;
+	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+	hp_cpu->processor_id = smp_processor_id();
+	spin_lock(&hp_lock);
+	list_add_tail(&hp_cpu->node, &hp_cpu_list);
+	hp_cpu_list_length++;
+	spin_unlock(&hp_lock);
+	INIT_LIST_HEAD(&hp_cpu->handlers);
+	for (loop = 0; loop < HP_PER_CPU; loop++) {
+		handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
+		if (!handler) {
+			pr_crit("kmem_cache_alloc() failed");
+			WARN_ON(1);
+			return -EIO;
+		}
+		handler->processor_id = hp_cpu->processor_id;
+		handler->addr = frame_dma;
+		handler->frame_ptr = frame_ptr;
+		list_add_tail(&handler->node, &hp_cpu->handlers);
+	}
+	return 0;
+}
+
+static int
destroy_per_cpu_handlers(void) +{ + struct list_head *loop, *tmp; + struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus); + + spin_lock(&hp_lock); + list_del(&hp_cpu->node); + spin_unlock(&hp_lock); + list_for_each_safe(loop, tmp, &hp_cpu->handlers) { + u32 flags = 0; + struct hp_handler *handler = list_entry(loop, struct hp_handler, + node); + if (qman_retire_fq(&handler->rx, &flags) || + (flags & QMAN_FQ_STATE_BLOCKOOS)) { + pr_crit("qman_retire_fq(rx) failed, flags: %x", flags); + WARN_ON(1); + return -EIO; + } + if (qman_oos_fq(&handler->rx)) { + pr_crit("qman_oos_fq(rx) failed"); + WARN_ON(1); + return -EIO; + } + qman_destroy_fq(&handler->rx); + qman_destroy_fq(&handler->tx); + qman_release_fqid(handler->fqid_rx); + list_del(&handler->node); + kmem_cache_free(hp_handler_slab, handler); + } + return 0; +} + +static inline u8 num_cachelines(u32 offset) +{ + u8 res = (offset + (L1_CACHE_BYTES - 1)) + / (L1_CACHE_BYTES); + if (res > 3) + return 3; + return res; +} +#define STASH_DATA_CL \ + num_cachelines(HP_NUM_WORDS * 4) +#define STASH_CTX_CL \ + num_cachelines(offsetof(struct hp_handler, fqid_rx)) + +static int init_handler(void *h) +{ + struct qm_mcc_initfq opts; + struct hp_handler *handler = h; + int err; + + if (handler->processor_id != smp_processor_id()) { + err = -EIO; + goto failed; + } + /* Set up rx */ + memset(&handler->rx, 0, sizeof(handler->rx)); + if (handler == special_handler) + handler->rx.cb.dqrr = special_dqrr; + else + handler->rx.cb.dqrr = normal_dqrr; + err = qman_create_fq(handler->fqid_rx, 0, &handler->rx); + if (err) { + pr_crit("qman_create_fq(rx) failed"); + goto failed; + } + memset(&opts, 0, sizeof(opts)); + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING; + qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL); + err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED | + QMAN_INITFQ_FLAG_LOCAL, &opts); + if (err) { + pr_crit("qman_init_fq(rx) failed"); + goto failed; + } + /* Set up tx */ + memset(&handler->tx, 0, sizeof(handler->tx)); + err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY, + &handler->tx); + if (err) { + pr_crit("qman_create_fq(tx) failed"); + goto failed; + } + + return 0; +failed: + return err; +} + +static void init_handler_cb(void *h) +{ + if (init_handler(h)) + WARN_ON(1); +} + +static int init_phase2(void) +{ + int loop; + u32 fqid = 0; + u32 lfsr = 0xdeadbeef; + struct hp_cpu *hp_cpu; + struct hp_handler *handler; + + for (loop = 0; loop < HP_PER_CPU; loop++) { + list_for_each_entry(hp_cpu, &hp_cpu_list, node) { + int err; + + if (!loop) + hp_cpu->iterator = list_first_entry( + &hp_cpu->handlers, + struct hp_handler, node); + else + hp_cpu->iterator = list_entry( + hp_cpu->iterator->node.next, + struct hp_handler, node); + /* Rx FQID is the previous handler's Tx FQID */ + hp_cpu->iterator->fqid_rx = fqid; + /* Allocate new FQID for Tx */ + err = qman_alloc_fqid(&fqid); + if (err) { + pr_crit("qman_alloc_fqid() failed"); + return err; + } + hp_cpu->iterator->fqid_tx = fqid; + /* Rx mixer is the previous handler's Tx mixer */ + hp_cpu->iterator->rx_mixer = lfsr; + /* Get new mixer for Tx */ + lfsr = do_lfsr(lfsr); + hp_cpu->iterator->tx_mixer = lfsr; + } + } + /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */ + hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node); + handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node); + if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef) + return 1; + handler->fqid_rx = fqid; + 
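	/* this closes the ring: the first handler consumes the last handler's Tx */
+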
handler->rx_mixer = lfsr; + /* and tag it as our "special" handler */ + special_handler = handler; + return 0; +} + +static int init_phase3(void) +{ + int loop, err; + struct hp_cpu *hp_cpu; + + for (loop = 0; loop < HP_PER_CPU; loop++) { + list_for_each_entry(hp_cpu, &hp_cpu_list, node) { + if (!loop) + hp_cpu->iterator = list_first_entry( + &hp_cpu->handlers, + struct hp_handler, node); + else + hp_cpu->iterator = list_entry( + hp_cpu->iterator->node.next, + struct hp_handler, node); + preempt_disable(); + if (hp_cpu->processor_id == smp_processor_id()) { + err = init_handler(hp_cpu->iterator); + if (err) + return err; + } else { + smp_call_function_single(hp_cpu->processor_id, + init_handler_cb, hp_cpu->iterator, 1); + } + preempt_enable(); + } + } + return 0; +} + +static int send_first_frame(void *ignore) +{ + u32 *p = special_handler->frame_ptr; + u32 lfsr = HP_FIRST_WORD; + int loop, err; + struct qm_fd fd; + + if (special_handler->processor_id != smp_processor_id()) { + err = -EIO; + goto failed; + } + memset(&fd, 0, sizeof(fd)); + qm_fd_addr_set64(&fd, special_handler->addr); + qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4); + for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { + if (*p != lfsr) { + err = -EIO; + pr_crit("corrupt frame data"); + goto failed; + } + *p ^= special_handler->tx_mixer; + lfsr = do_lfsr(lfsr); + } + pr_info("Sending first frame\n"); + err = qman_enqueue(&special_handler->tx, &fd); + if (err) { + pr_crit("qman_enqueue() failed"); + goto failed; + } + + return 0; +failed: + return err; +} + +static void send_first_frame_cb(void *ignore) +{ + if (send_first_frame(NULL)) + WARN_ON(1); +} + +int qman_test_stash(void) +{ + int err; + + if (cpumask_weight(cpu_online_mask) < 2) { + pr_info("%s(): skip - only 1 CPU\n", __func__); + return 0; + } + + pr_info("%s(): Starting\n", __func__); + + hp_cpu_list_length = 0; + loop_counter = 0; + hp_handler_slab = kmem_cache_create("hp_handler_slab", + sizeof(struct hp_handler), L1_CACHE_BYTES, + SLAB_HWCACHE_ALIGN, NULL); + if (!hp_handler_slab) { + err = -EIO; + pr_crit("kmem_cache_create() failed"); + goto failed; + } + + err = allocate_frame_data(); + if (err) + goto failed; + + /* Init phase 1 */ + pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU); + if (on_all_cpus(create_per_cpu_handlers)) { + err = -EIO; + pr_crit("on_each_cpu() failed"); + goto failed; + } + pr_info("Number of cpus: %d, total of %d handlers\n", + hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU); + + err = init_phase2(); + if (err) + goto failed; + + err = init_phase3(); + if (err) + goto failed; + + preempt_disable(); + if (special_handler->processor_id == smp_processor_id()) { + err = send_first_frame(NULL); + if (err) + goto failed; + } else { + smp_call_function_single(special_handler->processor_id, + send_first_frame_cb, NULL, 1); + } + preempt_enable(); + + wait_event(queue, loop_counter == HP_LOOPS); + deallocate_frame_data(); + if (on_all_cpus(destroy_per_cpu_handlers)) { + err = -EIO; + pr_crit("on_each_cpu() failed"); + goto failed; + } + kmem_cache_destroy(hp_handler_slab); + pr_info("%s(): Finished\n", __func__); + + return 0; +failed: + WARN_ON(1); + return err; +} diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c index 333eb2215a57..0aaf429f31d5 100644 --- a/drivers/soc/fsl/qe/gpio.c +++ b/drivers/soc/fsl/qe/gpio.c @@ -41,7 +41,8 @@ struct qe_gpio_chip { static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) { - struct qe_gpio_chip *qe_gc = gpiochip_get_data(&mm_gc->gc); + struct qe_gpio_chip *qe_gc = + 
container_of(mm_gc, struct qe_gpio_chip, mm_gc);
 	struct qe_pio_regs __iomem *regs = mm_gc->regs;
 
 	qe_gc->cpdata = in_be32(&regs->cpdata);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 7026507e6f1d..2707a827261b 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -69,8 +69,8 @@ static phys_addr_t qebase = -1;
 phys_addr_t get_qe_base(void)
 {
 	struct device_node *qe;
-	int size;
-	const u32 *prop;
+	int ret;
+	struct resource res;
 
 	if (qebase != -1)
 		return qebase;
@@ -82,9 +82,9 @@ phys_addr_t get_qe_base(void)
 		return qebase;
 	}
 
-	prop = of_get_property(qe, "reg", &size);
-	if (prop && size >= sizeof(*prop))
-		qebase = of_translate_address(qe, prop);
+	ret = of_address_to_resource(qe, 0, &res);
+	if (!ret)
+		qebase = res.start;
 	of_node_put(qe);
 
 	return qebase;
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 41eff805a904..104e68d9b84f 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -70,6 +70,11 @@ int cpm_muram_init(void)
 	}
 
 	muram_pool = gen_pool_create(0, -1);
+	if (!muram_pool) {
+		pr_err("Cannot allocate memory pool for CPM/QE muram");
+		ret = -ENOMEM;
+		goto out_muram;
+	}
 	muram_pbase = of_translate_address(np, zero);
 	if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
 		pr_err("Cannot translate zero through CPM muram node");
@@ -116,6 +121,9 @@ static unsigned long cpm_muram_alloc_common(unsigned long size,
 	struct muram_block *entry;
 	unsigned long start;
 
+	if (!muram_pool && cpm_muram_init())
+		goto out2;
+
 	start = gen_pool_alloc_algo(muram_pool, size, algo, data);
 	if (!start)
 		goto out2;
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index 5e48b1470178..a1048b44e6b9 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -99,7 +99,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
 	utdm->tdm_port = val;
 	ut_info->uf_info.tdm_num = utdm->tdm_port;
 
-	if (of_get_property(np, "fsl,tdm-internal-loopback", NULL))
+	if (of_property_read_bool(np, "fsl,tdm-internal-loopback"))
 		utdm->tdm_mode = TDM_INTERNAL_LOOPBACK;
 	else
 		utdm->tdm_mode = TDM_NORMAL;
@@ -167,7 +167,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
 	}
 
 	if (siram_init_flag == 0) {
-		memset_io(utdm->siram, 0, res->end - res->start + 1);
+		memset_io(utdm->siram, 0, resource_size(res));
 		siram_init_flag = 1;
 	}
 
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 2d702ca6556f..a13541bdc726 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -186,7 +186,7 @@ config HISI_THERMAL
 
 config IMX_THERMAL
 	tristate "Temperature sensor driver for Freescale i.MX SoCs"
-	depends on CPU_THERMAL
+	depends on (ARCH_MXC && CPU_THERMAL) || COMPILE_TEST
 	depends on MFD_SYSCON
 	depends on OF
 	help
@@ -195,6 +195,26 @@ config IMX_THERMAL
 	  cpufreq is used as the cooling device to throttle CPUs when the
 	  passive trip is crossed.
 
+config MAX77620_THERMAL
+	tristate "Temperature sensor driver for Maxim MAX77620 PMIC"
+	depends on MFD_MAX77620
+	depends on OF
+	help
+	  Support for die junction temperature warning alarm for Maxim
+	  Semiconductor PMIC MAX77620 device. The device generates two alarm
+	  interrupts when the PMIC die temperature crosses the thresholds of
+	  120 degC and 140 degC.
+
+config QORIQ_THERMAL
+	tristate "QorIQ Thermal Monitoring Unit"
+	depends on THERMAL_OF
+	depends on HAS_IOMEM
+	help
+	  Support for Thermal Monitoring Unit (TMU) found on QorIQ platforms.
+	  It supports one critical trip point and one passive trip point.
The + cpufreq is used as the cooling device to throttle CPUs when the + passive trip is crossed. + config SPEAR_THERMAL tristate "SPEAr thermal sensor driver" depends on PLAT_SPEAR || COMPILE_TEST @@ -332,6 +352,16 @@ menu "ACPI INT340X thermal drivers" source drivers/thermal/int340x_thermal/Kconfig endmenu +config INTEL_BXT_PMIC_THERMAL + tristate "Intel Broxton PMIC thermal driver" + depends on X86 && INTEL_SOC_PMIC && REGMAP + help + Select this driver for Intel Broxton PMIC with ADC channels monitoring + system temperature measurements and alerts. + This driver is used for monitoring the ADC channels of PMIC and handles + the alert trip point interrupts and notifies the thermal framework with + the trip point and temperature details of the zone. + config INTEL_PCH_THERMAL tristate "Intel PCH Thermal Reporting Driver" depends on X86 && PCI @@ -399,4 +429,9 @@ config GENERIC_ADC_THERMAL to this driver. This driver reports the temperature by reading ADC channel and converts it to temperature based on lookup table. +menu "Qualcomm thermal drivers" +depends on (ARCH_QCOM && OF) || COMPILE_TEST +source "drivers/thermal/qcom/Kconfig" +endmenu + endif diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 10b07c14f8a9..c92eb22a41ff 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -37,6 +37,8 @@ obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o obj-$(CONFIG_TANGO_THERMAL) += tango_thermal.o obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o +obj-$(CONFIG_MAX77620_THERMAL) += max77620_thermal.o +obj-$(CONFIG_QORIQ_THERMAL) += qoriq_thermal.o obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o @@ -45,8 +47,10 @@ obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/ obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/ +obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o obj-$(CONFIG_ST_THERMAL) += st/ +obj-$(CONFIG_QCOM_TSENS) += qcom/ obj-$(CONFIG_TEGRA_SOCTHERM) += tegra/ obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index a32b41783b77..9ce0e9eef923 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -74,7 +74,7 @@ struct power_table { * cpufreq frequencies. * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device. * @node: list_head to link all cpufreq_cooling_device together. 
- * @last_load: load measured by the latest call to cpufreq_get_actual_power() + * @last_load: load measured by the latest call to cpufreq_get_requested_power() * @time_in_idle: previous reading of the absolute time that this cpu was idle * @time_in_idle_timestamp: wall time of the last invocation of * get_cpu_idle_time_us() diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c index 652acd8fbe48..e776cea80cfc 100644 --- a/drivers/thermal/db8500_thermal.c +++ b/drivers/thermal/db8500_thermal.c @@ -306,7 +306,7 @@ static void db8500_thermal_work(struct work_struct *work) if (cur_mode == THERMAL_DEVICE_DISABLED) return; - thermal_zone_device_update(pzone->therm_dev); + thermal_zone_device_update(pzone->therm_dev, THERMAL_EVENT_UNSPECIFIED); dev_dbg(&pzone->therm_dev->device, "thermal work finished.\n"); } diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c index 01f0015f80dc..81631b110e17 100644 --- a/drivers/thermal/devfreq_cooling.c +++ b/drivers/thermal/devfreq_cooling.c @@ -312,7 +312,7 @@ static int devfreq_cooling_state2power(struct thermal_cooling_device *cdev, unsigned long freq; u32 static_power; - if (state < 0 || state >= dfc->freq_table_size) + if (state >= dfc->freq_table_size) return -EINVAL; freq = dfc->freq_table[state]; diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c index bb118a152cbb..fc5e5057f0de 100644 --- a/drivers/thermal/gov_bang_bang.c +++ b/drivers/thermal/gov_bang_bang.c @@ -65,7 +65,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) if (instance->target == 0 && tz->temperature >= trip_temp) instance->target = 1; else if (instance->target == 1 && - tz->temperature < trip_temp - trip_hyst) + tz->temperature <= trip_temp - trip_hyst) instance->target = 0; dev_dbg(&instance->cdev->device, "target=%d\n", diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index 97fad8f51e1c..f6429666a1cf 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -237,7 +237,8 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev) if (!data->sensors[i].tzd) continue; - thermal_zone_device_update(data->sensors[i].tzd); + thermal_zone_device_update(data->sensors[i].tzd, + THERMAL_EVENT_UNSPECIFIED); } return IRQ_HANDLED; diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index e473548b5d28..06912f0602b7 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -246,7 +246,7 @@ static int imx_set_mode(struct thermal_zone_device *tz, } data->mode = mode; - thermal_zone_device_update(tz); + thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); return 0; } @@ -457,7 +457,7 @@ static irqreturn_t imx_thermal_alarm_irq_thread(int irq, void *dev) dev_dbg(&data->tz->device, "THERMAL ALARM: T > %d\n", data->alarm_temp / 1000); - thermal_zone_device_update(data->tz); + thermal_zone_device_update(data->tz, THERMAL_EVENT_UNSPECIFIED); return IRQ_HANDLED; } diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/int340x_thermal/int3402_thermal.c index 69df3d960303..8e90b3151a42 100644 --- a/drivers/thermal/int340x_thermal/int3402_thermal.c +++ b/drivers/thermal/int340x_thermal/int3402_thermal.c @@ -35,7 +35,8 @@ static void int3402_notify(acpi_handle handle, u32 event, void *data) case INT3402_PERF_CHANGED_EVENT: break; case INT3402_THERMAL_EVENT: - int340x_thermal_zone_device_update(priv->int340x_zone); + 
int340x_thermal_zone_device_update(priv->int340x_zone, + THERMAL_TRIP_VIOLATED); break; default: break; diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c index 50a7a08e3a15..c4890c9437eb 100644 --- a/drivers/thermal/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/int340x_thermal/int3403_thermal.c @@ -25,6 +25,7 @@ #define INT3403_TYPE_CHARGER 0x0B #define INT3403_TYPE_BATTERY 0x0C #define INT3403_PERF_CHANGED_EVENT 0x80 +#define INT3403_PERF_TRIP_POINT_CHANGED 0x81 #define INT3403_THERMAL_EVENT 0x90 /* Preserved structure for future expandbility */ @@ -72,7 +73,13 @@ static void int3403_notify(acpi_handle handle, case INT3403_PERF_CHANGED_EVENT: break; case INT3403_THERMAL_EVENT: - int340x_thermal_zone_device_update(obj->int340x_zone); + int340x_thermal_zone_device_update(obj->int340x_zone, + THERMAL_TRIP_VIOLATED); + break; + case INT3403_PERF_TRIP_POINT_CHANGED: + int340x_thermal_read_trips(obj->int340x_zone); + int340x_thermal_zone_device_update(obj->int340x_zone, + THERMAL_TRIP_CHANGED); break; default: dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c index b9b2666aa94c..145a5c53ff5c 100644 --- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c +++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c @@ -177,6 +177,42 @@ static int int340x_thermal_get_trip_config(acpi_handle handle, char *name, return 0; } +int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone) +{ + int trip_cnt = int34x_zone->aux_trip_nr; + int i; + + int34x_zone->crt_trip_id = -1; + if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT", + &int34x_zone->crt_temp)) + int34x_zone->crt_trip_id = trip_cnt++; + + int34x_zone->hot_trip_id = -1; + if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_HOT", + &int34x_zone->hot_temp)) + int34x_zone->hot_trip_id = trip_cnt++; + + int34x_zone->psv_trip_id = -1; + if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_PSV", + &int34x_zone->psv_temp)) + int34x_zone->psv_trip_id = trip_cnt++; + + for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) { + char name[5] = { '_', 'A', 'C', '0' + i, '\0' }; + + if (int340x_thermal_get_trip_config(int34x_zone->adev->handle, + name, + &int34x_zone->act_trips[i].temp)) + break; + + int34x_zone->act_trips[i].id = trip_cnt++; + int34x_zone->act_trips[i].valid = true; + } + + return trip_cnt; +} +EXPORT_SYMBOL_GPL(int340x_thermal_read_trips); + static struct thermal_zone_params int340x_thermal_params = { .governor_name = "user_space", .no_hwmon = true, @@ -188,7 +224,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, struct int34x_thermal_zone *int34x_thermal_zone; acpi_status status; unsigned long long trip_cnt; - int trip_mask = 0, i; + int trip_mask = 0; int ret; int34x_thermal_zone = kzalloc(sizeof(*int34x_thermal_zone), @@ -214,28 +250,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, int34x_thermal_zone->aux_trip_nr = trip_cnt; } - int34x_thermal_zone->crt_trip_id = -1; - if (!int340x_thermal_get_trip_config(adev->handle, "_CRT", - &int34x_thermal_zone->crt_temp)) - int34x_thermal_zone->crt_trip_id = trip_cnt++; - int34x_thermal_zone->hot_trip_id = -1; - if (!int340x_thermal_get_trip_config(adev->handle, "_HOT", - &int34x_thermal_zone->hot_temp)) - int34x_thermal_zone->hot_trip_id = trip_cnt++; 
- int34x_thermal_zone->psv_trip_id = -1; - if (!int340x_thermal_get_trip_config(adev->handle, "_PSV", - &int34x_thermal_zone->psv_temp)) - int34x_thermal_zone->psv_trip_id = trip_cnt++; - for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) { - char name[5] = { '_', 'A', 'C', '0' + i, '\0' }; + trip_cnt = int340x_thermal_read_trips(int34x_thermal_zone); - if (int340x_thermal_get_trip_config(adev->handle, name, - &int34x_thermal_zone->act_trips[i].temp)) - break; - - int34x_thermal_zone->act_trips[i].id = trip_cnt++; - int34x_thermal_zone->act_trips[i].valid = true; - } int34x_thermal_zone->lpat_table = acpi_lpat_get_conversion_table( adev->handle); diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h index aaadf724ff2e..5f3ba4775c5c 100644 --- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h +++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h @@ -46,6 +46,7 @@ struct int34x_thermal_zone { struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *, struct thermal_zone_device_ops *override_ops); void int340x_thermal_zone_remove(struct int34x_thermal_zone *); +int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone); static inline void int340x_thermal_zone_set_priv_data( struct int34x_thermal_zone *tzone, void *priv_data) @@ -60,9 +61,10 @@ static inline void *int340x_thermal_zone_get_priv_data( } static inline void int340x_thermal_zone_device_update( - struct int34x_thermal_zone *tzone) + struct int34x_thermal_zone *tzone, + enum thermal_notify_event event) { - thermal_zone_device_update(tzone->zone); + thermal_zone_device_update(tzone->zone, event); } #endif diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c index 42c1ac057bad..ff3b36f339e3 100644 --- a/drivers/thermal/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c @@ -258,7 +258,8 @@ static void proc_thermal_notify(acpi_handle handle, u32 event, void *data) switch (event) { case PROC_POWER_CAPABILITY_CHANGED: proc_thermal_read_ppcc(proc_priv); - int340x_thermal_zone_device_update(proc_priv->int340x_zone); + int340x_thermal_zone_device_update(proc_priv->int340x_zone, + THERMAL_DEVICE_POWER_CAPABILITY_CHANGED); break; default: dev_err(proc_priv->dev, "Unsupported event [0x%x]\n", event); diff --git a/drivers/thermal/intel_bxt_pmic_thermal.c b/drivers/thermal/intel_bxt_pmic_thermal.c new file mode 100644 index 000000000000..0f19a393ddd8 --- /dev/null +++ b/drivers/thermal/intel_bxt_pmic_thermal.c @@ -0,0 +1,300 @@ +/* + * Intel Broxton PMIC thermal driver + * + * Copyright (C) 2016 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/thermal.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/mfd/intel_soc_pmic.h> + +#define BXTWC_THRM0IRQ 0x4E04 +#define BXTWC_THRM1IRQ 0x4E05 +#define BXTWC_THRM2IRQ 0x4E06 +#define BXTWC_MTHRM0IRQ 0x4E12 +#define BXTWC_MTHRM1IRQ 0x4E13 +#define BXTWC_MTHRM2IRQ 0x4E14 +#define BXTWC_STHRM0IRQ 0x4F19 +#define BXTWC_STHRM1IRQ 0x4F1A +#define BXTWC_STHRM2IRQ 0x4F1B + +struct trip_config_map { + u16 irq_reg; + u16 irq_en; + u16 evt_stat; + u8 irq_mask; + u8 irq_en_mask; + u8 evt_mask; + u8 trip_num; +}; + +struct thermal_irq_map { + char handle[20]; + int num_trips; + const struct trip_config_map *trip_config; +}; + +struct pmic_thermal_data { + const struct thermal_irq_map *maps; + int num_maps; +}; + +static const struct trip_config_map bxtwc_str0_trip_config[] = { + { + .irq_reg = BXTWC_THRM0IRQ, + .irq_mask = 0x01, + .irq_en = BXTWC_MTHRM0IRQ, + .irq_en_mask = 0x01, + .evt_stat = BXTWC_STHRM0IRQ, + .evt_mask = 0x01, + .trip_num = 0 + }, + { + .irq_reg = BXTWC_THRM0IRQ, + .irq_mask = 0x10, + .irq_en = BXTWC_MTHRM0IRQ, + .irq_en_mask = 0x10, + .evt_stat = BXTWC_STHRM0IRQ, + .evt_mask = 0x10, + .trip_num = 1 + } +}; + +static const struct trip_config_map bxtwc_str1_trip_config[] = { + { + .irq_reg = BXTWC_THRM0IRQ, + .irq_mask = 0x02, + .irq_en = BXTWC_MTHRM0IRQ, + .irq_en_mask = 0x02, + .evt_stat = BXTWC_STHRM0IRQ, + .evt_mask = 0x02, + .trip_num = 0 + }, + { + .irq_reg = BXTWC_THRM0IRQ, + .irq_mask = 0x20, + .irq_en = BXTWC_MTHRM0IRQ, + .irq_en_mask = 0x20, + .evt_stat = BXTWC_STHRM0IRQ, + .evt_mask = 0x20, + .trip_num = 1 + }, +}; + +static const struct trip_config_map bxtwc_str2_trip_config[] = { + { + .irq_reg = BXTWC_THRM0IRQ, + .irq_mask = 0x04, + .irq_en = BXTWC_MTHRM0IRQ, + .irq_en_mask = 0x04, + .evt_stat = BXTWC_STHRM0IRQ, + .evt_mask = 0x04, + .trip_num = 0 + }, + { + .irq_reg = BXTWC_THRM0IRQ, + .irq_mask = 0x40, + .irq_en = BXTWC_MTHRM0IRQ, + .irq_en_mask = 0x40, + .evt_stat = BXTWC_STHRM0IRQ, + .evt_mask = 0x40, + .trip_num = 1 + }, +}; + +static const struct trip_config_map bxtwc_str3_trip_config[] = { + { + .irq_reg = BXTWC_THRM2IRQ, + .irq_mask = 0x10, + .irq_en = BXTWC_MTHRM2IRQ, + .irq_en_mask = 0x10, + .evt_stat = BXTWC_STHRM2IRQ, + .evt_mask = 0x10, + .trip_num = 0 + }, +}; + +static const struct thermal_irq_map bxtwc_thermal_irq_map[] = { + { + .handle = "STR0", + .trip_config = bxtwc_str0_trip_config, + .num_trips = ARRAY_SIZE(bxtwc_str0_trip_config), + }, + { + .handle = "STR1", + .trip_config = bxtwc_str1_trip_config, + .num_trips = ARRAY_SIZE(bxtwc_str1_trip_config), + }, + { + .handle = "STR2", + .trip_config = bxtwc_str2_trip_config, + .num_trips = ARRAY_SIZE(bxtwc_str2_trip_config), + }, + { + .handle = "STR3", + .trip_config = bxtwc_str3_trip_config, + .num_trips = ARRAY_SIZE(bxtwc_str3_trip_config), + }, +}; + +static const struct pmic_thermal_data bxtwc_thermal_data = { + .maps = bxtwc_thermal_irq_map, + .num_maps = ARRAY_SIZE(bxtwc_thermal_irq_map), +}; + +static irqreturn_t pmic_thermal_irq_handler(int irq, void *data) +{ + struct platform_device *pdev = data; + struct thermal_zone_device *tzd; + struct pmic_thermal_data *td; + struct intel_soc_pmic *pmic; + struct regmap *regmap; + u8 reg_val, mask, irq_stat, trip; + u16 reg, evt_stat_reg; + int i, j, ret; + + pmic = dev_get_drvdata(pdev->dev.parent); + regmap = pmic->regmap; + td = 
(struct pmic_thermal_data *) + platform_get_device_id(pdev)->driver_data; + + /* Resolve thermal irqs */ + for (i = 0; i < td->num_maps; i++) { + for (j = 0; j < td->maps[i].num_trips; j++) { + reg = td->maps[i].trip_config[j].irq_reg; + mask = td->maps[i].trip_config[j].irq_mask; + /* + * Read the irq register to resolve whether the + * interrupt was triggered for this sensor + */ + if (regmap_read(regmap, reg, &ret)) + return IRQ_HANDLED; + + reg_val = (u8)ret; + irq_stat = ((u8)ret & mask); + + if (!irq_stat) + continue; + + /* + * Read the status register to find out what + * event occurred i.e a high or a low + */ + evt_stat_reg = td->maps[i].trip_config[j].evt_stat; + if (regmap_read(regmap, evt_stat_reg, &ret)) + return IRQ_HANDLED; + + trip = td->maps[i].trip_config[j].trip_num; + tzd = thermal_zone_get_zone_by_name(td->maps[i].handle); + if (!IS_ERR(tzd)) + thermal_zone_device_update(tzd, + THERMAL_EVENT_UNSPECIFIED); + + /* Clear the appropriate irq */ + regmap_write(regmap, reg, reg_val & mask); + } + } + + return IRQ_HANDLED; +} + +static int pmic_thermal_probe(struct platform_device *pdev) +{ + struct regmap_irq_chip_data *regmap_irq_chip; + struct pmic_thermal_data *thermal_data; + int ret, irq, virq, i, j, pmic_irq_count; + struct intel_soc_pmic *pmic; + struct regmap *regmap; + struct device *dev; + u16 reg; + u8 mask; + + dev = &pdev->dev; + pmic = dev_get_drvdata(pdev->dev.parent); + if (!pmic) { + dev_err(dev, "Failed to get struct intel_soc_pmic pointer\n"); + return -ENODEV; + } + + thermal_data = (struct pmic_thermal_data *) + platform_get_device_id(pdev)->driver_data; + if (!thermal_data) { + dev_err(dev, "No thermal data initialized!!\n"); + return -ENODEV; + } + + regmap = pmic->regmap; + regmap_irq_chip = pmic->irq_chip_data_level2; + + pmic_irq_count = 0; + while ((irq = platform_get_irq(pdev, pmic_irq_count)) != -ENXIO) { + virq = regmap_irq_get_virq(regmap_irq_chip, irq); + if (virq < 0) { + dev_err(dev, "failed to get virq by irq %d\n", irq); + return virq; + } + + ret = devm_request_threaded_irq(&pdev->dev, virq, + NULL, pmic_thermal_irq_handler, + IRQF_ONESHOT, "pmic_thermal", pdev); + + if (ret) { + dev_err(dev, "request irq(%d) failed: %d\n", virq, ret); + return ret; + } + pmic_irq_count++; + } + + /* Enable thermal interrupts */ + for (i = 0; i < thermal_data->num_maps; i++) { + for (j = 0; j < thermal_data->maps[i].num_trips; j++) { + reg = thermal_data->maps[i].trip_config[j].irq_en; + mask = thermal_data->maps[i].trip_config[j].irq_en_mask; + ret = regmap_update_bits(regmap, reg, mask, 0x00); + if (ret) + return ret; + } + } + + return 0; +} + +static const struct platform_device_id pmic_thermal_id_table[] = { + { + .name = "bxt_wcove_thermal", + .driver_data = (kernel_ulong_t)&bxtwc_thermal_data, + }, + {}, +}; + +static struct platform_driver pmic_thermal_driver = { + .probe = pmic_thermal_probe, + .driver = { + .name = "pmic_thermal", + }, + .id_table = pmic_thermal_id_table, +}; + +MODULE_DEVICE_TABLE(platform, pmic_thermal_id_table); +module_platform_driver(pmic_thermal_driver); + +MODULE_AUTHOR("Yegnesh S Iyer <yegnesh.s.iyer@intel.com>"); +MODULE_DESCRIPTION("Intel Broxton PMIC Thermal Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/intel_soc_dts_iosf.c b/drivers/thermal/intel_soc_dts_iosf.c index f72e1db3216f..e0813dfaa278 100644 --- a/drivers/thermal/intel_soc_dts_iosf.c +++ b/drivers/thermal/intel_soc_dts_iosf.c @@ -391,7 +391,8 @@ void intel_soc_dts_iosf_interrupt_handler(struct intel_soc_dts_sensors *sensors) for (i = 0; 
i < SOC_MAX_DTS_SENSORS; ++i) {
 			pr_debug("TZD update for zone %d\n", i);
-			thermal_zone_device_update(sensors->soc_dts[i].tzone);
+			thermal_zone_device_update(sensors->soc_dts[i].tzone,
+						   THERMAL_EVENT_UNSPECIFIED);
 		}
 	} else
 		spin_unlock_irqrestore(&sensors->intr_notify_lock, flags);
 }
diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c
new file mode 100644
index 000000000000..83905ff46e40
--- /dev/null
+++ b/drivers/thermal/max77620_thermal.c
@@ -0,0 +1,166 @@
+/*
+ * Junction temperature thermal driver for Maxim Max77620.
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *	   Mallikarjun Kasoju <mkasoju@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max77620.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#define MAX77620_NORMAL_OPERATING_TEMP	100000
+#define MAX77620_TJALARM1_TEMP		120000
+#define MAX77620_TJALARM2_TEMP		140000
+
+struct max77620_therm_info {
+	struct device			*dev;
+	struct regmap			*rmap;
+	struct thermal_zone_device	*tz_device;
+	int				irq_tjalarm1;
+	int				irq_tjalarm2;
+};
+
+/**
+ * max77620_thermal_read_temp: Read PMIC die temperature.
+ * @data: Device specific data.
+ * @temp: Temperature in millidegrees Celsius.
+ *
+ * The actual die temperature is not available from the PMIC; it only
+ * reports whether the temperature has crossed the threshold levels of
+ * 120degC and 140degC. If neither threshold has been crossed, assume a
+ * die temperature of 100degC; otherwise report 120degC or 140degC based
+ * on the threshold status.
+ *
+ * Return: 0 on success, otherwise a negative error number.
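+ * (Because the temperature is inferred from the alarm status bits, the
+ * value reported through @temp is always one of 100000, 120000 or 140000.)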
+ */ + +static int max77620_thermal_read_temp(void *data, int *temp) +{ + struct max77620_therm_info *mtherm = data; + unsigned int val; + int ret; + + ret = regmap_read(mtherm->rmap, MAX77620_REG_STATLBT, &val); + if (ret < 0) { + dev_err(mtherm->dev, "Failed to read STATLBT: %d\n", ret); + return ret; + } + + if (val & MAX77620_IRQ_TJALRM2_MASK) + *temp = MAX77620_TJALARM2_TEMP; + else if (val & MAX77620_IRQ_TJALRM1_MASK) + *temp = MAX77620_TJALARM1_TEMP; + else + *temp = MAX77620_NORMAL_OPERATING_TEMP; + + return 0; +} + +static const struct thermal_zone_of_device_ops max77620_thermal_ops = { + .get_temp = max77620_thermal_read_temp, +}; + +static irqreturn_t max77620_thermal_irq(int irq, void *data) +{ + struct max77620_therm_info *mtherm = data; + + if (irq == mtherm->irq_tjalarm1) + dev_warn(mtherm->dev, "Junction Temp Alarm1(120C) occurred\n"); + else if (irq == mtherm->irq_tjalarm2) + dev_crit(mtherm->dev, "Junction Temp Alarm2(140C) occurred\n"); + + thermal_zone_device_update(mtherm->tz_device, + THERMAL_EVENT_UNSPECIFIED); + + return IRQ_HANDLED; +} + +static int max77620_thermal_probe(struct platform_device *pdev) +{ + struct max77620_therm_info *mtherm; + int ret; + + mtherm = devm_kzalloc(&pdev->dev, sizeof(*mtherm), GFP_KERNEL); + if (!mtherm) + return -ENOMEM; + + mtherm->irq_tjalarm1 = platform_get_irq(pdev, 0); + mtherm->irq_tjalarm2 = platform_get_irq(pdev, 1); + if ((mtherm->irq_tjalarm1 < 0) || (mtherm->irq_tjalarm2 < 0)) { + dev_err(&pdev->dev, "Alarm irq number not available\n"); + return -EINVAL; + } + + pdev->dev.of_node = pdev->dev.parent->of_node; + + mtherm->dev = &pdev->dev; + mtherm->rmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!mtherm->rmap) { + dev_err(&pdev->dev, "Failed to get parent regmap\n"); + return -ENODEV; + } + + mtherm->tz_device = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, + mtherm, &max77620_thermal_ops); + if (IS_ERR(mtherm->tz_device)) { + ret = PTR_ERR(mtherm->tz_device); + dev_err(&pdev->dev, "Failed to register thermal zone: %d\n", + ret); + return ret; + } + + ret = devm_request_threaded_irq(&pdev->dev, mtherm->irq_tjalarm1, NULL, + max77620_thermal_irq, + IRQF_ONESHOT | IRQF_SHARED, + dev_name(&pdev->dev), mtherm); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to request irq1: %d\n", ret); + return ret; + } + + ret = devm_request_threaded_irq(&pdev->dev, mtherm->irq_tjalarm2, NULL, + max77620_thermal_irq, + IRQF_ONESHOT | IRQF_SHARED, + dev_name(&pdev->dev), mtherm); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to request irq2: %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, mtherm); + + return 0; +} + +static struct platform_device_id max77620_thermal_devtype[] = { + { .name = "max77620-thermal", }, + {}, +}; + +static struct platform_driver max77620_thermal_driver = { + .driver = { + .name = "max77620-thermal", + }, + .probe = max77620_thermal_probe, + .id_table = max77620_thermal_devtype, +}; + +module_platform_driver(max77620_thermal_driver); + +MODULE_DESCRIPTION("Max77620 Junction temperature Thermal driver"); +MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); +MODULE_AUTHOR("Mallikarjun Kasoju <mkasoju@nvidia.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c index 262ab0a2266f..34169c32d495 100644 --- a/drivers/thermal/mtk_thermal.c +++ b/drivers/thermal/mtk_thermal.c @@ -2,6 +2,7 @@ * Copyright (c) 2015 MediaTek Inc. 
* Author: Hanyi Wu <hanyi.wu@mediatek.com>
 *         Sascha Hauer <s.hauer@pengutronix.de>
+ *         Dawei Chien <dawei.chien@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -21,6 +22,7 @@
 #include <linux/nvmem-consumer.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/io.h>
@@ -88,6 +90,7 @@
 #define TEMP_ADCVALIDMASK_VALID_HIGH		BIT(5)
 #define TEMP_ADCVALIDMASK_VALID_POS(bit)	(bit)
 
+/* MT8173 thermal sensors */
 #define MT8173_TS1	0
 #define MT8173_TS2	1
 #define MT8173_TS3	2
@@ -106,7 +109,12 @@
 /* The number of sensing points per bank */
 #define MT8173_NUM_SENSORS_PER_ZONE	4
 
-/* Layout of the fuses providing the calibration data */
+/*
+ * Layout of the fuses providing the calibration data
+ * These macros could be used for both MT8173 and MT2701.
+ * MT8173 has five sensors and need five VTS calibration data,
+ * and MT2701 has three sensors and need three VTS calibration data.
+ */
 #define MT8173_CALIB_BUF0_VALID		BIT(0)
 #define MT8173_CALIB_BUF1_ADC_GE(x)	(((x) >> 22) & 0x3ff)
 #define MT8173_CALIB_BUF0_VTS_TS1(x)	(((x) >> 17) & 0x1ff)
@@ -117,24 +125,50 @@
 #define MT8173_CALIB_BUF0_DEGC_CALI(x)	(((x) >> 1) & 0x3f)
 #define MT8173_CALIB_BUF0_O_SLOPE(x)	(((x) >> 26) & 0x3f)
 
+/* MT2701 thermal sensors */
+#define MT2701_TS1	0
+#define MT2701_TS2	1
+#define MT2701_TSABB	2
+
+/* AUXADC channel 11 is used for the temperature sensors */
+#define MT2701_TEMP_AUXADC_CHANNEL	11
+
+/* The total number of temperature sensors in the MT2701 */
+#define MT2701_NUM_SENSORS	3
+
 #define THERMAL_NAME    "mtk-thermal"
 
+/* The number of sensing points per bank */
+#define MT2701_NUM_SENSORS_PER_ZONE	3
+
 struct mtk_thermal;
 
+struct thermal_bank_cfg {
+	unsigned int num_sensors;
+	const int *sensors;
+};
+
 struct mtk_thermal_bank {
 	struct mtk_thermal *mt;
 	int id;
 };
 
+struct mtk_thermal_data {
+	s32 num_banks;
+	s32 num_sensors;
+	s32 auxadc_channel;
+	const int *sensor_mux_values;
+	const int *msr;
+	const int *adcpnp;
+	struct thermal_bank_cfg bank_data[];
+};
+
 struct mtk_thermal {
 	struct device *dev;
 	void __iomem *thermal_base;
 
 	struct clk *clk_peri_therm;
 	struct clk *clk_auxadc;
-
-	struct mtk_thermal_bank banks[MT8173_NUM_ZONES];
-
 	/* lock: for getting and putting banks */
 	struct mutex lock;
@@ -144,16 +178,44 @@ struct mtk_thermal {
 	s32 o_slope;
 	s32 vts[MT8173_NUM_SENSORS];
 
+	const struct mtk_thermal_data *conf;
+	struct mtk_thermal_bank banks[];
 };
 
-struct mtk_thermal_bank_cfg {
-	unsigned int num_sensors;
-	unsigned int sensors[MT8173_NUM_SENSORS_PER_ZONE];
+/* MT8173 thermal sensor data */
+const int mt8173_bank_data[MT8173_NUM_ZONES][3] = {
+	{ MT8173_TS2, MT8173_TS3 },
+	{ MT8173_TS2, MT8173_TS4 },
+	{ MT8173_TS1, MT8173_TS2, MT8173_TSABB },
+	{ MT8173_TS2 },
 };
 
-static const int sensor_mux_values[MT8173_NUM_SENSORS] = { 0, 1, 2, 3, 16 };
+const int mt8173_msr[MT8173_NUM_SENSORS_PER_ZONE] = {
+	TEMP_MSR0, TEMP_MSR1, TEMP_MSR2, TEMP_MSR3
+};
 
-/*
+const int mt8173_adcpnp[MT8173_NUM_SENSORS_PER_ZONE] = {
+	TEMP_ADCPNP0, TEMP_ADCPNP1, TEMP_ADCPNP2, TEMP_ADCPNP3
+};
+
+const int mt8173_mux_values[MT8173_NUM_SENSORS] = { 0, 1, 2, 3, 16 };
+
+/* MT2701 thermal sensor data */
+const int mt2701_bank_data[MT2701_NUM_SENSORS] = {
+	MT2701_TS1, MT2701_TS2, MT2701_TSABB
+};
+
+const int mt2701_msr[MT2701_NUM_SENSORS_PER_ZONE] = {
+	TEMP_MSR0, TEMP_MSR1, TEMP_MSR2
+};
+
+const int
mt2701_adcpnp[MT2701_NUM_SENSORS_PER_ZONE] = { + TEMP_ADCPNP0, TEMP_ADCPNP1, TEMP_ADCPNP2 +}; + +const int mt2701_mux_values[MT2701_NUM_SENSORS] = { 0, 1, 16 }; + +/** * The MT8173 thermal controller has four banks. Each bank can read up to * four temperature sensors simultaneously. The MT8173 has a total of 5 * temperature sensors. We use each bank to measure a certain area of the @@ -166,42 +228,53 @@ static const int sensor_mux_values[MT8173_NUM_SENSORS] = { 0, 1, 2, 3, 16 }; * data, and this indeed needs the temperatures of the individual banks * for making better decisions. */ -static const struct mtk_thermal_bank_cfg bank_data[] = { - { - .num_sensors = 2, - .sensors = { MT8173_TS2, MT8173_TS3 }, - }, { - .num_sensors = 2, - .sensors = { MT8173_TS2, MT8173_TS4 }, - }, { - .num_sensors = 3, - .sensors = { MT8173_TS1, MT8173_TS2, MT8173_TSABB }, - }, { - .num_sensors = 1, - .sensors = { MT8173_TS2 }, +static const struct mtk_thermal_data mt8173_thermal_data = { + .auxadc_channel = MT8173_TEMP_AUXADC_CHANNEL, + .num_banks = MT8173_NUM_ZONES, + .num_sensors = MT8173_NUM_SENSORS, + .bank_data = { + { + .num_sensors = 2, + .sensors = mt8173_bank_data[0], + }, { + .num_sensors = 2, + .sensors = mt8173_bank_data[1], + }, { + .num_sensors = 3, + .sensors = mt8173_bank_data[2], + }, { + .num_sensors = 1, + .sensors = mt8173_bank_data[3], + }, }, + .msr = mt8173_msr, + .adcpnp = mt8173_adcpnp, + .sensor_mux_values = mt8173_mux_values, }; -struct mtk_thermal_sense_point { - int msr; - int adcpnp; -}; - -static const struct mtk_thermal_sense_point - sensing_points[MT8173_NUM_SENSORS_PER_ZONE] = { - { - .msr = TEMP_MSR0, - .adcpnp = TEMP_ADCPNP0, - }, { - .msr = TEMP_MSR1, - .adcpnp = TEMP_ADCPNP1, - }, { - .msr = TEMP_MSR2, - .adcpnp = TEMP_ADCPNP2, - }, { - .msr = TEMP_MSR3, - .adcpnp = TEMP_ADCPNP3, +/** + * The MT2701 thermal controller has one bank, which can read up to + * three temperature sensors simultaneously. The MT2701 has a total of 3 + * temperature sensors. + * + * The thermal core only gets the maximum temperature of this one bank, + * so the bank concept wouldn't be necessary here. However, the SVS (Smart + * Voltage Scaling) unit makes its decisions based on the same bank + * data. 
+ */ +static const struct mtk_thermal_data mt2701_thermal_data = { + .auxadc_channel = MT2701_TEMP_AUXADC_CHANNEL, + .num_banks = 1, + .num_sensors = MT2701_NUM_SENSORS, + .bank_data = { + { + .num_sensors = 3, + .sensors = mt2701_bank_data, + }, }, + .msr = mt2701_msr, + .adcpnp = mt2701_adcpnp, + .sensor_mux_values = mt2701_mux_values, }; /** @@ -270,13 +343,16 @@ static void mtk_thermal_put_bank(struct mtk_thermal_bank *bank) static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank) { struct mtk_thermal *mt = bank->mt; + const struct mtk_thermal_data *conf = mt->conf; int i, temp = INT_MIN, max = INT_MIN; u32 raw; - for (i = 0; i < bank_data[bank->id].num_sensors; i++) { - raw = readl(mt->thermal_base + sensing_points[i].msr); + for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) { + raw = readl(mt->thermal_base + conf->msr[i]); - temp = raw_to_mcelsius(mt, bank_data[bank->id].sensors[i], raw); + temp = raw_to_mcelsius(mt, + conf->bank_data[bank->id].sensors[i], + raw); /* * The first read of a sensor often contains very high bogus @@ -299,7 +375,7 @@ static int mtk_read_temp(void *data, int *temperature) int i; int tempmax = INT_MIN; - for (i = 0; i < MT8173_NUM_ZONES; i++) { + for (i = 0; i < mt->conf->num_banks; i++) { struct mtk_thermal_bank *bank = &mt->banks[i]; mtk_thermal_get_bank(bank); @@ -322,7 +398,7 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num, u32 apmixed_phys_base, u32 auxadc_phys_base) { struct mtk_thermal_bank *bank = &mt->banks[num]; - const struct mtk_thermal_bank_cfg *cfg = &bank_data[num]; + const struct mtk_thermal_data *conf = mt->conf; int i; bank->id = num; @@ -368,7 +444,7 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num, * this value will be stored to TEMP_PNPMUXADDR (TEMP_SPARE0) * automatically by hw */ - writel(BIT(MT8173_TEMP_AUXADC_CHANNEL), mt->thermal_base + TEMP_ADCMUX); + writel(BIT(conf->auxadc_channel), mt->thermal_base + TEMP_ADCMUX); /* AHB address for auxadc mux selection */ writel(auxadc_phys_base + AUXADC_CON1_CLR_V, @@ -379,18 +455,18 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num, mt->thermal_base + TEMP_PNPMUXADDR); /* AHB value for auxadc enable */ - writel(BIT(MT8173_TEMP_AUXADC_CHANNEL), mt->thermal_base + TEMP_ADCEN); + writel(BIT(conf->auxadc_channel), mt->thermal_base + TEMP_ADCEN); /* AHB address for auxadc enable (channel 0 immediate mode selected) */ writel(auxadc_phys_base + AUXADC_CON1_SET_V, mt->thermal_base + TEMP_ADCENADDR); /* AHB address for auxadc valid bit */ - writel(auxadc_phys_base + AUXADC_DATA(MT8173_TEMP_AUXADC_CHANNEL), + writel(auxadc_phys_base + AUXADC_DATA(conf->auxadc_channel), mt->thermal_base + TEMP_ADCVALIDADDR); /* AHB address for auxadc voltage output */ - writel(auxadc_phys_base + AUXADC_DATA(MT8173_TEMP_AUXADC_CHANNEL), + writel(auxadc_phys_base + AUXADC_DATA(conf->auxadc_channel), mt->thermal_base + TEMP_ADCVOLTADDR); /* read valid & voltage are at the same register */ @@ -407,11 +483,12 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num, writel(TEMP_ADCWRITECTRL_ADC_MUX_WRITE, mt->thermal_base + TEMP_ADCWRITECTRL); - for (i = 0; i < cfg->num_sensors; i++) - writel(sensor_mux_values[cfg->sensors[i]], - mt->thermal_base + sensing_points[i].adcpnp); + for (i = 0; i < conf->bank_data[num].num_sensors; i++) + writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]], + mt->thermal_base + conf->adcpnp[i]); - writel((1 << cfg->num_sensors) - 1, mt->thermal_base + TEMP_MONCTL0); + writel((1 << 
conf->bank_data[num].num_sensors) - 1, + mt->thermal_base + TEMP_MONCTL0); writel(TEMP_ADCWRITECTRL_ADC_PNP_WRITE | TEMP_ADCWRITECTRL_ADC_MUX_WRITE, @@ -442,7 +519,7 @@ static int mtk_thermal_get_calibration_data(struct device *dev, /* Start with default values */ mt->adc_ge = 512; - for (i = 0; i < MT8173_NUM_SENSORS; i++) + for (i = 0; i < mt->conf->num_sensors; i++) mt->vts[i] = 260; mt->degc_cali = 40; mt->o_slope = 0; @@ -486,18 +563,37 @@ out: return ret; } +static const struct of_device_id mtk_thermal_of_match[] = { + { + .compatible = "mediatek,mt8173-thermal", + .data = (void *)&mt8173_thermal_data, + }, + { + .compatible = "mediatek,mt2701-thermal", + .data = (void *)&mt2701_thermal_data, + }, { + }, +}; +MODULE_DEVICE_TABLE(of, mtk_thermal_of_match); + static int mtk_thermal_probe(struct platform_device *pdev) { int ret, i; struct device_node *auxadc, *apmixedsys, *np = pdev->dev.of_node; struct mtk_thermal *mt; struct resource *res; + const struct of_device_id *of_id; u64 auxadc_phys_base, apmixed_phys_base; + struct thermal_zone_device *tzdev; mt = devm_kzalloc(&pdev->dev, sizeof(*mt), GFP_KERNEL); if (!mt) return -ENOMEM; + of_id = of_match_device(mtk_thermal_of_match, &pdev->dev); + if (of_id) + mt->conf = (const struct mtk_thermal_data *)of_id->data; + mt->clk_peri_therm = devm_clk_get(&pdev->dev, "therm"); if (IS_ERR(mt->clk_peri_therm)) return PTR_ERR(mt->clk_peri_therm); @@ -565,17 +661,23 @@ static int mtk_thermal_probe(struct platform_device *pdev) goto err_disable_clk_auxadc; } - for (i = 0; i < MT8173_NUM_ZONES; i++) + for (i = 0; i < mt->conf->num_banks; i++) mtk_thermal_init_bank(mt, i, apmixed_phys_base, auxadc_phys_base); platform_set_drvdata(pdev, mt); - devm_thermal_zone_of_sensor_register(&pdev->dev, 0, mt, - &mtk_thermal_ops); + tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, mt, + &mtk_thermal_ops); + if (IS_ERR(tzdev)) { + ret = PTR_ERR(tzdev); + goto err_disable_clk_peri_therm; + } return 0; +err_disable_clk_peri_therm: + clk_disable_unprepare(mt->clk_peri_therm); err_disable_clk_auxadc: clk_disable_unprepare(mt->clk_auxadc); @@ -592,13 +694,6 @@ static int mtk_thermal_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id mtk_thermal_of_match[] = { - { - .compatible = "mediatek,mt8173-thermal", - }, { - }, -}; - static struct platform_driver mtk_thermal_driver = { .probe = mtk_thermal_probe, .remove = mtk_thermal_remove, @@ -610,6 +705,7 @@ static struct platform_driver mtk_thermal_driver = { module_platform_driver(mtk_thermal_driver); +MODULE_AUTHOR("Dawei Chien <dawei.chien@mediatek.com>"); MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>"); MODULE_DESCRIPTION("Mediatek thermal driver"); diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index b8e509c60848..d04ec3b9e5ff 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -101,6 +101,17 @@ static int of_thermal_get_temp(struct thermal_zone_device *tz, return data->ops->get_temp(data->sensor_data, temp); } +static int of_thermal_set_trips(struct thermal_zone_device *tz, + int low, int high) +{ + struct __thermal_zone *data = tz->devdata; + + if (!data->ops || !data->ops->set_trips) + return -EINVAL; + + return data->ops->set_trips(data->sensor_data, low, high); +} + /** * of_thermal_get_ntrips - function to export number of available trip * points. 
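The set_trips hook added to of-thermal above replaces per-trip programming with a single low/high window that the core recomputes after every zone update. A minimal sketch of the sensor-driver side, assuming a hypothetical device with low/high alarm comparator registers; all foo_* names and FOO_* offsets are invented for illustration, only the ops contract is from the patch:

/*
 * Illustrative sketch only: a sensor driver wiring up the optional
 * .set_trips callback that of-thermal now forwards to the hardware.
 */
#include <linux/regmap.h>
#include <linux/thermal.h>

#define FOO_TEMP_REG		0x00	/* hypothetical register */
#define FOO_ALARM_LOW_REG	0x04	/* hypothetical register */
#define FOO_ALARM_HIGH_REG	0x08	/* hypothetical register */

struct foo_sensor {
	struct regmap *map;
	struct thermal_zone_device *tzd;
};

static int foo_get_temp(void *data, int *temp)
{
	struct foo_sensor *s = data;
	unsigned int raw;
	int ret;

	ret = regmap_read(s->map, FOO_TEMP_REG, &raw);
	if (ret)
		return ret;

	*temp = raw * 1000;	/* device reports whole degrees C */
	return 0;
}

/* The core passes a window (in millidegrees C) around the trips of interest. */
static int foo_set_trips(void *data, int low, int high)
{
	struct foo_sensor *s = data;
	int ret;

	ret = regmap_write(s->map, FOO_ALARM_LOW_REG, low / 1000);
	if (ret)
		return ret;

	return regmap_write(s->map, FOO_ALARM_HIGH_REG, high / 1000);
}

static const struct thermal_zone_of_device_ops foo_thermal_ops = {
	.get_temp  = foo_get_temp,
	.set_trips = foo_set_trips,	/* optional; only hooked up if non-NULL */
};

When the alarm fires, such a driver calls thermal_zone_device_update(), and the core re-reads the temperature and programs a fresh window through the same callback.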
@@ -181,9 +192,6 @@ static int of_thermal_set_emul_temp(struct thermal_zone_device *tz,
 {
 	struct __thermal_zone *data = tz->devdata;
 
-	if (!data->ops || !data->ops->set_emul_temp)
-		return -EINVAL;
-
 	return data->ops->set_emul_temp(data->sensor_data, temp);
 }
 
@@ -191,25 +199,11 @@ static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
 				enum thermal_trend *trend)
 {
 	struct __thermal_zone *data = tz->devdata;
-	long dev_trend;
-	int r;
 
 	if (!data->ops->get_trend)
 		return -EINVAL;
 
-	r = data->ops->get_trend(data->sensor_data, &dev_trend);
-	if (r)
-		return r;
-
-	/* TODO: These intervals might have some thresholds, but in core code */
-	if (dev_trend > 0)
-		*trend = THERMAL_TREND_RAISING;
-	else if (dev_trend < 0)
-		*trend = THERMAL_TREND_DROPPING;
-	else
-		*trend = THERMAL_TREND_STABLE;
-
-	return 0;
+	return data->ops->get_trend(data->sensor_data, trip, trend);
 }
 
 static int of_thermal_bind(struct thermal_zone_device *thermal,
@@ -292,7 +286,7 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz,
 	mutex_unlock(&tz->lock);
 
 	data->mode = mode;
-	thermal_zone_device_update(tz);
+	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 
 	return 0;
 }
@@ -427,7 +421,17 @@ thermal_zone_of_add_sensor(struct device_node *zone,
 
 	tzd->ops->get_temp = of_thermal_get_temp;
 	tzd->ops->get_trend = of_thermal_get_trend;
-	tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
+
+	/*
+	 * The thermal zone core will compute the trip window if the sensor
+	 * driver has set the optional set_trips pointer.
+	 */
+	if (ops->set_trips)
+		tzd->ops->set_trips = of_thermal_set_trips;
+
+	if (ops->set_emul_temp)
+		tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
+
 	mutex_unlock(&tzd->lock);
 
 	return tzd;
@@ -596,7 +600,7 @@ static int devm_thermal_zone_of_sensor_match(struct device *dev, void *res,
 * Return: On success returns a valid struct thermal_zone_device,
 * otherwise, it returns a corresponding ERR_PTR(). Caller must
 * check the return value with help of IS_ERR() helper.
- * Registered hermal_zone_device device will automatically be
+ * Registered thermal_zone_device will automatically be
 * released when the device is unbound.
 */
struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index f8a3c60bef94..819c6d5d7aa7 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -150,7 +150,7 @@ static irqreturn_t qpnp_tm_isr(int irq, void *data)
 {
 	struct qpnp_tm_chip *chip = data;
 
-	thermal_zone_device_update(chip->tz_dev);
+	thermal_zone_device_update(chip->tz_dev, THERMAL_EVENT_UNSPECIFIED);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
new file mode 100644
index 000000000000..be32e5abce3c
--- /dev/null
+++ b/drivers/thermal/qcom/Kconfig
@@ -0,0 +1,11 @@
+config QCOM_TSENS
+	tristate "Qualcomm TSENS Temperature Alarm"
+	depends on THERMAL
+	depends on QCOM_QFPROM
+	depends on ARCH_QCOM || COMPILE_TEST
+	help
+	  This enables the thermal sysfs driver for the TSENS device. It shows
+	  up in sysfs as a thermal zone with multiple trip points. Disabling the
+	  thermal zone device via the mode file results in disabling the sensor.
+	  The driver can also set threshold temperatures for both hot and cold
+	  trips and update the thermal zone when a threshold is reached.
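The tsens-8916.c and tsens-8974.c files below spend most of their lines pulling per-sensor calibration points out of packed QFPROM words with mask/shift pairs. The pattern is easier to see in a reduced, standalone form; the word layout here is invented for illustration and is not any SoC's real fuse map:

/*
 * Illustrative sketch of the fuse-decode pattern used by the tsens
 * calibration code below: each per-sensor point is a small bitfield
 * packed into 32-bit fuse words, stored as an offset from a shared base.
 * The masks, shifts, and example fuse word here are all made up; the real
 * layouts are per-SoC (see tsens-8916.c and tsens-8974.c).
 */
#include <stdint.h>
#include <stdio.h>

#define S0_P1_MASK	0x0000001f	/* bits [4:0]   - invented layout */
#define S0_P1_SHIFT	0
#define S1_P1_MASK	0x000003e0	/* bits [9:5]   - invented layout */
#define S1_P1_SHIFT	5
#define BASE_MASK	0x0007fc00	/* bits [18:10] - invented layout */
#define BASE_SHIFT	10

int main(void)
{
	uint32_t fuse = 0x00052c8a;	/* example fuse word */
	uint32_t base, p1[2];
	int i;

	base  = (fuse & BASE_MASK) >> BASE_SHIFT;
	p1[0] = (fuse & S0_P1_MASK) >> S0_P1_SHIFT;
	p1[1] = (fuse & S1_P1_MASK) >> S1_P1_SHIFT;

	/* As in calibrate_8916(): add the shared base, then scale */
	for (i = 0; i < 2; i++)
		p1[i] = (base + p1[i]) << 3;

	for (i = 0; i < 2; i++)
		printf("sensor%d p1 = %u\n", i, p1[i]);
	return 0;
}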
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile new file mode 100644 index 000000000000..2cc2193637e7 --- /dev/null +++ b/drivers/thermal/qcom/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o +qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-8996.o diff --git a/drivers/thermal/qcom/tsens-8916.c b/drivers/thermal/qcom/tsens-8916.c new file mode 100644 index 000000000000..fdf561b8b81d --- /dev/null +++ b/drivers/thermal/qcom/tsens-8916.c @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/platform_device.h> +#include "tsens.h" + +/* eeprom layout data for 8916 */ +#define BASE0_MASK 0x0000007f +#define BASE1_MASK 0xfe000000 +#define BASE0_SHIFT 0 +#define BASE1_SHIFT 25 + +#define S0_P1_MASK 0x00000f80 +#define S1_P1_MASK 0x003e0000 +#define S2_P1_MASK 0xf8000000 +#define S3_P1_MASK 0x000003e0 +#define S4_P1_MASK 0x000f8000 + +#define S0_P2_MASK 0x0001f000 +#define S1_P2_MASK 0x07c00000 +#define S2_P2_MASK 0x0000001f +#define S3_P2_MASK 0x00007c00 +#define S4_P2_MASK 0x01f00000 + +#define S0_P1_SHIFT 7 +#define S1_P1_SHIFT 17 +#define S2_P1_SHIFT 27 +#define S3_P1_SHIFT 5 +#define S4_P1_SHIFT 15 + +#define S0_P2_SHIFT 12 +#define S1_P2_SHIFT 22 +#define S2_P2_SHIFT 0 +#define S3_P2_SHIFT 10 +#define S4_P2_SHIFT 20 + +#define CAL_SEL_MASK 0xe0000000 +#define CAL_SEL_SHIFT 29 + +static int calibrate_8916(struct tsens_device *tmdev) +{ + int base0 = 0, base1 = 0, i; + u32 p1[5], p2[5]; + int mode = 0; + u32 *qfprom_cdata, *qfprom_csel; + + qfprom_cdata = (u32 *)qfprom_read(tmdev->dev, "calib"); + if (IS_ERR(qfprom_cdata)) + return PTR_ERR(qfprom_cdata); + + qfprom_csel = (u32 *)qfprom_read(tmdev->dev, "calib_sel"); + if (IS_ERR(qfprom_csel)) + return PTR_ERR(qfprom_csel); + + mode = (qfprom_csel[0] & CAL_SEL_MASK) >> CAL_SEL_SHIFT; + dev_dbg(tmdev->dev, "calibration mode is %d\n", mode); + + switch (mode) { + case TWO_PT_CALIB: + base1 = (qfprom_cdata[1] & BASE1_MASK) >> BASE1_SHIFT; + p2[0] = (qfprom_cdata[0] & S0_P2_MASK) >> S0_P2_SHIFT; + p2[1] = (qfprom_cdata[0] & S1_P2_MASK) >> S1_P2_SHIFT; + p2[2] = (qfprom_cdata[1] & S2_P2_MASK) >> S2_P2_SHIFT; + p2[3] = (qfprom_cdata[1] & S3_P2_MASK) >> S3_P2_SHIFT; + p2[4] = (qfprom_cdata[1] & S4_P2_MASK) >> S4_P2_SHIFT; + for (i = 0; i < tmdev->num_sensors; i++) + p2[i] = ((base1 + p2[i]) << 3); + /* Fall through */ + case ONE_PT_CALIB2: + base0 = (qfprom_cdata[0] & BASE0_MASK); + p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT; + p1[1] = (qfprom_cdata[0] & S1_P1_MASK) >> S1_P1_SHIFT; + p1[2] = (qfprom_cdata[0] & S2_P1_MASK) >> S2_P1_SHIFT; + p1[3] = (qfprom_cdata[1] & S3_P1_MASK) >> S3_P1_SHIFT; + p1[4] = (qfprom_cdata[1] & S4_P1_MASK) >> S4_P1_SHIFT; + for (i = 0; i < tmdev->num_sensors; i++) + p1[i] = (((base0) + p1[i]) << 3); + break; + default: + for (i = 0; i < tmdev->num_sensors; i++) { + p1[i] = 500; + p2[i] = 780; + } + break; + } + + compute_intercept_slope(tmdev, p1, p2, mode); + + return 0; +} + +static const struct tsens_ops ops_8916 
= { + .init = init_common, + .calibrate = calibrate_8916, + .get_temp = get_temp_common, +}; + +const struct tsens_data data_8916 = { + .num_sensors = 5, + .ops = &ops_8916, + .hw_ids = (unsigned int []){0, 1, 2, 4, 5 }, +}; diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c new file mode 100644 index 000000000000..0451277d3a8f --- /dev/null +++ b/drivers/thermal/qcom/tsens-8960.c @@ -0,0 +1,292 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/bitops.h> +#include <linux/regmap.h> +#include <linux/thermal.h> +#include "tsens.h" + +#define CAL_MDEGC 30000 + +#define CONFIG_ADDR 0x3640 +#define CONFIG_ADDR_8660 0x3620 +/* CONFIG_ADDR bitmasks */ +#define CONFIG 0x9b +#define CONFIG_MASK 0xf +#define CONFIG_8660 1 +#define CONFIG_SHIFT_8660 28 +#define CONFIG_MASK_8660 (3 << CONFIG_SHIFT_8660) + +#define STATUS_CNTL_ADDR_8064 0x3660 +#define CNTL_ADDR 0x3620 +/* CNTL_ADDR bitmasks */ +#define EN BIT(0) +#define SW_RST BIT(1) +#define SENSOR0_EN BIT(3) +#define SLP_CLK_ENA BIT(26) +#define SLP_CLK_ENA_8660 BIT(24) +#define MEASURE_PERIOD 1 +#define SENSOR0_SHIFT 3 + +/* INT_STATUS_ADDR bitmasks */ +#define MIN_STATUS_MASK BIT(0) +#define LOWER_STATUS_CLR BIT(1) +#define UPPER_STATUS_CLR BIT(2) +#define MAX_STATUS_MASK BIT(3) + +#define THRESHOLD_ADDR 0x3624 +/* THRESHOLD_ADDR bitmasks */ +#define THRESHOLD_MAX_LIMIT_SHIFT 24 +#define THRESHOLD_MIN_LIMIT_SHIFT 16 +#define THRESHOLD_UPPER_LIMIT_SHIFT 8 +#define THRESHOLD_LOWER_LIMIT_SHIFT 0 + +/* Initial temperature threshold values */ +#define LOWER_LIMIT_TH 0x50 +#define UPPER_LIMIT_TH 0xdf +#define MIN_LIMIT_TH 0x0 +#define MAX_LIMIT_TH 0xff + +#define S0_STATUS_ADDR 0x3628 +#define INT_STATUS_ADDR 0x363c +#define TRDY_MASK BIT(7) +#define TIMEOUT_US 100 + +static int suspend_8960(struct tsens_device *tmdev) +{ + int ret; + unsigned int mask; + struct regmap *map = tmdev->map; + + ret = regmap_read(map, THRESHOLD_ADDR, &tmdev->ctx.threshold); + if (ret) + return ret; + + ret = regmap_read(map, CNTL_ADDR, &tmdev->ctx.control); + if (ret) + return ret; + + if (tmdev->num_sensors > 1) + mask = SLP_CLK_ENA | EN; + else + mask = SLP_CLK_ENA_8660 | EN; + + ret = regmap_update_bits(map, CNTL_ADDR, mask, 0); + if (ret) + return ret; + + return 0; +} + +static int resume_8960(struct tsens_device *tmdev) +{ + int ret; + struct regmap *map = tmdev->map; + + ret = regmap_update_bits(map, CNTL_ADDR, SW_RST, SW_RST); + if (ret) + return ret; + + /* + * Separate CONFIG restore is not needed only for 8660 as + * config is part of CTRL Addr and its restored as such + */ + if (tmdev->num_sensors > 1) { + ret = regmap_update_bits(map, CONFIG_ADDR, CONFIG_MASK, CONFIG); + if (ret) + return ret; + } + + ret = regmap_write(map, THRESHOLD_ADDR, tmdev->ctx.threshold); + if (ret) + return ret; + + ret = regmap_write(map, CNTL_ADDR, tmdev->ctx.control); + if (ret) + return ret; + + return 0; +} + +static int enable_8960(struct tsens_device *tmdev, int id) +{ + int 
ret; + u32 reg, mask; + + ret = regmap_read(tmdev->map, CNTL_ADDR, ®); + if (ret) + return ret; + + mask = BIT(id + SENSOR0_SHIFT); + ret = regmap_write(tmdev->map, CNTL_ADDR, reg | SW_RST); + if (ret) + return ret; + + if (tmdev->num_sensors > 1) + reg |= mask | SLP_CLK_ENA | EN; + else + reg |= mask | SLP_CLK_ENA_8660 | EN; + + ret = regmap_write(tmdev->map, CNTL_ADDR, reg); + if (ret) + return ret; + + return 0; +} + +static void disable_8960(struct tsens_device *tmdev) +{ + int ret; + u32 reg_cntl; + u32 mask; + + mask = GENMASK(tmdev->num_sensors - 1, 0); + mask <<= SENSOR0_SHIFT; + mask |= EN; + + ret = regmap_read(tmdev->map, CNTL_ADDR, ®_cntl); + if (ret) + return; + + reg_cntl &= ~mask; + + if (tmdev->num_sensors > 1) + reg_cntl &= ~SLP_CLK_ENA; + else + reg_cntl &= ~SLP_CLK_ENA_8660; + + regmap_write(tmdev->map, CNTL_ADDR, reg_cntl); +} + +static int init_8960(struct tsens_device *tmdev) +{ + int ret, i; + u32 reg_cntl; + + tmdev->map = dev_get_regmap(tmdev->dev, NULL); + if (!tmdev->map) + return -ENODEV; + + /* + * The status registers for each sensor are discontiguous + * because some SoCs have 5 sensors while others have more + * but the control registers stay in the same place, i.e + * directly after the first 5 status registers. + */ + for (i = 0; i < tmdev->num_sensors; i++) { + if (i >= 5) + tmdev->sensor[i].status = S0_STATUS_ADDR + 40; + tmdev->sensor[i].status += i * 4; + } + + reg_cntl = SW_RST; + ret = regmap_update_bits(tmdev->map, CNTL_ADDR, SW_RST, reg_cntl); + if (ret) + return ret; + + if (tmdev->num_sensors > 1) { + reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18); + reg_cntl &= ~SW_RST; + ret = regmap_update_bits(tmdev->map, CONFIG_ADDR, + CONFIG_MASK, CONFIG); + } else { + reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16); + reg_cntl &= ~CONFIG_MASK_8660; + reg_cntl |= CONFIG_8660 << CONFIG_SHIFT_8660; + } + + reg_cntl |= GENMASK(tmdev->num_sensors - 1, 0) << SENSOR0_SHIFT; + ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl); + if (ret) + return ret; + + reg_cntl |= EN; + ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl); + if (ret) + return ret; + + return 0; +} + +static int calibrate_8960(struct tsens_device *tmdev) +{ + int i; + char *data; + + ssize_t num_read = tmdev->num_sensors; + struct tsens_sensor *s = tmdev->sensor; + + data = qfprom_read(tmdev->dev, "calib"); + if (IS_ERR(data)) + data = qfprom_read(tmdev->dev, "calib_backup"); + if (IS_ERR(data)) + return PTR_ERR(data); + + for (i = 0; i < num_read; i++, s++) + s->offset = data[i]; + + return 0; +} + +/* Temperature on y axis and ADC-code on x-axis */ +static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s) +{ + int slope, offset; + + slope = thermal_zone_get_slope(s->tzd); + offset = CAL_MDEGC - slope * s->offset; + + return adc_code * slope + offset; +} + +static int get_temp_8960(struct tsens_device *tmdev, int id, int *temp) +{ + int ret; + u32 code, trdy; + const struct tsens_sensor *s = &tmdev->sensor[id]; + unsigned long timeout; + + timeout = jiffies + usecs_to_jiffies(TIMEOUT_US); + do { + ret = regmap_read(tmdev->map, INT_STATUS_ADDR, &trdy); + if (ret) + return ret; + if (!(trdy & TRDY_MASK)) + continue; + ret = regmap_read(tmdev->map, s->status, &code); + if (ret) + return ret; + *temp = code_to_mdegC(code, s); + return 0; + } while (time_before(jiffies, timeout)); + + return -ETIMEDOUT; +} + +static const struct tsens_ops ops_8960 = { + .init = init_8960, + .calibrate = calibrate_8960, + .get_temp = get_temp_8960, + .enable = enable_8960, + .disable = 
disable_8960, + .suspend = suspend_8960, + .resume = resume_8960, +}; + +const struct tsens_data data_8960 = { + .num_sensors = 11, + .ops = &ops_8960, +}; diff --git a/drivers/thermal/qcom/tsens-8974.c b/drivers/thermal/qcom/tsens-8974.c new file mode 100644 index 000000000000..9baf77e8cbe3 --- /dev/null +++ b/drivers/thermal/qcom/tsens-8974.c @@ -0,0 +1,244 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/platform_device.h> +#include "tsens.h" + +/* eeprom layout data for 8974 */ +#define BASE1_MASK 0xff +#define S0_P1_MASK 0x3f00 +#define S1_P1_MASK 0xfc000 +#define S2_P1_MASK 0x3f00000 +#define S3_P1_MASK 0xfc000000 +#define S4_P1_MASK 0x3f +#define S5_P1_MASK 0xfc0 +#define S6_P1_MASK 0x3f000 +#define S7_P1_MASK 0xfc0000 +#define S8_P1_MASK 0x3f000000 +#define S8_P1_MASK_BKP 0x3f +#define S9_P1_MASK 0x3f +#define S9_P1_MASK_BKP 0xfc0 +#define S10_P1_MASK 0xfc0 +#define S10_P1_MASK_BKP 0x3f000 +#define CAL_SEL_0_1 0xc0000000 +#define CAL_SEL_2 0x40000000 +#define CAL_SEL_SHIFT 30 +#define CAL_SEL_SHIFT_2 28 + +#define S0_P1_SHIFT 8 +#define S1_P1_SHIFT 14 +#define S2_P1_SHIFT 20 +#define S3_P1_SHIFT 26 +#define S5_P1_SHIFT 6 +#define S6_P1_SHIFT 12 +#define S7_P1_SHIFT 18 +#define S8_P1_SHIFT 24 +#define S9_P1_BKP_SHIFT 6 +#define S10_P1_SHIFT 6 +#define S10_P1_BKP_SHIFT 12 + +#define BASE2_SHIFT 12 +#define BASE2_BKP_SHIFT 18 +#define S0_P2_SHIFT 20 +#define S0_P2_BKP_SHIFT 26 +#define S1_P2_SHIFT 26 +#define S2_P2_BKP_SHIFT 6 +#define S3_P2_SHIFT 6 +#define S3_P2_BKP_SHIFT 12 +#define S4_P2_SHIFT 12 +#define S4_P2_BKP_SHIFT 18 +#define S5_P2_SHIFT 18 +#define S5_P2_BKP_SHIFT 24 +#define S6_P2_SHIFT 24 +#define S7_P2_BKP_SHIFT 6 +#define S8_P2_SHIFT 6 +#define S8_P2_BKP_SHIFT 12 +#define S9_P2_SHIFT 12 +#define S9_P2_BKP_SHIFT 18 +#define S10_P2_SHIFT 18 +#define S10_P2_BKP_SHIFT 24 + +#define BASE2_MASK 0xff000 +#define BASE2_BKP_MASK 0xfc0000 +#define S0_P2_MASK 0x3f00000 +#define S0_P2_BKP_MASK 0xfc000000 +#define S1_P2_MASK 0xfc000000 +#define S1_P2_BKP_MASK 0x3f +#define S2_P2_MASK 0x3f +#define S2_P2_BKP_MASK 0xfc0 +#define S3_P2_MASK 0xfc0 +#define S3_P2_BKP_MASK 0x3f000 +#define S4_P2_MASK 0x3f000 +#define S4_P2_BKP_MASK 0xfc0000 +#define S5_P2_MASK 0xfc0000 +#define S5_P2_BKP_MASK 0x3f000000 +#define S6_P2_MASK 0x3f000000 +#define S6_P2_BKP_MASK 0x3f +#define S7_P2_MASK 0x3f +#define S7_P2_BKP_MASK 0xfc0 +#define S8_P2_MASK 0xfc0 +#define S8_P2_BKP_MASK 0x3f000 +#define S9_P2_MASK 0x3f000 +#define S9_P2_BKP_MASK 0xfc0000 +#define S10_P2_MASK 0xfc0000 +#define S10_P2_BKP_MASK 0x3f000000 + +#define BKP_SEL 0x3 +#define BKP_REDUN_SEL 0xe0000000 +#define BKP_REDUN_SHIFT 29 + +#define BIT_APPEND 0x3 + +static int calibrate_8974(struct tsens_device *tmdev) +{ + int base1 = 0, base2 = 0, i; + u32 p1[11], p2[11]; + int mode = 0; + u32 *calib, *bkp; + u32 calib_redun_sel; + + calib = (u32 *)qfprom_read(tmdev->dev, "calib"); + if (IS_ERR(calib)) + return PTR_ERR(calib); + + bkp = (u32 *)qfprom_read(tmdev->dev, "calib_backup"); + if (IS_ERR(bkp)) + return PTR_ERR(bkp); + + 
calib_redun_sel = bkp[1] & BKP_REDUN_SEL; + calib_redun_sel >>= BKP_REDUN_SHIFT; + + if (calib_redun_sel == BKP_SEL) { + mode = (calib[4] & CAL_SEL_0_1) >> CAL_SEL_SHIFT; + mode |= (calib[5] & CAL_SEL_2) >> CAL_SEL_SHIFT_2; + + switch (mode) { + case TWO_PT_CALIB: + base2 = (bkp[2] & BASE2_BKP_MASK) >> BASE2_BKP_SHIFT; + p2[0] = (bkp[2] & S0_P2_BKP_MASK) >> S0_P2_BKP_SHIFT; + p2[1] = (bkp[3] & S1_P2_BKP_MASK); + p2[2] = (bkp[3] & S2_P2_BKP_MASK) >> S2_P2_BKP_SHIFT; + p2[3] = (bkp[3] & S3_P2_BKP_MASK) >> S3_P2_BKP_SHIFT; + p2[4] = (bkp[3] & S4_P2_BKP_MASK) >> S4_P2_BKP_SHIFT; + p2[5] = (calib[4] & S5_P2_BKP_MASK) >> S5_P2_BKP_SHIFT; + p2[6] = (calib[5] & S6_P2_BKP_MASK); + p2[7] = (calib[5] & S7_P2_BKP_MASK) >> S7_P2_BKP_SHIFT; + p2[8] = (calib[5] & S8_P2_BKP_MASK) >> S8_P2_BKP_SHIFT; + p2[9] = (calib[5] & S9_P2_BKP_MASK) >> S9_P2_BKP_SHIFT; + p2[10] = (calib[5] & S10_P2_BKP_MASK) >> S10_P2_BKP_SHIFT; + /* Fall through */ + case ONE_PT_CALIB: + case ONE_PT_CALIB2: + base1 = bkp[0] & BASE1_MASK; + p1[0] = (bkp[0] & S0_P1_MASK) >> S0_P1_SHIFT; + p1[1] = (bkp[0] & S1_P1_MASK) >> S1_P1_SHIFT; + p1[2] = (bkp[0] & S2_P1_MASK) >> S2_P1_SHIFT; + p1[3] = (bkp[0] & S3_P1_MASK) >> S3_P1_SHIFT; + p1[4] = (bkp[1] & S4_P1_MASK); + p1[5] = (bkp[1] & S5_P1_MASK) >> S5_P1_SHIFT; + p1[6] = (bkp[1] & S6_P1_MASK) >> S6_P1_SHIFT; + p1[7] = (bkp[1] & S7_P1_MASK) >> S7_P1_SHIFT; + p1[8] = (bkp[2] & S8_P1_MASK_BKP) >> S8_P1_SHIFT; + p1[9] = (bkp[2] & S9_P1_MASK_BKP) >> S9_P1_BKP_SHIFT; + p1[10] = (bkp[2] & S10_P1_MASK_BKP) >> S10_P1_BKP_SHIFT; + break; + } + } else { + mode = (calib[1] & CAL_SEL_0_1) >> CAL_SEL_SHIFT; + mode |= (calib[3] & CAL_SEL_2) >> CAL_SEL_SHIFT_2; + + switch (mode) { + case TWO_PT_CALIB: + base2 = (calib[2] & BASE2_MASK) >> BASE2_SHIFT; + p2[0] = (calib[2] & S0_P2_MASK) >> S0_P2_SHIFT; + p2[1] = (calib[2] & S1_P2_MASK) >> S1_P2_SHIFT; + p2[2] = (calib[3] & S2_P2_MASK); + p2[3] = (calib[3] & S3_P2_MASK) >> S3_P2_SHIFT; + p2[4] = (calib[3] & S4_P2_MASK) >> S4_P2_SHIFT; + p2[5] = (calib[3] & S5_P2_MASK) >> S5_P2_SHIFT; + p2[6] = (calib[3] & S6_P2_MASK) >> S6_P2_SHIFT; + p2[7] = (calib[4] & S7_P2_MASK); + p2[8] = (calib[4] & S8_P2_MASK) >> S8_P2_SHIFT; + p2[9] = (calib[4] & S9_P2_MASK) >> S9_P2_SHIFT; + p2[10] = (calib[4] & S10_P2_MASK) >> S10_P2_SHIFT; + /* Fall through */ + case ONE_PT_CALIB: + case ONE_PT_CALIB2: + base1 = calib[0] & BASE1_MASK; + p1[0] = (calib[0] & S0_P1_MASK) >> S0_P1_SHIFT; + p1[1] = (calib[0] & S1_P1_MASK) >> S1_P1_SHIFT; + p1[2] = (calib[0] & S2_P1_MASK) >> S2_P1_SHIFT; + p1[3] = (calib[0] & S3_P1_MASK) >> S3_P1_SHIFT; + p1[4] = (calib[1] & S4_P1_MASK); + p1[5] = (calib[1] & S5_P1_MASK) >> S5_P1_SHIFT; + p1[6] = (calib[1] & S6_P1_MASK) >> S6_P1_SHIFT; + p1[7] = (calib[1] & S7_P1_MASK) >> S7_P1_SHIFT; + p1[8] = (calib[1] & S8_P1_MASK) >> S8_P1_SHIFT; + p1[9] = (calib[2] & S9_P1_MASK); + p1[10] = (calib[2] & S10_P1_MASK) >> S10_P1_SHIFT; + break; + } + } + + switch (mode) { + case ONE_PT_CALIB: + for (i = 0; i < tmdev->num_sensors; i++) + p1[i] += (base1 << 2) | BIT_APPEND; + break; + case TWO_PT_CALIB: + for (i = 0; i < tmdev->num_sensors; i++) { + p2[i] += base2; + p2[i] <<= 2; + p2[i] |= BIT_APPEND; + } + /* Fall through */ + case ONE_PT_CALIB2: + for (i = 0; i < tmdev->num_sensors; i++) { + p1[i] += base1; + p1[i] <<= 2; + p1[i] |= BIT_APPEND; + } + break; + default: + for (i = 0; i < tmdev->num_sensors; i++) + p2[i] = 780; + p1[0] = 502; + p1[1] = 509; + p1[2] = 503; + p1[3] = 509; + p1[4] = 505; + p1[5] = 509; + p1[6] = 507; + p1[7] = 510; + p1[8] = 508; + p1[9] = 
509;
+		p1[10] = 508;
+		break;
+	}
+
+	compute_intercept_slope(tmdev, p1, p2, mode);
+
+	return 0;
+}
+
+static const struct tsens_ops ops_8974 = {
+	.init		= init_common,
+	.calibrate	= calibrate_8974,
+	.get_temp	= get_temp_common,
+};
+
+const struct tsens_data data_8974 = {
+	.num_sensors	= 11,
+	.ops		= &ops_8974,
+};
diff --git a/drivers/thermal/qcom/tsens-8996.c b/drivers/thermal/qcom/tsens-8996.c
new file mode 100644
index 000000000000..e1f77818d8fa
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-8996.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include "tsens.h"
+
+#define STATUS_OFFSET		0x10a0
+#define LAST_TEMP_MASK		0xfff
+#define STATUS_VALID_BIT	BIT(21)
+#define CODE_SIGN_BIT		BIT(11)
+
+static int get_temp_8996(struct tsens_device *tmdev, int id, int *temp)
+{
+	struct tsens_sensor *s = &tmdev->sensor[id];
+	u32 code;
+	unsigned int sensor_addr;
+	int last_temp = 0, last_temp2 = 0, last_temp3 = 0, ret;
+
+	sensor_addr = STATUS_OFFSET + s->hw_id * 4;
+	ret = regmap_read(tmdev->map, sensor_addr, &code);
+	if (ret)
+		return ret;
+	last_temp = code & LAST_TEMP_MASK;
+	if (code & STATUS_VALID_BIT)
+		goto done;
+
+	/* Try a second time */
+	ret = regmap_read(tmdev->map, sensor_addr, &code);
+	if (ret)
+		return ret;
+	if (code & STATUS_VALID_BIT) {
+		last_temp = code & LAST_TEMP_MASK;
+		goto done;
+	} else {
+		last_temp2 = code & LAST_TEMP_MASK;
+	}
+
+	/* Try a third/last time */
+	ret = regmap_read(tmdev->map, sensor_addr, &code);
+	if (ret)
+		return ret;
+	if (code & STATUS_VALID_BIT) {
+		last_temp = code & LAST_TEMP_MASK;
+		goto done;
+	} else {
+		last_temp3 = code & LAST_TEMP_MASK;
+	}
+
+	if (last_temp == last_temp2)
+		last_temp = last_temp2;
+	else if (last_temp2 == last_temp3)
+		last_temp = last_temp3;
done:
+	/* The code sign bit flags a negative value; sign-extend the code */
+	if (last_temp & CODE_SIGN_BIT)
+		last_temp |= ~CODE_SIGN_BIT;
+
+	/* Temperatures are in decidegrees Celsius */
+	*temp = last_temp * 100;
+
+	return 0;
+}
+
+static const struct tsens_ops ops_8996 = {
+	.init		= init_common,
+	.get_temp	= get_temp_8996,
+};
+
+const struct tsens_data data_8996 = {
+	.num_sensors	= 13,
+	.ops		= &ops_8996,
+};
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
new file mode 100644
index 000000000000..b1449ad67fc0
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + */ + +#include <linux/err.h> +#include <linux/io.h> +#include <linux/nvmem-consumer.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include "tsens.h" + +#define S0_ST_ADDR 0x1030 +#define SN_ADDR_OFFSET 0x4 +#define SN_ST_TEMP_MASK 0x3ff +#define CAL_DEGC_PT1 30 +#define CAL_DEGC_PT2 120 +#define SLOPE_FACTOR 1000 +#define SLOPE_DEFAULT 3200 + +char *qfprom_read(struct device *dev, const char *cname) +{ + struct nvmem_cell *cell; + ssize_t data; + char *ret; + + cell = nvmem_cell_get(dev, cname); + if (IS_ERR(cell)) + return ERR_CAST(cell); + + ret = nvmem_cell_read(cell, &data); + nvmem_cell_put(cell); + + return ret; +} + +/* + * Use this function on devices where slope and offset calculations + * depend on calibration data read from qfprom. On others the slope + * and offset values are derived from tz->tzp->slope and tz->tzp->offset + * resp. + */ +void compute_intercept_slope(struct tsens_device *tmdev, u32 *p1, + u32 *p2, u32 mode) +{ + int i; + int num, den; + + for (i = 0; i < tmdev->num_sensors; i++) { + dev_dbg(tmdev->dev, + "sensor%d - data_point1:%#x data_point2:%#x\n", + i, p1[i], p2[i]); + + tmdev->sensor[i].slope = SLOPE_DEFAULT; + if (mode == TWO_PT_CALIB) { + /* + * slope (m) = adc_code2 - adc_code1 (y2 - y1)/ + * temp_120_degc - temp_30_degc (x2 - x1) + */ + num = p2[i] - p1[i]; + num *= SLOPE_FACTOR; + den = CAL_DEGC_PT2 - CAL_DEGC_PT1; + tmdev->sensor[i].slope = num / den; + } + + tmdev->sensor[i].offset = (p1[i] * SLOPE_FACTOR) - + (CAL_DEGC_PT1 * + tmdev->sensor[i].slope); + dev_dbg(tmdev->dev, "offset:%d\n", tmdev->sensor[i].offset); + } +} + +static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s) +{ + int degc, num, den; + + num = (adc_code * SLOPE_FACTOR) - s->offset; + den = s->slope; + + if (num > 0) + degc = num + (den / 2); + else if (num < 0) + degc = num - (den / 2); + else + degc = num; + + degc /= den; + + return degc; +} + +int get_temp_common(struct tsens_device *tmdev, int id, int *temp) +{ + struct tsens_sensor *s = &tmdev->sensor[id]; + u32 code; + unsigned int sensor_addr; + int last_temp = 0, ret; + + sensor_addr = S0_ST_ADDR + s->hw_id * SN_ADDR_OFFSET; + ret = regmap_read(tmdev->map, sensor_addr, &code); + if (ret) + return ret; + last_temp = code & SN_ST_TEMP_MASK; + + *temp = code_to_degc(last_temp, s) * 1000; + + return 0; +} + +static const struct regmap_config tsens_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +int __init init_common(struct tsens_device *tmdev) +{ + void __iomem *base; + + base = of_iomap(tmdev->dev->of_node, 0); + if (!base) + return -EINVAL; + + tmdev->map = devm_regmap_init_mmio(tmdev->dev, base, &tsens_config); + if (IS_ERR(tmdev->map)) { + iounmap(base); + return PTR_ERR(tmdev->map); + } + + return 0; +} diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c new file mode 100644 index 000000000000..3f9fe6aa51cc --- /dev/null +++ b/drivers/thermal/qcom/tsens.c @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/err.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/slab.h> +#include <linux/thermal.h> +#include "tsens.h" + +static int tsens_get_temp(void *data, int *temp) +{ + const struct tsens_sensor *s = data; + struct tsens_device *tmdev = s->tmdev; + + return tmdev->ops->get_temp(tmdev, s->id, temp); +} + +static int tsens_get_trend(void *p, int trip, enum thermal_trend *trend) +{ + const struct tsens_sensor *s = p; + struct tsens_device *tmdev = s->tmdev; + + if (tmdev->ops->get_trend) + return tmdev->ops->get_trend(tmdev, s->id, trend); + + return -ENOTSUPP; +} + +static int __maybe_unused tsens_suspend(struct device *dev) +{ + struct tsens_device *tmdev = dev_get_drvdata(dev); + + if (tmdev->ops && tmdev->ops->suspend) + return tmdev->ops->suspend(tmdev); + + return 0; +} + +static int __maybe_unused tsens_resume(struct device *dev) +{ + struct tsens_device *tmdev = dev_get_drvdata(dev); + + if (tmdev->ops && tmdev->ops->resume) + return tmdev->ops->resume(tmdev); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, tsens_resume); + +static const struct of_device_id tsens_table[] = { + { + .compatible = "qcom,msm8916-tsens", + .data = &data_8916, + }, { + .compatible = "qcom,msm8974-tsens", + .data = &data_8974, + }, { + .compatible = "qcom,msm8996-tsens", + .data = &data_8996, + }, + {} +}; +MODULE_DEVICE_TABLE(of, tsens_table); + +static const struct thermal_zone_of_device_ops tsens_of_ops = { + .get_temp = tsens_get_temp, + .get_trend = tsens_get_trend, +}; + +static int tsens_register(struct tsens_device *tmdev) +{ + int i; + struct thermal_zone_device *tzd; + u32 *hw_id, n = tmdev->num_sensors; + + hw_id = devm_kcalloc(tmdev->dev, n, sizeof(u32), GFP_KERNEL); + if (!hw_id) + return -ENOMEM; + + for (i = 0; i < tmdev->num_sensors; i++) { + tmdev->sensor[i].tmdev = tmdev; + tmdev->sensor[i].id = i; + tzd = devm_thermal_zone_of_sensor_register(tmdev->dev, i, + &tmdev->sensor[i], + &tsens_of_ops); + if (IS_ERR(tzd)) + continue; + tmdev->sensor[i].tzd = tzd; + if (tmdev->ops->enable) + tmdev->ops->enable(tmdev, i); + } + return 0; +} + +static int tsens_probe(struct platform_device *pdev) +{ + int ret, i; + struct device *dev; + struct device_node *np; + struct tsens_sensor *s; + struct tsens_device *tmdev; + const struct tsens_data *data; + const struct of_device_id *id; + + if (pdev->dev.of_node) + dev = &pdev->dev; + else + dev = pdev->dev.parent; + + np = dev->of_node; + + id = of_match_node(tsens_table, np); + if (id) + data = id->data; + else + data = &data_8960; + + if (data->num_sensors <= 0) { + dev_err(dev, "invalid number of sensors\n"); + return -EINVAL; + } + + tmdev = devm_kzalloc(dev, sizeof(*tmdev) + + data->num_sensors * sizeof(*s), GFP_KERNEL); + if (!tmdev) + return -ENOMEM; + + tmdev->dev = dev; + tmdev->num_sensors = data->num_sensors; + tmdev->ops = data->ops; + for (i = 0; i < tmdev->num_sensors; i++) { + if (data->hw_ids) + tmdev->sensor[i].hw_id = data->hw_ids[i]; + else + tmdev->sensor[i].hw_id = i; + } + + if (!tmdev->ops || !tmdev->ops->init || !tmdev->ops->get_temp) + return -EINVAL; + + ret = tmdev->ops->init(tmdev); + if (ret < 0) { + dev_err(dev, "tsens init failed\n"); + return ret; + } + + if (tmdev->ops->calibrate) { + ret = tmdev->ops->calibrate(tmdev); + if (ret < 0) { + dev_err(dev, "tsens calibration failed\n"); + return ret; + } + } + + ret = tsens_register(tmdev); + + platform_set_drvdata(pdev, tmdev); + + return ret; +} + 
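The calibration math in tsens-common.c above is a straight line through the two calibration points: slope = 1000 * (p2 - p1) / (120 - 30), offset = 1000 * p1 - 30 * slope, and code_to_degc() inverts it with round-to-nearest integer division. A standalone worked example using the driver's default fallback points (p1 = 500, p2 = 780); the sampled code 640 is an invented reading:

/*
 * Worked example of the slope/offset math from compute_intercept_slope()
 * and code_to_degc() in tsens-common.c above.  Only the two-point formula
 * is taken from the driver; the input code is made up.
 */
#include <stdio.h>

#define CAL_DEGC_PT1	30
#define CAL_DEGC_PT2	120
#define SLOPE_FACTOR	1000

int main(void)
{
	int p1 = 500, p2 = 780;	/* ADC codes at 30 degC and 120 degC */
	int slope, offset, code, num, degc;

	/* slope = (y2 - y1) / (x2 - x1), scaled by 1000 to stay integer */
	slope = (p2 - p1) * SLOPE_FACTOR / (CAL_DEGC_PT2 - CAL_DEGC_PT1);
	offset = p1 * SLOPE_FACTOR - CAL_DEGC_PT1 * slope;

	/* invert: degC = (code * 1000 - offset) / slope, rounded to nearest */
	code = 640;		/* example reading */
	num = code * SLOPE_FACTOR - offset;
	degc = (num + (num > 0 ? slope / 2 : -slope / 2)) / slope;

	/* yields slope = 3111, offset = 406670; code 640 -> 75 degC */
	printf("slope=%d offset=%d code=%d -> %d degC\n",
	       slope, offset, code, degc);
	return 0;
}

get_temp_common() then multiplies the result by 1000 to report millidegrees, which is why a one-point calibration only has to correct the offset while keeping the default slope of 3200.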
+static int tsens_remove(struct platform_device *pdev) +{ + struct tsens_device *tmdev = platform_get_drvdata(pdev); + + if (tmdev->ops->disable) + tmdev->ops->disable(tmdev); + + return 0; +} + +static struct platform_driver tsens_driver = { + .probe = tsens_probe, + .remove = tsens_remove, + .driver = { + .name = "qcom-tsens", + .pm = &tsens_pm_ops, + .of_match_table = tsens_table, + }, +}; +module_platform_driver(tsens_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("QCOM Temperature Sensor driver"); +MODULE_ALIAS("platform:qcom-tsens"); diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h new file mode 100644 index 000000000000..911c1978892b --- /dev/null +++ b/drivers/thermal/qcom/tsens.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __QCOM_TSENS_H__ +#define __QCOM_TSENS_H__ + +#define ONE_PT_CALIB 0x1 +#define ONE_PT_CALIB2 0x2 +#define TWO_PT_CALIB 0x3 + +#include <linux/thermal.h> + +struct tsens_device; + +struct tsens_sensor { + struct tsens_device *tmdev; + struct thermal_zone_device *tzd; + int offset; + int id; + int hw_id; + int slope; + u32 status; +}; + +/** + * struct tsens_ops - operations as supported by the tsens device + * @init: Function to initialize the tsens device + * @calibrate: Function to calibrate the tsens device + * @get_temp: Function which returns the temp in millidegC + * @enable: Function to enable (clocks/power) tsens device + * @disable: Function to disable the tsens device + * @suspend: Function to suspend the tsens device + * @resume: Function to resume the tsens device + * @get_trend: Function to get the thermal/temp trend + */ +struct tsens_ops { + /* mandatory callbacks */ + int (*init)(struct tsens_device *); + int (*calibrate)(struct tsens_device *); + int (*get_temp)(struct tsens_device *, int, int *); + /* optional callbacks */ + int (*enable)(struct tsens_device *, int); + void (*disable)(struct tsens_device *); + int (*suspend)(struct tsens_device *); + int (*resume)(struct tsens_device *); + int (*get_trend)(struct tsens_device *, int, enum thermal_trend *); +}; + +/** + * struct tsens_data - tsens instance specific data + * @num_sensors: Max number of sensors supported by platform + * @ops: operations the tsens instance supports + * @hw_ids: Subset of sensors ids supported by platform, if not the first n + */ +struct tsens_data { + const u32 num_sensors; + const struct tsens_ops *ops; + unsigned int *hw_ids; +}; + +/* Registers to be saved/restored across a context loss */ +struct tsens_context { + int threshold; + int control; +}; + +struct tsens_device { + struct device *dev; + u32 num_sensors; + struct regmap *map; + struct regmap_field *status_field; + struct tsens_context ctx; + bool trdy; + const struct tsens_ops *ops; + struct tsens_sensor sensor[0]; +}; + +char *qfprom_read(struct device *, const char *); +void compute_intercept_slope(struct tsens_device *, u32 *, u32 *, u32); +int init_common(struct tsens_device *); +int get_temp_common(struct tsens_device *, int, int *); + +extern const 
struct tsens_data data_8916, data_8974, data_8960, data_8996; + +#endif /* __QCOM_TSENS_H__ */ diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c new file mode 100644 index 000000000000..644ba526d9ea --- /dev/null +++ b/drivers/thermal/qoriq_thermal.c @@ -0,0 +1,328 @@ +/* + * Copyright 2016 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/thermal.h> + +#include "thermal_core.h" + +#define SITES_MAX 16 + +/* + * QorIQ TMU Registers + */ +struct qoriq_tmu_site_regs { + u32 tritsr; /* Immediate Temperature Site Register */ + u32 tratsr; /* Average Temperature Site Register */ + u8 res0[0x8]; +}; + +struct qoriq_tmu_regs { + u32 tmr; /* Mode Register */ +#define TMR_DISABLE 0x0 +#define TMR_ME 0x80000000 +#define TMR_ALPF 0x0c000000 + u32 tsr; /* Status Register */ + u32 tmtmir; /* Temperature measurement interval Register */ +#define TMTMIR_DEFAULT 0x0000000f + u8 res0[0x14]; + u32 tier; /* Interrupt Enable Register */ +#define TIER_DISABLE 0x0 + u32 tidr; /* Interrupt Detect Register */ + u32 tiscr; /* Interrupt Site Capture Register */ + u32 ticscr; /* Interrupt Critical Site Capture Register */ + u8 res1[0x10]; + u32 tmhtcrh; /* High Temperature Capture Register */ + u32 tmhtcrl; /* Low Temperature Capture Register */ + u8 res2[0x8]; + u32 tmhtitr; /* High Temperature Immediate Threshold */ + u32 tmhtatr; /* High Temperature Average Threshold */ + u32 tmhtactr; /* High Temperature Average Crit Threshold */ + u8 res3[0x24]; + u32 ttcfgr; /* Temperature Configuration Register */ + u32 tscfgr; /* Sensor Configuration Register */ + u8 res4[0x78]; + struct qoriq_tmu_site_regs site[SITES_MAX]; + u8 res5[0x9f8]; + u32 ipbrr0; /* IP Block Revision Register 0 */ + u32 ipbrr1; /* IP Block Revision Register 1 */ + u8 res6[0x310]; + u32 ttr0cr; /* Temperature Range 0 Control Register */ + u32 ttr1cr; /* Temperature Range 1 Control Register */ + u32 ttr2cr; /* Temperature Range 2 Control Register */ + u32 ttr3cr; /* Temperature Range 3 Control Register */ +}; + +/* + * Thermal zone data + */ +struct qoriq_tmu_data { + struct thermal_zone_device *tz; + struct qoriq_tmu_regs __iomem *regs; + int sensor_id; + bool little_endian; +}; + +static void tmu_write(struct qoriq_tmu_data *p, u32 val, void __iomem *addr) +{ + if (p->little_endian) + iowrite32(val, addr); + else + iowrite32be(val, addr); +} + +static u32 tmu_read(struct qoriq_tmu_data *p, void __iomem *addr) +{ + if (p->little_endian) + return ioread32(addr); + else + return ioread32be(addr); +} + +static int tmu_get_temp(void *p, int *temp) +{ + u32 val; + struct qoriq_tmu_data *data = p; + + val = tmu_read(data, &data->regs->site[data->sensor_id].tritsr); + *temp = (val & 0xff) * 1000; + + return 0; +} + +static int qoriq_tmu_get_sensor_id(void) +{ + int ret, id; + struct of_phandle_args sensor_specs; + struct device_node *np, *sensor_np; + + np = of_find_node_by_name(NULL, "thermal-zones"); + if 
(!np) + return -ENODEV; + + sensor_np = of_get_next_child(np, NULL); + ret = of_parse_phandle_with_args(sensor_np, "thermal-sensors", + "#thermal-sensor-cells", + 0, &sensor_specs); + if (ret) { + of_node_put(np); + of_node_put(sensor_np); + return ret; + } + + if (sensor_specs.args_count >= 1) { + id = sensor_specs.args[0]; + WARN(sensor_specs.args_count > 1, + "%s: too many cells in sensor specifier %d\n", + sensor_specs.np->name, sensor_specs.args_count); + } else { + id = 0; + } + + of_node_put(np); + of_node_put(sensor_np); + + return id; +} + +static int qoriq_tmu_calibration(struct platform_device *pdev) +{ + int i, val, len; + u32 range[4]; + const u32 *calibration; + struct device_node *np = pdev->dev.of_node; + struct qoriq_tmu_data *data = platform_get_drvdata(pdev); + + if (of_property_read_u32_array(np, "fsl,tmu-range", range, 4)) { + dev_err(&pdev->dev, "missing calibration range.\n"); + return -ENODEV; + } + + /* Init temperature range registers */ + tmu_write(data, range[0], &data->regs->ttr0cr); + tmu_write(data, range[1], &data->regs->ttr1cr); + tmu_write(data, range[2], &data->regs->ttr2cr); + tmu_write(data, range[3], &data->regs->ttr3cr); + + calibration = of_get_property(np, "fsl,tmu-calibration", &len); + if (calibration == NULL || len % 8) { + dev_err(&pdev->dev, "invalid calibration data.\n"); + return -ENODEV; + } + + for (i = 0; i < len; i += 8, calibration += 2) { + val = of_read_number(calibration, 1); + tmu_write(data, val, &data->regs->ttcfgr); + val = of_read_number(calibration + 1, 1); + tmu_write(data, val, &data->regs->tscfgr); + } + + return 0; +} + +static void qoriq_tmu_init_device(struct qoriq_tmu_data *data) +{ + /* Disable interrupt, using polling instead */ + tmu_write(data, TIER_DISABLE, &data->regs->tier); + + /* Set update_interval */ + tmu_write(data, TMTMIR_DEFAULT, &data->regs->tmtmir); + + /* Disable monitoring */ + tmu_write(data, TMR_DISABLE, &data->regs->tmr); +} + +static struct thermal_zone_of_device_ops tmu_tz_ops = { + .get_temp = tmu_get_temp, +}; + +static int qoriq_tmu_probe(struct platform_device *pdev) +{ + int ret; + const struct thermal_trip *trip; + struct qoriq_tmu_data *data; + struct device_node *np = pdev->dev.of_node; + u32 site = 0; + + if (!np) { + dev_err(&pdev->dev, "Device OF-Node is NULL"); + return -ENODEV; + } + + data = devm_kzalloc(&pdev->dev, sizeof(struct qoriq_tmu_data), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + platform_set_drvdata(pdev, data); + + data->little_endian = of_property_read_bool(np, "little-endian"); + + data->sensor_id = qoriq_tmu_get_sensor_id(); + if (data->sensor_id < 0) { + dev_err(&pdev->dev, "Failed to get sensor id\n"); + ret = -ENODEV; + goto err_iomap; + } + + data->regs = of_iomap(np, 0); + if (!data->regs) { + dev_err(&pdev->dev, "Failed to get memory region\n"); + ret = -ENODEV; + goto err_iomap; + } + + qoriq_tmu_init_device(data); /* TMU initialization */ + + ret = qoriq_tmu_calibration(pdev); /* TMU calibration */ + if (ret < 0) + goto err_tmu; + + data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id, + data, &tmu_tz_ops); + if (IS_ERR(data->tz)) { + ret = PTR_ERR(data->tz); + dev_err(&pdev->dev, + "Failed to register thermal zone device %d\n", ret); + goto err_tmu; + } + + trip = of_thermal_get_trip_points(data->tz); + + /* Enable monitoring */ + site |= 0x1 << (15 - data->sensor_id); + tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); + + return 0; + +err_tmu: + iounmap(data->regs); + +err_iomap: + platform_set_drvdata(pdev, NULL); + + 
return ret; +} + +static int qoriq_tmu_remove(struct platform_device *pdev) +{ + struct qoriq_tmu_data *data = platform_get_drvdata(pdev); + + thermal_zone_of_sensor_unregister(&pdev->dev, data->tz); + + /* Disable monitoring */ + tmu_write(data, TMR_DISABLE, &data->regs->tmr); + + iounmap(data->regs); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int qoriq_tmu_suspend(struct device *dev) +{ + u32 tmr; + struct qoriq_tmu_data *data = dev_get_drvdata(dev); + + /* Disable monitoring */ + tmr = tmu_read(data, &data->regs->tmr); + tmr &= ~TMR_ME; + tmu_write(data, tmr, &data->regs->tmr); + + return 0; +} + +static int qoriq_tmu_resume(struct device *dev) +{ + u32 tmr; + struct qoriq_tmu_data *data = dev_get_drvdata(dev); + + /* Enable monitoring */ + tmr = tmu_read(data, &data->regs->tmr); + tmr |= TMR_ME; + tmu_write(data, tmr, &data->regs->tmr); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(qoriq_tmu_pm_ops, + qoriq_tmu_suspend, qoriq_tmu_resume); + +static const struct of_device_id qoriq_tmu_match[] = { + { .compatible = "fsl,qoriq-tmu", }, + {}, +}; +MODULE_DEVICE_TABLE(of, qoriq_tmu_match); + +static struct platform_driver qoriq_tmu = { + .driver = { + .name = "qoriq_thermal", + .pm = &qoriq_tmu_pm_ops, + .of_match_table = qoriq_tmu_match, + }, + .probe = qoriq_tmu_probe, + .remove = qoriq_tmu_remove, +}; +module_platform_driver(qoriq_tmu); + +MODULE_AUTHOR("Jia Hongtao <hongtao.jia@nxp.com>"); +MODULE_DESCRIPTION("QorIQ Thermal Monitoring Unit driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 5f817923f374..73e5fee6cf1d 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -31,6 +31,8 @@ #include <linux/spinlock.h> #include <linux/thermal.h> +#include "thermal_hwmon.h" + #define IDLE_INTERVAL 5000 #define COMMON_STR 0x00 @@ -75,6 +77,8 @@ struct rcar_thermal_priv { #define rcar_priv_to_dev(priv) ((priv)->common->dev) #define rcar_has_irq_support(priv) ((priv)->common->base) #define rcar_id_to_shift(priv) ((priv)->id * 8) +#define rcar_of_data(dev) ((unsigned long)of_device_get_match_data(dev)) +#define rcar_use_of_thermal(dev) (rcar_of_data(dev) == USE_OF_THERMAL) #define USE_OF_THERMAL 1 static const struct of_device_id rcar_thermal_dt_ids[] = { @@ -354,7 +358,8 @@ static void rcar_thermal_work(struct work_struct *work) return; if (nctemp != cctemp) - thermal_zone_device_update(priv->zone); + thermal_zone_device_update(priv->zone, + THERMAL_EVENT_UNSPECIFIED); } static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status) @@ -415,7 +420,10 @@ static int rcar_thermal_remove(struct platform_device *pdev) rcar_thermal_for_each_priv(priv, common) { rcar_thermal_irq_disable(priv); - thermal_zone_device_unregister(priv->zone); + if (rcar_use_of_thermal(dev)) + thermal_remove_hwmon_sysfs(priv->zone); + else + thermal_zone_device_unregister(priv->zone); } pm_runtime_put(dev); @@ -430,7 +438,6 @@ static int rcar_thermal_probe(struct platform_device *pdev) struct rcar_thermal_priv *priv; struct device *dev = &pdev->dev; struct resource *res, *irq; - unsigned long of_data = (unsigned long)of_device_get_match_data(dev); int mres = 0; int i; int ret = -ENODEV; @@ -491,7 +498,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) if (ret < 0) goto error_unregister; - if (of_data == USE_OF_THERMAL) + if (rcar_use_of_thermal(dev)) priv->zone = devm_thermal_zone_of_sensor_register( dev, i, priv, &rcar_thermal_zone_of_ops); @@ 
-508,6 +515,17 @@ static int rcar_thermal_probe(struct platform_device *pdev) goto error_unregister; } + if (rcar_use_of_thermal(dev)) { + /* + * thermal_zone doesn't enable hwmon as default, + * but, enable it here to keep compatible + */ + priv->zone->tzp->no_hwmon = false; + ret = thermal_add_hwmon_sysfs(priv->zone); + if (ret) + goto error_unregister; + } + rcar_thermal_irq_enable(priv); list_move_tail(&priv->list, &common->head); diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index 5d491f16a866..e227a9f0acf7 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -96,6 +96,7 @@ struct chip_tsadc_table { * @initialize: SoC special initialize tsadc controller method * @irq_ack: clear the interrupt * @get_temp: get the temperature + * @set_alarm_temp: set the high temperature interrupt * @set_tshut_temp: set the hardware-controlled shutdown temperature * @set_tshut_mode: set the hardware-controlled shutdown mode * @table: the chip-specific conversion table @@ -119,6 +120,8 @@ struct rockchip_tsadc_chip { /* Per-sensor methods */ int (*get_temp)(struct chip_tsadc_table table, int chn, void __iomem *reg, int *temp); + void (*set_alarm_temp)(struct chip_tsadc_table table, + int chn, void __iomem *reg, int temp); void (*set_tshut_temp)(struct chip_tsadc_table table, int chn, void __iomem *reg, int temp); void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m); @@ -183,6 +186,7 @@ struct rockchip_thermal_data { #define TSADCV2_INT_EN 0x08 #define TSADCV2_INT_PD 0x0c #define TSADCV2_DATA(chn) (0x20 + (chn) * 0x04) +#define TSADCV2_COMP_INT(chn) (0x30 + (chn) * 0x04) #define TSADCV2_COMP_SHUT(chn) (0x40 + (chn) * 0x04) #define TSADCV2_HIGHT_INT_DEBOUNCE 0x60 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE 0x64 @@ -207,18 +211,21 @@ struct rockchip_thermal_data { #define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT 4 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT 4 -#define TSADCV2_AUTO_PERIOD_TIME 250 /* msec */ -#define TSADCV2_AUTO_PERIOD_HT_TIME 50 /* msec */ +#define TSADCV2_AUTO_PERIOD_TIME 250 /* 250ms */ +#define TSADCV2_AUTO_PERIOD_HT_TIME 50 /* 50ms */ +#define TSADCV3_AUTO_PERIOD_TIME 1875 /* 2.5ms */ +#define TSADCV3_AUTO_PERIOD_HT_TIME 1875 /* 2.5ms */ + #define TSADCV2_USER_INTER_PD_SOC 0x340 /* 13 clocks */ #define GRF_SARADC_TESTBIT 0x0e644 #define GRF_TSADC_TESTBIT_L 0x0e648 #define GRF_TSADC_TESTBIT_H 0x0e64c -#define GRF_TSADC_TSEN_PD_ON (0x30003 << 0) -#define GRF_TSADC_TSEN_PD_OFF (0x30000 << 0) #define GRF_SARADC_TESTBIT_ON (0x10001 << 2) #define GRF_TSADC_TESTBIT_H_ON (0x10001 << 2) +#define GRF_TSADC_VCM_EN_L (0x10001 << 7) +#define GRF_TSADC_VCM_EN_H (0x10001 << 7) /** * struct tsadc_table - code to temperature conversion table @@ -394,13 +401,17 @@ static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table, int temp) { int high, low, mid; + u32 error = 0; low = 0; high = table.length - 1; mid = (high + low) / 2; - if (temp < table.id[low].temp || temp > table.id[high].temp) - return 0; + /* Return mask code data when the temp is over table range */ + if (temp < table.id[low].temp || temp > table.id[high].temp) { + error = table.data_mask; + goto exit; + } while (low <= high) { if (temp == table.id[mid].temp) @@ -412,7 +423,9 @@ static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table, mid = (low + high) / 2; } - return 0; +exit: + pr_err("Invalid the conversion, error=%d\n", error); + return error; } static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, @@ -543,14 +556,34 @@ 
static void rk_tsadcv3_initialize(struct regmap *grf, void __iomem *regs,
 		/* Set interleave value to work around the IC time-sync issue */
 		writel_relaxed(TSADCV2_USER_INTER_PD_SOC, regs + TSADCV2_USER_CON);
+
+		writel_relaxed(TSADCV2_AUTO_PERIOD_TIME,
+			       regs + TSADCV2_AUTO_PERIOD);
+		writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
+			       regs + TSADCV2_HIGHT_INT_DEBOUNCE);
+		writel_relaxed(TSADCV2_AUTO_PERIOD_HT_TIME,
+			       regs + TSADCV2_AUTO_PERIOD_HT);
+		writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
+			       regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
+
 	} else {
-		regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_TSEN_PD_ON);
-		mdelay(10);
-		regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_TSEN_PD_OFF);
+		/* Enable the voltage common mode feature */
+		regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_VCM_EN_L);
+		regmap_write(grf, GRF_TSADC_TESTBIT_H, GRF_TSADC_VCM_EN_H);
+		usleep_range(15, 100); /* The spec note says at least 15 us */
 		regmap_write(grf, GRF_SARADC_TESTBIT, GRF_SARADC_TESTBIT_ON);
 		regmap_write(grf, GRF_TSADC_TESTBIT_H, GRF_TSADC_TESTBIT_H_ON);
 		usleep_range(90, 200); /* The spec note says at least 90 us */
+
+		writel_relaxed(TSADCV3_AUTO_PERIOD_TIME,
+			       regs + TSADCV2_AUTO_PERIOD);
+		writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
+			       regs + TSADCV2_HIGHT_INT_DEBOUNCE);
+		writel_relaxed(TSADCV3_AUTO_PERIOD_HT_TIME,
+			       regs + TSADCV2_AUTO_PERIOD_HT);
+		writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
+			       regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
 	}
 
 	if (tshut_polarity == TSHUT_HIGH_ACTIVE)
@@ -559,14 +592,6 @@ static void rk_tsadcv3_initialize(struct regmap *grf, void __iomem *regs,
 	else
 		writel_relaxed(0U & ~TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
 			       regs + TSADCV2_AUTO_CON);
-
-	writel_relaxed(TSADCV2_AUTO_PERIOD_TIME, regs + TSADCV2_AUTO_PERIOD);
-	writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
-		       regs + TSADCV2_HIGHT_INT_DEBOUNCE);
-	writel_relaxed(TSADCV2_AUTO_PERIOD_HT_TIME,
-		       regs + TSADCV2_AUTO_PERIOD_HT);
-	writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
-		       regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
 }
 
 static void rk_tsadcv2_irq_ack(void __iomem *regs)
@@ -628,12 +653,34 @@ static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
 	return rk_tsadcv2_code_to_temp(table, val, temp);
 }
 
+static void rk_tsadcv2_alarm_temp(struct chip_tsadc_table table,
+				  int chn, void __iomem *regs, int temp)
+{
+	u32 alarm_value, int_en;
+
+	/* Make sure the value is valid */
+	alarm_value = rk_tsadcv2_temp_to_code(table, temp);
+	if (alarm_value == table.data_mask)
+		return;
+
+	writel_relaxed(alarm_value & table.data_mask,
+		       regs + TSADCV2_COMP_INT(chn));
+
+	int_en = readl_relaxed(regs + TSADCV2_INT_EN);
+	int_en |= TSADCV2_INT_SRC_EN(chn);
+	writel_relaxed(int_en, regs + TSADCV2_INT_EN);
+}
+
 static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table,
 				  int chn, void __iomem *regs, int temp)
 {
 	u32 tshut_value, val;
 
+	/* Make sure the value is valid */
 	tshut_value = rk_tsadcv2_temp_to_code(table, temp);
+	if (tshut_value == table.data_mask)
+		return;
+
 	writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
 
 	/* TSHUT will be valid */
@@ -670,6 +717,7 @@ static const struct rockchip_tsadc_chip rk3228_tsadc_data = {
 	.irq_ack = rk_tsadcv3_irq_ack,
 	.control = rk_tsadcv3_control,
 	.get_temp = rk_tsadcv2_get_temp,
+	.set_alarm_temp = rk_tsadcv2_alarm_temp,
 	.set_tshut_temp = rk_tsadcv2_tshut_temp,
 	.set_tshut_mode = rk_tsadcv2_tshut_mode,
 
@@ -694,6 +742,7 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
 	.irq_ack = rk_tsadcv2_irq_ack,
 	.control = rk_tsadcv2_control,
 	.get_temp = rk_tsadcv2_get_temp,
+
.set_alarm_temp = rk_tsadcv2_alarm_temp, .set_tshut_temp = rk_tsadcv2_tshut_temp, .set_tshut_mode = rk_tsadcv2_tshut_mode, @@ -718,6 +767,7 @@ static const struct rockchip_tsadc_chip rk3366_tsadc_data = { .irq_ack = rk_tsadcv3_irq_ack, .control = rk_tsadcv3_control, .get_temp = rk_tsadcv2_get_temp, + .set_alarm_temp = rk_tsadcv2_alarm_temp, .set_tshut_temp = rk_tsadcv2_tshut_temp, .set_tshut_mode = rk_tsadcv2_tshut_mode, @@ -742,6 +792,7 @@ static const struct rockchip_tsadc_chip rk3368_tsadc_data = { .irq_ack = rk_tsadcv2_irq_ack, .control = rk_tsadcv2_control, .get_temp = rk_tsadcv2_get_temp, + .set_alarm_temp = rk_tsadcv2_alarm_temp, .set_tshut_temp = rk_tsadcv2_tshut_temp, .set_tshut_mode = rk_tsadcv2_tshut_mode, @@ -766,6 +817,7 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = { .irq_ack = rk_tsadcv3_irq_ack, .control = rk_tsadcv3_control, .get_temp = rk_tsadcv2_get_temp, + .set_alarm_temp = rk_tsadcv2_alarm_temp, .set_tshut_temp = rk_tsadcv2_tshut_temp, .set_tshut_mode = rk_tsadcv2_tshut_mode, @@ -821,11 +873,27 @@ static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev) thermal->chip->irq_ack(thermal->regs); for (i = 0; i < thermal->chip->chn_num; i++) - thermal_zone_device_update(thermal->sensors[i].tzd); + thermal_zone_device_update(thermal->sensors[i].tzd, + THERMAL_EVENT_UNSPECIFIED); return IRQ_HANDLED; } +static int rockchip_thermal_set_trips(void *_sensor, int low, int high) +{ + struct rockchip_thermal_sensor *sensor = _sensor; + struct rockchip_thermal_data *thermal = sensor->thermal; + const struct rockchip_tsadc_chip *tsadc = thermal->chip; + + dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n", + __func__, sensor->id, low, high); + + tsadc->set_alarm_temp(tsadc->table, + sensor->id, thermal->regs, high); + + return 0; +} + static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) { struct rockchip_thermal_sensor *sensor = _sensor; @@ -843,6 +911,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) static const struct thermal_zone_of_device_ops rockchip_of_thermal_ops = { .get_temp = rockchip_thermal_get_temp, + .set_trips = rockchip_thermal_set_trips, }; static int rockchip_configure_from_dt(struct device *dev, diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index f3ce94ec73b5..ad1186dd6132 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -225,7 +225,7 @@ static void exynos_report_trigger(struct exynos_tmu_data *p) return; } - thermal_zone_device_update(tz); + thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); mutex_lock(&tz->lock); /* Find the level for which trip happened */ diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c index fc0c9e198710..91d42319de27 100644 --- a/drivers/thermal/st/st_thermal_memmap.c +++ b/drivers/thermal/st/st_thermal_memmap.c @@ -42,7 +42,8 @@ static irqreturn_t st_mmap_thermal_trip_handler(int irq, void *sdata) { struct st_thermal_sensor *sensor = sdata; - thermal_zone_device_update(sensor->thermal_dev); + thermal_zone_device_update(sensor->thermal_dev, + THERMAL_EVENT_UNSPECIFIED); return IRQ_HANDLED; } diff --git a/drivers/thermal/tango_thermal.c b/drivers/thermal/tango_thermal.c index 70e0d9f406e9..201304aeafeb 100644 --- a/drivers/thermal/tango_thermal.c +++ b/drivers/thermal/tango_thermal.c @@ -64,6 +64,12 @@ static const struct thermal_zone_of_device_ops ops = { .get_temp = tango_get_temp, }; +static void 
tango_thermal_init(struct tango_thermal_priv *priv) +{ + writel(0, priv->base + TEMPSI_CFG); + writel(CMD_ON, priv->base + TEMPSI_CMD); +} + static int tango_thermal_probe(struct platform_device *pdev) { struct resource *res; @@ -79,14 +85,22 @@ static int tango_thermal_probe(struct platform_device *pdev) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); + platform_set_drvdata(pdev, priv); priv->thresh_idx = IDX_MIN; - writel(0, priv->base + TEMPSI_CFG); - writel(CMD_ON, priv->base + TEMPSI_CMD); + tango_thermal_init(priv); tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv, &ops); return PTR_ERR_OR_ZERO(tzdev); } +static int __maybe_unused tango_thermal_resume(struct device *dev) +{ + tango_thermal_init(dev_get_drvdata(dev)); + return 0; +} + +static SIMPLE_DEV_PM_OPS(tango_thermal_pm, NULL, tango_thermal_resume); + static const struct of_device_id tango_sensor_ids[] = { { .compatible = "sigma,smp8758-thermal", @@ -99,6 +113,7 @@ static struct platform_driver tango_thermal_driver = { .driver = { .name = "tango-thermal", .of_match_table = tango_sensor_ids, + .pm = &tango_thermal_pm, }, }; diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c index b8651726201e..7d2db23d71a3 100644 --- a/drivers/thermal/tegra/soctherm.c +++ b/drivers/thermal/tegra/soctherm.c @@ -30,6 +30,7 @@ #include <dt-bindings/thermal/tegra124-soctherm.h> +#include "../thermal_core.h" #include "soctherm.h" #define SENSOR_CONFIG0 0 @@ -67,35 +68,228 @@ #define READBACK_ADD_HALF BIT(7) #define READBACK_NEGATE BIT(0) +/* + * THERMCTL_LEVEL0_GROUP_CPU is defined in soctherm.h + * because it will be used by tegraxxx_soctherm.c + */ +#define THERMCTL_LVL0_CPU0_EN_MASK BIT(8) +#define THERMCTL_LVL0_CPU0_CPU_THROT_MASK (0x3 << 5) +#define THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT 0x1 +#define THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY 0x2 +#define THERMCTL_LVL0_CPU0_GPU_THROT_MASK (0x3 << 3) +#define THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT 0x1 +#define THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY 0x2 +#define THERMCTL_LVL0_CPU0_MEM_THROT_MASK BIT(2) +#define THERMCTL_LVL0_CPU0_STATUS_MASK 0x3 + +#define THERMCTL_LVL0_UP_STATS 0x10 +#define THERMCTL_LVL0_DN_STATS 0x14 + +#define THERMCTL_STATS_CTL 0x94 +#define STATS_CTL_CLR_DN 0x8 +#define STATS_CTL_EN_DN 0x4 +#define STATS_CTL_CLR_UP 0x2 +#define STATS_CTL_EN_UP 0x1 + +#define THROT_GLOBAL_CFG 0x400 +#define THROT_GLOBAL_ENB_MASK BIT(0) + +#define CPU_PSKIP_STATUS 0x418 +#define XPU_PSKIP_STATUS_M_MASK (0xff << 12) +#define XPU_PSKIP_STATUS_N_MASK (0xff << 4) +#define XPU_PSKIP_STATUS_SW_OVERRIDE_MASK BIT(1) +#define XPU_PSKIP_STATUS_ENABLED_MASK BIT(0) + +#define THROT_PRIORITY_LOCK 0x424 +#define THROT_PRIORITY_LOCK_PRIORITY_MASK 0xff + +#define THROT_STATUS 0x428 +#define THROT_STATUS_BREACH_MASK BIT(12) +#define THROT_STATUS_STATE_MASK (0xff << 4) +#define THROT_STATUS_ENABLED_MASK BIT(0) + +#define THROT_PSKIP_CTRL_LITE_CPU 0x430 +#define THROT_PSKIP_CTRL_ENABLE_MASK BIT(31) +#define THROT_PSKIP_CTRL_DIVIDEND_MASK (0xff << 8) +#define THROT_PSKIP_CTRL_DIVISOR_MASK 0xff +#define THROT_PSKIP_CTRL_VECT_GPU_MASK (0x7 << 16) +#define THROT_PSKIP_CTRL_VECT_CPU_MASK (0x7 << 8) +#define THROT_PSKIP_CTRL_VECT2_CPU_MASK 0x7 + +#define THROT_VECT_NONE 0x0 /* 3'b000 */ +#define THROT_VECT_LOW 0x1 /* 3'b001 */ +#define THROT_VECT_MED 0x3 /* 3'b011 */ +#define THROT_VECT_HIGH 0x7 /* 3'b111 */ + +#define THROT_PSKIP_RAMP_LITE_CPU 0x434 +#define THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK BIT(31) +#define THROT_PSKIP_RAMP_DURATION_MASK (0xffff << 8) +#define 
THROT_PSKIP_RAMP_STEP_MASK 0xff + +#define THROT_PRIORITY_LITE 0x444 +#define THROT_PRIORITY_LITE_PRIO_MASK 0xff + +#define THROT_DELAY_LITE 0x448 +#define THROT_DELAY_LITE_DELAY_MASK 0xff + +/* car register offsets needed for enabling HW throttling */ +#define CAR_SUPER_CCLKG_DIVIDER 0x36c +#define CDIVG_USE_THERM_CONTROLS_MASK BIT(30) + +/* ccroc register offsets needed for enabling HW throttling for Tegra132 */ +#define CCROC_SUPER_CCLKG_DIVIDER 0x024 + +#define CCROC_GLOBAL_CFG 0x148 + +#define CCROC_THROT_PSKIP_RAMP_CPU 0x150 +#define CCROC_THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK BIT(31) +#define CCROC_THROT_PSKIP_RAMP_DURATION_MASK (0xffff << 8) +#define CCROC_THROT_PSKIP_RAMP_STEP_MASK 0xff + +#define CCROC_THROT_PSKIP_CTRL_CPU 0x154 +#define CCROC_THROT_PSKIP_CTRL_ENB_MASK BIT(31) +#define CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK (0xff << 8) +#define CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK 0xff + /* get val from register(r) mask bits(m) */ #define REG_GET_MASK(r, m) (((r) & (m)) >> (ffs(m) - 1)) /* set val(v) to mask bits(m) of register(r) */ #define REG_SET_MASK(r, m, v) (((r) & ~(m)) | \ (((v) & (m >> (ffs(m) - 1))) << (ffs(m) - 1))) +/* get dividend from the depth */ +#define THROT_DEPTH_DIVIDEND(depth) ((256 * (100 - (depth)) / 100) - 1) + +/* get THROT_PSKIP_xxx offset per LIGHT/HEAVY throt and CPU/GPU dev */ +#define THROT_OFFSET 0x30 +#define THROT_PSKIP_CTRL(throt, dev) (THROT_PSKIP_CTRL_LITE_CPU + \ + (THROT_OFFSET * throt) + (8 * dev)) +#define THROT_PSKIP_RAMP(throt, dev) (THROT_PSKIP_RAMP_LITE_CPU + \ + (THROT_OFFSET * throt) + (8 * dev)) + +/* get THROT_xxx_CTRL offset per LIGHT/HEAVY throt */ +#define THROT_PRIORITY_CTRL(throt) (THROT_PRIORITY_LITE + \ + (THROT_OFFSET * throt)) +#define THROT_DELAY_CTRL(throt) (THROT_DELAY_LITE + \ + (THROT_OFFSET * throt)) + +/* get CCROC_THROT_PSKIP_xxx offset per HIGH/MED/LOW vect*/ +#define CCROC_THROT_OFFSET 0x0c +#define CCROC_THROT_PSKIP_CTRL_CPU_REG(vect) (CCROC_THROT_PSKIP_CTRL_CPU + \ + (CCROC_THROT_OFFSET * vect)) +#define CCROC_THROT_PSKIP_RAMP_CPU_REG(vect) (CCROC_THROT_PSKIP_RAMP_CPU + \ + (CCROC_THROT_OFFSET * vect)) + +/* get THERMCTL_LEVELx offset per CPU/GPU/MEM/TSENSE rg and LEVEL0~3 lv */ +#define THERMCTL_LVL_REGS_SIZE 0x20 +#define THERMCTL_LVL_REG(rg, lv) ((rg) + ((lv) * THERMCTL_LVL_REGS_SIZE)) + static const int min_low_temp = -127000; static const int max_high_temp = 127000; +enum soctherm_throttle_id { + THROTTLE_LIGHT = 0, + THROTTLE_HEAVY, + THROTTLE_SIZE, +}; + +enum soctherm_throttle_dev_id { + THROTTLE_DEV_CPU = 0, + THROTTLE_DEV_GPU, + THROTTLE_DEV_SIZE, +}; + +static const char *const throt_names[] = { + [THROTTLE_LIGHT] = "light", + [THROTTLE_HEAVY] = "heavy", +}; + +struct tegra_soctherm; struct tegra_thermctl_zone { void __iomem *reg; struct device *dev; + struct tegra_soctherm *ts; struct thermal_zone_device *tz; const struct tegra_tsensor_group *sg; }; +struct soctherm_throt_cfg { + const char *name; + unsigned int id; + u8 priority; + u8 cpu_throt_level; + u32 cpu_throt_depth; + struct thermal_cooling_device *cdev; + bool init; +}; + struct tegra_soctherm { struct reset_control *reset; struct clk *clock_tsensor; struct clk *clock_soctherm; void __iomem *regs; - struct thermal_zone_device **thermctl_tzs; + void __iomem *clk_regs; + void __iomem *ccroc_regs; u32 *calib; + struct thermal_zone_device **thermctl_tzs; struct tegra_soctherm_soc *soc; + struct soctherm_throt_cfg throt_cfgs[THROTTLE_SIZE]; + struct dentry *debugfs_dir; }; +/** + * clk_writel() - writes a value to a CAR register + * @ts: pointer to 
a struct tegra_soctherm + * @v: the value to write + * @reg: the register offset + * + * Writes @v to @reg. No return value. + */ +static inline void clk_writel(struct tegra_soctherm *ts, u32 value, u32 reg) +{ + writel(value, (ts->clk_regs + reg)); +} + +/** + * clk_readl() - reads specified register from CAR IP block + * @ts: pointer to a struct tegra_soctherm + * @reg: register address to be read + * + * Return: the value of the register + */ +static inline u32 clk_readl(struct tegra_soctherm *ts, u32 reg) +{ + return readl(ts->clk_regs + reg); +} + +/** + * ccroc_writel() - writes a value to a CCROC register + * @ts: pointer to a struct tegra_soctherm + * @v: the value to write + * @reg: the register offset + * + * Writes @v to @reg. No return value. + */ +static inline void ccroc_writel(struct tegra_soctherm *ts, u32 value, u32 reg) +{ + writel(value, (ts->ccroc_regs + reg)); +} + +/** + * ccroc_readl() - reads specified register from CCROC IP block + * @ts: pointer to a struct tegra_soctherm + * @reg: register address to be read + * + * Return: the value of the register + */ +static inline u32 ccroc_readl(struct tegra_soctherm *ts, u32 reg) +{ + return readl(ts->ccroc_regs + reg); +} + static void enable_tsensor(struct tegra_soctherm *tegra, unsigned int i) { const struct tegra_tsensor *sensor = &tegra->soc->tsensors[i]; @@ -150,11 +344,17 @@ static int tegra_thermctl_get_temp(void *data, int *out_temp) static int thermtrip_program(struct device *dev, const struct tegra_tsensor_group *sg, int trip_temp); +static int +throttrip_program(struct device *dev, const struct tegra_tsensor_group *sg, + struct soctherm_throt_cfg *stc, int trip_temp); +static struct soctherm_throt_cfg * +find_throttle_cfg_by_name(struct tegra_soctherm *ts, const char *name); static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp) { struct tegra_thermctl_zone *zone = data; struct thermal_zone_device *tz = zone->tz; + struct tegra_soctherm *ts = zone->ts; const struct tegra_tsensor_group *sg = zone->sg; struct device *dev = zone->dev; enum thermal_trip_type type; @@ -167,10 +367,29 @@ static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp) if (ret) return ret; - if (type != THERMAL_TRIP_CRITICAL) - return 0; + if (type == THERMAL_TRIP_CRITICAL) { + return thermtrip_program(dev, sg, temp); + } else if (type == THERMAL_TRIP_HOT) { + int i; + + for (i = 0; i < THROTTLE_SIZE; i++) { + struct thermal_cooling_device *cdev; + struct soctherm_throt_cfg *stc; + + if (!ts->throt_cfgs[i].init) + continue; + + cdev = ts->throt_cfgs[i].cdev; + if (get_thermal_instance(tz, cdev, trip)) + stc = find_throttle_cfg_by_name(ts, cdev->type); + else + continue; + + return throttrip_program(dev, sg, stc, temp); + } + } - return thermtrip_program(dev, sg, temp); + return 0; } static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = { @@ -238,14 +457,110 @@ static int thermtrip_program(struct device *dev, } /** + * throttrip_program() - Configures the hardware to throttle the + * pulse if a given sensor group reaches a given temperature + * @dev: ptr to the struct device for the SOC_THERM IP block + * @sg: pointer to the sensor group to set the thermtrip temperature for + * @stc: pointer to the throttle need to be triggered + * @trip_temp: the temperature in millicelsius to trigger the thermal trip at + * + * Sets the thermal trip threshold and throttle event of the given sensor + * group. If this threshold is crossed, the hardware will trigger the + * throttle. 
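+ *
+ * As a worked example of the threshold math (the numbers are
+ * illustrative, not taken from any datasheet): with a thresh_grain of
+ * 1000, a @trip_temp of 70000 mC is clamped by enforce_temp_range()
+ * and programmed as 70000 / 1000 = 70 into both the up and down
+ * threshold fields of the LEVEL1 (LIGHT) or LEVEL2 (HEAVY) register
+ * selected by THERMCTL_LVL_REG().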
+ *
+ * Note that, although @trip_temp is specified in millicelsius, the
+ * hardware is programmed in degrees Celsius.
+ *
+ * Return: 0 upon success, or %-EINVAL upon failure.
+ */
+static int throttrip_program(struct device *dev,
+			     const struct tegra_tsensor_group *sg,
+			     struct soctherm_throt_cfg *stc,
+			     int trip_temp)
+{
+	struct tegra_soctherm *ts = dev_get_drvdata(dev);
+	int temp, cpu_throt, gpu_throt;
+	unsigned int throt;
+	u32 r, reg_off;
+
+	if (!dev || !sg || !stc || !stc->init)
+		return -EINVAL;
+
+	temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;
+
+	/* Hardcode LIGHT on LEVEL1 and HEAVY on LEVEL2 */
+	throt = stc->id;
+	reg_off = THERMCTL_LVL_REG(sg->thermctl_lvl0_offset, throt + 1);
+
+	if (throt == THROTTLE_LIGHT) {
+		cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT;
+		gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT;
+	} else {
+		cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY;
+		gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY;
+		if (throt != THROTTLE_HEAVY)
+			dev_warn(dev,
+				 "invalid throt id %d - assuming HEAVY\n",
+				 throt);
+	}
+
+	r = readl(ts->regs + reg_off);
+	r = REG_SET_MASK(r, sg->thermctl_lvl0_up_thresh_mask, temp);
+	r = REG_SET_MASK(r, sg->thermctl_lvl0_dn_thresh_mask, temp);
+	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_CPU_THROT_MASK, cpu_throt);
+	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_GPU_THROT_MASK, gpu_throt);
+	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1);
+	writel(r, ts->regs + reg_off);
+
+	return 0;
+}
+
+static struct soctherm_throt_cfg *
+find_throttle_cfg_by_name(struct tegra_soctherm *ts, const char *name)
+{
+	unsigned int i;
+
+	for (i = 0; ts->throt_cfgs[i].name; i++)
+		if (!strcmp(ts->throt_cfgs[i].name, name))
+			return &ts->throt_cfgs[i];
+
+	return NULL;
+}
+
+static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
+{
+	int ntrips, i, ret;
+	enum thermal_trip_type type;
+
+	ntrips = of_thermal_get_ntrips(tz);
+	if (ntrips <= 0)
+		return -EINVAL;
+
+	for (i = 0; i < ntrips; i++) {
+		ret = tz->ops->get_trip_type(tz, i, &type);
+		if (ret)
+			return -EINVAL;
+		if (type == THERMAL_TRIP_HOT) {
+			ret = tz->ops->get_trip_temp(tz, i, temp);
+			if (!ret)
+				*trip = i;
+
+			return ret;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/**
  * tegra_soctherm_set_hwtrips() - set HW trip point from DT data
  * @dev: struct device * of the SOC_THERM instance
  *
  * Configure the SOC_THERM HW trip points, setting "THERMTRIP"
- * trip points , using "critical" type trip_temp from thermal
- * zone.
- * After they have been configured, THERMTRIP will take action
- * when the configured SoC thermal sensor group reaches a
+ * or "THROTTLE" trip points, using "critical" or "hot" type trip_temp
+ * from thermal zone.
+ * After they have been configured, THERMTRIP or THROTTLE will take
+ * action when the configured SoC thermal sensor group reaches a
  * certain temperature.
 *
 * Return: 0 upon success, or a negative error code on failure.
@@ -254,19 +569,24 @@ static int thermtrip_program(struct device *dev,
  * THERMTRIP has been enabled successfully when a message similar to
  * this one appears on the serial console:
  * "thermtrip: will shut down when sensor group XXX reaches YYYYYY mC"
+ * THROTTLE has been enabled successfully when a message similar to
+ * this one appears on the serial console:
+ * "throttrip: will throttle when sensor group XXX reaches YYYYYY mC"
  */
 static int tegra_soctherm_set_hwtrips(struct device *dev,
 				      const struct tegra_tsensor_group *sg,
 				      struct thermal_zone_device *tz)
 {
-	int temperature;
+	struct tegra_soctherm *ts = dev_get_drvdata(dev);
+	struct soctherm_throt_cfg *stc;
+	int i, trip, temperature;
 	int ret;
 
 	ret = tz->ops->get_crit_temp(tz, &temperature);
 	if (ret) {
 		dev_warn(dev, "thermtrip: %s: missing critical temperature\n",
 			 sg->name);
-		return ret;
+		goto set_throttle;
 	}
 
 	ret = thermtrip_program(dev, sg, temperature);
@@ -280,6 +600,43 @@ static int tegra_soctherm_set_hwtrips(struct device *dev,
 		 "thermtrip: will shut down when %s reaches %d mC\n",
 		 sg->name, temperature);
 
+set_throttle:
+	ret = get_hot_temp(tz, &trip, &temperature);
+	if (ret) {
+		dev_warn(dev, "throttrip: %s: missing hot temperature\n",
+			 sg->name);
+		return 0;
+	}
+
+	for (i = 0; i < THROTTLE_SIZE; i++) {
+		struct thermal_cooling_device *cdev;
+
+		if (!ts->throt_cfgs[i].init)
+			continue;
+
+		cdev = ts->throt_cfgs[i].cdev;
+		if (get_thermal_instance(tz, cdev, trip))
+			stc = find_throttle_cfg_by_name(ts, cdev->type);
+		else
+			continue;
+
+		ret = throttrip_program(dev, sg, stc, temperature);
+		if (ret) {
+			dev_err(dev, "throttrip: %s: error during enable\n",
+				sg->name);
+			return ret;
+		}
+
+		dev_info(dev,
+			 "throttrip: will throttle when %s reaches %d mC\n",
+			 sg->name, temperature);
+		break;
+	}
+
+	if (i == THROTTLE_SIZE)
+		dev_warn(dev, "throttrip: %s: missing throttle cdev\n",
+			 sg->name);
+
 	return 0;
 }
 
@@ -291,7 +648,7 @@ static int regs_show(struct seq_file *s, void *data)
 	const struct tegra_tsensor *tsensors = ts->soc->tsensors;
 	const struct tegra_tsensor_group **ttgs = ts->soc->ttgs;
 	u32 r, state;
-	int i;
+	int i, level;
 
 	seq_puts(s, "-----TSENSE (convert HW)-----\n");
 
@@ -365,6 +722,81 @@ static int regs_show(struct seq_file *s, void *data)
 	state = REG_GET_MASK(r, SENSOR_TEMP2_MEM_TEMP_MASK);
 	seq_printf(s, " MEM(%d)\n", translate_temp(state));
 
+	for (i = 0; i < ts->soc->num_ttgs; i++) {
+		seq_printf(s, "%s:\n", ttgs[i]->name);
+		for (level = 0; level < 4; level++) {
+			s32 v;
+			u32 mask;
+			u16 off = ttgs[i]->thermctl_lvl0_offset;
+
+			r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
+
+			mask = ttgs[i]->thermctl_lvl0_up_thresh_mask;
+			state = REG_GET_MASK(r, mask);
+			v = sign_extend32(state, ts->soc->bptt - 1);
+			v *= ts->soc->thresh_grain;
+			seq_printf(s, " %d: Up/Dn(%d /", level, v);
+
+			mask = ttgs[i]->thermctl_lvl0_dn_thresh_mask;
+			state = REG_GET_MASK(r, mask);
+			v = sign_extend32(state, ts->soc->bptt - 1);
+			v *= ts->soc->thresh_grain;
+			seq_printf(s, "%d ) ", v);
+
+			mask = THERMCTL_LVL0_CPU0_EN_MASK;
+			state = REG_GET_MASK(r, mask);
+			seq_printf(s, "En(%d) ", state);
+
+			mask = THERMCTL_LVL0_CPU0_CPU_THROT_MASK;
+			state = REG_GET_MASK(r, mask);
+			seq_puts(s, "CPU Throt");
+			if (!state)
+				seq_printf(s, "(%s) ", "none");
+			else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT)
+				seq_printf(s, "(%s) ", "L");
+			else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY)
+				seq_printf(s, "(%s) ", "H");
+			else
+				seq_printf(s, "(%s) ", "H+L");
+
+			mask = THERMCTL_LVL0_CPU0_GPU_THROT_MASK;
+			state = REG_GET_MASK(r, mask);
+
seq_puts(s, "GPU Throt"); + if (!state) + seq_printf(s, "(%s) ", "none"); + else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT) + seq_printf(s, "(%s) ", "L"); + else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY) + seq_printf(s, "(%s) ", "H"); + else + seq_printf(s, "(%s) ", "H+L"); + + mask = THERMCTL_LVL0_CPU0_STATUS_MASK; + state = REG_GET_MASK(r, mask); + seq_printf(s, "Status(%s)\n", + state == 0 ? "LO" : + state == 1 ? "In" : + state == 2 ? "Res" : "HI"); + } + } + + r = readl(ts->regs + THERMCTL_STATS_CTL); + seq_printf(s, "STATS: Up(%s) Dn(%s)\n", + r & STATS_CTL_EN_UP ? "En" : "--", + r & STATS_CTL_EN_DN ? "En" : "--"); + + for (level = 0; level < 4; level++) { + u16 off; + + off = THERMCTL_LVL0_UP_STATS; + r = readl(ts->regs + THERMCTL_LVL_REG(off, level)); + seq_printf(s, " Level_%d Up(%d) ", level, r); + + off = THERMCTL_LVL0_DN_STATS; + r = readl(ts->regs + THERMCTL_LVL_REG(off, level)); + seq_printf(s, "Dn(%d)\n", r); + } + r = readl(ts->regs + THERMCTL_THERMTRIP_CTL); state = REG_GET_MASK(r, ttgs[0]->thermtrip_any_en_mask); seq_printf(s, "Thermtrip Any En(%d)\n", state); @@ -376,6 +808,32 @@ static int regs_show(struct seq_file *s, void *data) seq_printf(s, "Thresh(%d)\n", state); } + r = readl(ts->regs + THROT_GLOBAL_CFG); + seq_puts(s, "\n"); + seq_printf(s, "GLOBAL THROTTLE CONFIG: 0x%08x\n", r); + + seq_puts(s, "---------------------------------------------------\n"); + r = readl(ts->regs + THROT_STATUS); + state = REG_GET_MASK(r, THROT_STATUS_BREACH_MASK); + seq_printf(s, "THROT STATUS: breach(%d) ", state); + state = REG_GET_MASK(r, THROT_STATUS_STATE_MASK); + seq_printf(s, "state(%d) ", state); + state = REG_GET_MASK(r, THROT_STATUS_ENABLED_MASK); + seq_printf(s, "enabled(%d)\n", state); + + r = readl(ts->regs + CPU_PSKIP_STATUS); + if (ts->soc->use_ccroc) { + state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK); + seq_printf(s, "CPU PSKIP STATUS: enabled(%d)\n", state); + } else { + state = REG_GET_MASK(r, XPU_PSKIP_STATUS_M_MASK); + seq_printf(s, "CPU PSKIP STATUS: M(%d) ", state); + state = REG_GET_MASK(r, XPU_PSKIP_STATUS_N_MASK); + seq_printf(s, "N(%d) ", state); + state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK); + seq_printf(s, "enabled(%d)\n", state); + } + return 0; } @@ -449,6 +907,326 @@ static int soctherm_clk_enable(struct platform_device *pdev, bool enable) return 0; } +static int throt_get_cdev_max_state(struct thermal_cooling_device *cdev, + unsigned long *max_state) +{ + *max_state = 1; + return 0; +} + +static int throt_get_cdev_cur_state(struct thermal_cooling_device *cdev, + unsigned long *cur_state) +{ + struct tegra_soctherm *ts = cdev->devdata; + u32 r; + + r = readl(ts->regs + THROT_STATUS); + if (REG_GET_MASK(r, THROT_STATUS_STATE_MASK)) + *cur_state = 1; + else + *cur_state = 0; + + return 0; +} + +static int throt_set_cdev_state(struct thermal_cooling_device *cdev, + unsigned long cur_state) +{ + return 0; +} + +static struct thermal_cooling_device_ops throt_cooling_ops = { + .get_max_state = throt_get_cdev_max_state, + .get_cur_state = throt_get_cdev_cur_state, + .set_cur_state = throt_set_cdev_state, +}; + +/** + * soctherm_init_hw_throt_cdev() - Parse the HW throttle configurations + * and register them as cooling devices. 
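+ *
+ * A minimal sketch of the device-tree layout this parser expects; the
+ * node label and the property values are illustrative only, while the
+ * property names match the of_property_read_u32() calls below:
+ *
+ *	throttle-cfgs {
+ *		throttle_heavy: heavy {
+ *			nvidia,priority = <100>;
+ *			nvidia,cpu-throt-percent = <85>;
+ *		};
+ *	};
+ *
+ * On SoCs with use_ccroc set, nvidia,cpu-throt-level is parsed in
+ * place of nvidia,cpu-throt-percent.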
+ */ +static void soctherm_init_hw_throt_cdev(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct tegra_soctherm *ts = dev_get_drvdata(dev); + struct device_node *np_stc, *np_stcc; + const char *name; + u32 val; + int i, r; + + for (i = 0; i < THROTTLE_SIZE; i++) { + ts->throt_cfgs[i].name = throt_names[i]; + ts->throt_cfgs[i].id = i; + ts->throt_cfgs[i].init = false; + } + + np_stc = of_get_child_by_name(dev->of_node, "throttle-cfgs"); + if (!np_stc) { + dev_info(dev, + "throttle-cfg: no throttle-cfgs - not enabling\n"); + return; + } + + for_each_child_of_node(np_stc, np_stcc) { + struct soctherm_throt_cfg *stc; + struct thermal_cooling_device *tcd; + + name = np_stcc->name; + stc = find_throttle_cfg_by_name(ts, name); + if (!stc) { + dev_err(dev, + "throttle-cfg: could not find %s\n", name); + continue; + } + + r = of_property_read_u32(np_stcc, "nvidia,priority", &val); + if (r) { + dev_info(dev, + "throttle-cfg: %s: missing priority\n", name); + continue; + } + stc->priority = val; + + if (ts->soc->use_ccroc) { + r = of_property_read_u32(np_stcc, + "nvidia,cpu-throt-level", + &val); + if (r) { + dev_info(dev, + "throttle-cfg: %s: missing cpu-throt-level\n", + name); + continue; + } + stc->cpu_throt_level = val; + } else { + r = of_property_read_u32(np_stcc, + "nvidia,cpu-throt-percent", + &val); + if (r) { + dev_info(dev, + "throttle-cfg: %s: missing cpu-throt-percent\n", + name); + continue; + } + stc->cpu_throt_depth = val; + } + + tcd = thermal_of_cooling_device_register(np_stcc, + (char *)name, ts, + &throt_cooling_ops); + of_node_put(np_stcc); + if (IS_ERR_OR_NULL(tcd)) { + dev_err(dev, + "throttle-cfg: %s: failed to register cooling device\n", + name); + continue; + } + + stc->cdev = tcd; + stc->init = true; + } + + of_node_put(np_stc); +} + +/** + * throttlectl_cpu_level_cfg() - programs CCROC NV_THERM level config + * @level: describing the level LOW/MED/HIGH of throttling + * + * It's necessary to set up the CPU-local CCROC NV_THERM instance with + * the M/N values desired for each level. This function does this. + * + * This function pre-programs the CCROC NV_THERM levels in terms of + * pre-configured "Low", "Medium" or "Heavy" throttle levels which are + * mapped to THROT_LEVEL_LOW, THROT_LEVEL_MED and THROT_LEVEL_HVY. 
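+ *
+ * For reference, the fixed depths used below map to M/N dividends via
+ * THROT_DEPTH_DIVIDEND(): e.g. the HIGH level's 80 percent depth
+ * yields (256 * (100 - 80) / 100) - 1 = 50 against the 0xff divisor.
+ */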
+ */ +static void throttlectl_cpu_level_cfg(struct tegra_soctherm *ts, int level) +{ + u8 depth, dividend; + u32 r; + + switch (level) { + case TEGRA_SOCTHERM_THROT_LEVEL_LOW: + depth = 50; + break; + case TEGRA_SOCTHERM_THROT_LEVEL_MED: + depth = 75; + break; + case TEGRA_SOCTHERM_THROT_LEVEL_HIGH: + depth = 80; + break; + case TEGRA_SOCTHERM_THROT_LEVEL_NONE: + return; + default: + return; + } + + dividend = THROT_DEPTH_DIVIDEND(depth); + + /* setup PSKIP in ccroc nv_therm registers */ + r = ccroc_readl(ts, CCROC_THROT_PSKIP_RAMP_CPU_REG(level)); + r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_DURATION_MASK, 0xff); + r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_STEP_MASK, 0xf); + ccroc_writel(ts, r, CCROC_THROT_PSKIP_RAMP_CPU_REG(level)); + + r = ccroc_readl(ts, CCROC_THROT_PSKIP_CTRL_CPU_REG(level)); + r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_ENB_MASK, 1); + r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend); + r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff); + ccroc_writel(ts, r, CCROC_THROT_PSKIP_CTRL_CPU_REG(level)); +} + +/** + * throttlectl_cpu_level_select() - program CPU pulse skipper config + * @throt: the LIGHT/HEAVY of throttle event id + * + * Pulse skippers are used to throttle clock frequencies. This + * function programs the pulse skippers based on @throt and platform + * data. This function is used on SoCs which have CPU-local pulse + * skipper control, such as T13x. It programs soctherm's interface to + * Denver:CCROC NV_THERM in terms of Low, Medium and HIGH throttling + * vectors. PSKIP_BYPASS mode is set as required per HW spec. + */ +static void throttlectl_cpu_level_select(struct tegra_soctherm *ts, + enum soctherm_throttle_id throt) +{ + u32 r, throt_vect; + + /* Denver:CCROC NV_THERM interface N:3 Mapping */ + switch (ts->throt_cfgs[throt].cpu_throt_level) { + case TEGRA_SOCTHERM_THROT_LEVEL_LOW: + throt_vect = THROT_VECT_LOW; + break; + case TEGRA_SOCTHERM_THROT_LEVEL_MED: + throt_vect = THROT_VECT_MED; + break; + case TEGRA_SOCTHERM_THROT_LEVEL_HIGH: + throt_vect = THROT_VECT_HIGH; + break; + default: + throt_vect = THROT_VECT_NONE; + break; + } + + r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU)); + r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1); + r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_CPU_MASK, throt_vect); + r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT2_CPU_MASK, throt_vect); + writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU)); + + /* bypass sequencer in soc_therm as it is programmed in ccroc */ + r = REG_SET_MASK(0, THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK, 1); + writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU)); +} + +/** + * throttlectl_cpu_mn() - program CPU pulse skipper configuration + * @throt: the LIGHT/HEAVY of throttle event id + * + * Pulse skippers are used to throttle clock frequencies. This + * function programs the pulse skippers based on @throt and platform + * data. This function is used for CPUs that have "remote" pulse + * skipper control, e.g., the CPU pulse skipper is controlled by the + * SOC_THERM IP block. (SOC_THERM is located outside the CPU + * complex.) 
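+ *
+ * As an illustrative example (the depth value is assumed, not mandated
+ * by the hardware spec): a cpu_throt_depth of 50 percent gives a
+ * dividend of THROT_DEPTH_DIVIDEND(50) = 127, i.e. roughly every
+ * second pulse is skipped relative to the fixed 0xff divisor.
+ */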
+ */ +static void throttlectl_cpu_mn(struct tegra_soctherm *ts, + enum soctherm_throttle_id throt) +{ + u32 r; + int depth; + u8 dividend; + + depth = ts->throt_cfgs[throt].cpu_throt_depth; + dividend = THROT_DEPTH_DIVIDEND(depth); + + r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU)); + r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1); + r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend); + r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff); + writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU)); + + r = readl(ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU)); + r = REG_SET_MASK(r, THROT_PSKIP_RAMP_DURATION_MASK, 0xff); + r = REG_SET_MASK(r, THROT_PSKIP_RAMP_STEP_MASK, 0xf); + writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU)); +} + +/** + * soctherm_throttle_program() - programs pulse skippers' configuration + * @throt: the LIGHT/HEAVY of the throttle event id. + * + * Pulse skippers are used to throttle clock frequencies. + * This function programs the pulse skippers. + */ +static void soctherm_throttle_program(struct tegra_soctherm *ts, + enum soctherm_throttle_id throt) +{ + u32 r; + struct soctherm_throt_cfg stc = ts->throt_cfgs[throt]; + + if (!stc.init) + return; + + /* Setup PSKIP parameters */ + if (ts->soc->use_ccroc) + throttlectl_cpu_level_select(ts, throt); + else + throttlectl_cpu_mn(ts, throt); + + r = REG_SET_MASK(0, THROT_PRIORITY_LITE_PRIO_MASK, stc.priority); + writel(r, ts->regs + THROT_PRIORITY_CTRL(throt)); + + r = REG_SET_MASK(0, THROT_DELAY_LITE_DELAY_MASK, 0); + writel(r, ts->regs + THROT_DELAY_CTRL(throt)); + + r = readl(ts->regs + THROT_PRIORITY_LOCK); + r = REG_GET_MASK(r, THROT_PRIORITY_LOCK_PRIORITY_MASK); + if (r >= stc.priority) + return; + r = REG_SET_MASK(0, THROT_PRIORITY_LOCK_PRIORITY_MASK, + stc.priority); + writel(r, ts->regs + THROT_PRIORITY_LOCK); +} + +static void tegra_soctherm_throttle(struct device *dev) +{ + struct tegra_soctherm *ts = dev_get_drvdata(dev); + u32 v; + int i; + + /* configure LOW, MED and HIGH levels for CCROC NV_THERM */ + if (ts->soc->use_ccroc) { + throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_LOW); + throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_MED); + throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_HIGH); + } + + /* Thermal HW throttle programming */ + for (i = 0; i < THROTTLE_SIZE; i++) + soctherm_throttle_program(ts, i); + + v = REG_SET_MASK(0, THROT_GLOBAL_ENB_MASK, 1); + if (ts->soc->use_ccroc) { + ccroc_writel(ts, v, CCROC_GLOBAL_CFG); + + v = ccroc_readl(ts, CCROC_SUPER_CCLKG_DIVIDER); + v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1); + ccroc_writel(ts, v, CCROC_SUPER_CCLKG_DIVIDER); + } else { + writel(v, ts->regs + THROT_GLOBAL_CFG); + + v = clk_readl(ts, CAR_SUPER_CCLKG_DIVIDER); + v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1); + clk_writel(ts, v, CAR_SUPER_CCLKG_DIVIDER); + } + + /* initialize stats collection */ + v = STATS_CTL_CLR_DN | STATS_CTL_EN_DN | + STATS_CTL_CLR_UP | STATS_CTL_EN_UP; + writel(v, ts->regs + THERMCTL_STATS_CTL); +} + static void soctherm_init(struct platform_device *pdev) { struct tegra_soctherm *tegra = platform_get_drvdata(pdev); @@ -475,6 +1253,9 @@ static void soctherm_init(struct platform_device *pdev) } writel(pdiv, tegra->regs + SENSOR_PDIV); writel(hotspot, tegra->regs + SENSOR_HOTSPOT_OFF); + + /* Configure hw throttle */ + tegra_soctherm_throttle(&pdev->dev); } static const struct of_device_id tegra_soctherm_of_match[] = { @@ -527,10 +1308,31 @@ static int 
tegra_soctherm_probe(struct platform_device *pdev)
 
 	tegra->soc = soc;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "soctherm-reg");
 	tegra->regs = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(tegra->regs))
+	if (IS_ERR(tegra->regs)) {
+		dev_err(&pdev->dev, "can't get soctherm registers\n");
 		return PTR_ERR(tegra->regs);
+	}
+
+	if (!tegra->soc->use_ccroc) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   "car-reg");
+		tegra->clk_regs = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(tegra->clk_regs)) {
+			dev_err(&pdev->dev, "can't get car clk registers\n");
+			return PTR_ERR(tegra->clk_regs);
+		}
+	} else {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   "ccroc-reg");
+		tegra->ccroc_regs = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(tegra->ccroc_regs)) {
+			dev_err(&pdev->dev, "can't get ccroc registers\n");
+			return PTR_ERR(tegra->ccroc_regs);
+		}
+	}
 
 	tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm");
 	if (IS_ERR(tegra->reset)) {
@@ -580,6 +1382,8 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
 	if (err)
 		return err;
 
+	soctherm_init_hw_throt_cdev(pdev);
+
 	soctherm_init(pdev);
 
 	for (i = 0; i < soc->num_ttgs; ++i) {
@@ -593,6 +1397,7 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
 		zone->reg = tegra->regs + soc->ttgs[i]->sensor_temp_offset;
 		zone->dev = &pdev->dev;
 		zone->sg = soc->ttgs[i];
+		zone->ts = tegra;
 
 		z = devm_thermal_zone_of_sensor_register(&pdev->dev,
 							 soc->ttgs[i]->id, zone,
@@ -608,7 +1413,9 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
 		tegra->thermctl_tzs[soc->ttgs[i]->id] = z;
 
 		/* Configure hw trip points */
-		tegra_soctherm_set_hwtrips(&pdev->dev, soc->ttgs[i], z);
+		err = tegra_soctherm_set_hwtrips(&pdev->dev, soc->ttgs[i], z);
+		if (err)
+			goto disable_clocks;
 	}
 
 	soctherm_debug_init(pdev);
@@ -661,7 +1468,12 @@ static int __maybe_unused soctherm_resume(struct device *dev)
 		struct thermal_zone_device *tz;
 
 		tz = tegra->thermctl_tzs[soc->ttgs[i]->id];
-		tegra_soctherm_set_hwtrips(dev, soc->ttgs[i], tz);
+		err = tegra_soctherm_set_hwtrips(dev, soc->ttgs[i], tz);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Resume failed: set hwtrips failed\n");
+			return err;
+		}
 	}
 
 	return 0;
diff --git a/drivers/thermal/tegra/soctherm.h b/drivers/thermal/tegra/soctherm.h
index 28e18ec4b4c3..e96ca73fd780 100644
--- a/drivers/thermal/tegra/soctherm.h
+++ b/drivers/thermal/tegra/soctherm.h
@@ -15,6 +15,11 @@
 #ifndef __DRIVERS_THERMAL_TEGRA_SOCTHERM_H
 #define __DRIVERS_THERMAL_TEGRA_SOCTHERM_H
 
+#define THERMCTL_LEVEL0_GROUP_CPU		0x0
+#define THERMCTL_LEVEL0_GROUP_GPU		0x4
+#define THERMCTL_LEVEL0_GROUP_MEM		0x8
+#define THERMCTL_LEVEL0_GROUP_TSENSE		0xc
+
 #define SENSOR_CONFIG2				8
 #define SENSOR_CONFIG2_THERMA_MASK		(0xffff << 16)
 #define SENSOR_CONFIG2_THERMA_SHIFT		16
@@ -65,6 +70,9 @@ struct tegra_tsensor_group {
 	u32 thermtrip_enable_mask;
 	u32 thermtrip_any_en_mask;
 	u32 thermtrip_threshold_mask;
+	u16 thermctl_lvl0_offset;
+	u32 thermctl_lvl0_up_thresh_mask;
+	u32 thermctl_lvl0_dn_thresh_mask;
 };
 
 struct tegra_tsensor_configuration {
@@ -103,6 +111,8 @@ struct tegra_soctherm_soc {
 	const unsigned int num_ttgs;
 	const struct tegra_soctherm_fuse *tfuse;
 	const int thresh_grain;
+	const unsigned int bptt;
+	const bool use_ccroc;
 };
 
 int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse,
diff --git a/drivers/thermal/tegra/tegra124-soctherm.c b/drivers/thermal/tegra/tegra124-soctherm.c
index beb9d36b9c8a..36768630f78c 100644
---
a/drivers/thermal/tegra/tegra124-soctherm.c +++ b/drivers/thermal/tegra/tegra124-soctherm.c @@ -28,7 +28,11 @@ #define TEGRA124_THERMTRIP_CPU_THRESH_MASK (0xff << 8) #define TEGRA124_THERMTRIP_TSENSE_THRESH_MASK 0xff +#define TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK (0xff << 17) +#define TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK (0xff << 9) + #define TEGRA124_THRESH_GRAIN 1000 +#define TEGRA124_BPTT 8 static const struct tegra_tsensor_configuration tegra124_tsensor_config = { .tall = 16300, @@ -51,6 +55,9 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_cpu = { .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK, .thermtrip_enable_mask = TEGRA124_THERMTRIP_CPU_EN_MASK, .thermtrip_threshold_mask = TEGRA124_THERMTRIP_CPU_THRESH_MASK, + .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU, + .thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK, + .thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK, }; static const struct tegra_tsensor_group tegra124_tsensor_group_gpu = { @@ -66,6 +73,9 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_gpu = { .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK, .thermtrip_enable_mask = TEGRA124_THERMTRIP_GPU_EN_MASK, .thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK, + .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU, + .thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK, + .thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK, }; static const struct tegra_tsensor_group tegra124_tsensor_group_pll = { @@ -79,6 +89,9 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_pll = { .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK, .thermtrip_enable_mask = TEGRA124_THERMTRIP_TSENSE_EN_MASK, .thermtrip_threshold_mask = TEGRA124_THERMTRIP_TSENSE_THRESH_MASK, + .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE, + .thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK, + .thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK, }; static const struct tegra_tsensor_group tegra124_tsensor_group_mem = { @@ -94,6 +107,9 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_mem = { .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK, .thermtrip_enable_mask = TEGRA124_THERMTRIP_MEM_EN_MASK, .thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK, + .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM, + .thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK, + .thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK, }; static const struct tegra_tsensor_group *tegra124_tsensor_groups[] = { @@ -193,4 +209,6 @@ const struct tegra_soctherm_soc tegra124_soctherm = { .num_ttgs = ARRAY_SIZE(tegra124_tsensor_groups), .tfuse = &tegra124_soctherm_fuse, .thresh_grain = TEGRA124_THRESH_GRAIN, + .bptt = TEGRA124_BPTT, + .use_ccroc = false, }; diff --git a/drivers/thermal/tegra/tegra132-soctherm.c b/drivers/thermal/tegra/tegra132-soctherm.c index e2aa84e1b307..97fa30501eb1 100644 --- a/drivers/thermal/tegra/tegra132-soctherm.c +++ b/drivers/thermal/tegra/tegra132-soctherm.c @@ -28,7 +28,11 @@ #define TEGRA132_THERMTRIP_CPU_THRESH_MASK (0xff << 8) #define TEGRA132_THERMTRIP_TSENSE_THRESH_MASK 0xff +#define TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK (0xff << 17) +#define TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK (0xff << 9) + #define TEGRA132_THRESH_GRAIN 1000 +#define TEGRA132_BPTT 8 static const struct tegra_tsensor_configuration tegra132_tsensor_config = { .tall = 16300, @@ -51,6 
+55,9 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_cpu = { .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK, .thermtrip_enable_mask = TEGRA132_THERMTRIP_CPU_EN_MASK, .thermtrip_threshold_mask = TEGRA132_THERMTRIP_CPU_THRESH_MASK, + .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU, + .thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK, + .thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK, }; static const struct tegra_tsensor_group tegra132_tsensor_group_gpu = { @@ -66,6 +73,9 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_gpu = { .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK, .thermtrip_enable_mask = TEGRA132_THERMTRIP_GPU_EN_MASK, .thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK, + .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU, + .thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK, + .thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK, }; static const struct tegra_tsensor_group tegra132_tsensor_group_pll = { @@ -79,6 +89,9 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_pll = { .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK, .thermtrip_enable_mask = TEGRA132_THERMTRIP_TSENSE_EN_MASK, .thermtrip_threshold_mask = TEGRA132_THERMTRIP_TSENSE_THRESH_MASK, + .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE, + .thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK, + .thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK, }; static const struct tegra_tsensor_group tegra132_tsensor_group_mem = { @@ -94,6 +107,9 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_mem = { .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK, .thermtrip_enable_mask = TEGRA132_THERMTRIP_MEM_EN_MASK, .thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK, + .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM, + .thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK, + .thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK, }; static const struct tegra_tsensor_group *tegra132_tsensor_groups[] = { @@ -193,4 +209,6 @@ const struct tegra_soctherm_soc tegra132_soctherm = { .num_ttgs = ARRAY_SIZE(tegra132_tsensor_groups), .tfuse = &tegra132_soctherm_fuse, .thresh_grain = TEGRA132_THRESH_GRAIN, + .bptt = TEGRA132_BPTT, + .use_ccroc = true, }; diff --git a/drivers/thermal/tegra/tegra210-soctherm.c b/drivers/thermal/tegra/tegra210-soctherm.c index 19cc0ab66f0e..ad53169a8e95 100644 --- a/drivers/thermal/tegra/tegra210-soctherm.c +++ b/drivers/thermal/tegra/tegra210-soctherm.c @@ -29,7 +29,11 @@ #define TEGRA210_THERMTRIP_CPU_THRESH_MASK (0x1ff << 9) #define TEGRA210_THERMTRIP_TSENSE_THRESH_MASK 0x1ff +#define TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK (0x1ff << 18) +#define TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK (0x1ff << 9) + #define TEGRA210_THRESH_GRAIN 500 +#define TEGRA210_BPTT 9 static const struct tegra_tsensor_configuration tegra210_tsensor_config = { .tall = 16300, @@ -52,6 +56,9 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_cpu = { .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK, .thermtrip_enable_mask = TEGRA210_THERMTRIP_CPU_EN_MASK, .thermtrip_threshold_mask = TEGRA210_THERMTRIP_CPU_THRESH_MASK, + .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU, + .thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK, + .thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK, }; static const struct 
tegra_tsensor_group tegra210_tsensor_group_gpu = { @@ -67,6 +74,9 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_gpu = { .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK, .thermtrip_enable_mask = TEGRA210_THERMTRIP_GPU_EN_MASK, .thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK, + .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU, + .thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK, + .thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK, }; static const struct tegra_tsensor_group tegra210_tsensor_group_pll = { @@ -80,6 +90,9 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_pll = { .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK, .thermtrip_enable_mask = TEGRA210_THERMTRIP_TSENSE_EN_MASK, .thermtrip_threshold_mask = TEGRA210_THERMTRIP_TSENSE_THRESH_MASK, + .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE, + .thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK, + .thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK, }; static const struct tegra_tsensor_group tegra210_tsensor_group_mem = { @@ -95,6 +108,9 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_mem = { .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK, .thermtrip_enable_mask = TEGRA210_THERMTRIP_MEM_EN_MASK, .thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK, + .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM, + .thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK, + .thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK, }; static const struct tegra_tsensor_group *tegra210_tsensor_groups[] = { @@ -194,4 +210,6 @@ const struct tegra_soctherm_soc tegra210_soctherm = { .num_ttgs = ARRAY_SIZE(tegra210_tsensor_groups), .tfuse = &tegra210_soctherm_fuse, .thresh_grain = TEGRA210_THRESH_GRAIN, + .bptt = TEGRA210_BPTT, + .use_ccroc = false, }; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index e2fc6161dded..226b0b4aced6 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -520,6 +520,56 @@ exit: } EXPORT_SYMBOL_GPL(thermal_zone_get_temp); +void thermal_zone_set_trips(struct thermal_zone_device *tz) +{ + int low = -INT_MAX; + int high = INT_MAX; + int trip_temp, hysteresis; + int i, ret; + + mutex_lock(&tz->lock); + + if (!tz->ops->set_trips || !tz->ops->get_trip_hyst) + goto exit; + + for (i = 0; i < tz->trips; i++) { + int trip_low; + + tz->ops->get_trip_temp(tz, i, &trip_temp); + tz->ops->get_trip_hyst(tz, i, &hysteresis); + + trip_low = trip_temp - hysteresis; + + if (trip_low < tz->temperature && trip_low > low) + low = trip_low; + + if (trip_temp > tz->temperature && trip_temp < high) + high = trip_temp; + } + + /* No need to change trip points */ + if (tz->prev_low_trip == low && tz->prev_high_trip == high) + goto exit; + + tz->prev_low_trip = low; + tz->prev_high_trip = high; + + dev_dbg(&tz->device, + "new temperature boundaries: %d < x < %d\n", low, high); + + /* + * Set a temperature window. When this window is left the driver + * must inform the thermal core via thermal_zone_device_update. 
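+	 *
+	 * For example (all values invented for illustration): with trip
+	 * points at 60000 mC and 80000 mC, a hysteresis of 2000 mC and a
+	 * current temperature of 65000 mC, the window computed above is
+	 * 58000 < x < 80000, so the sensor should fire its next interrupt
+	 * only when the temperature drops below 58000 mC or rises above
+	 * 80000 mC.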
+	 */
+	ret = tz->ops->set_trips(tz, low, high);
+	if (ret)
+		dev_err(&tz->device, "Failed to set trips: %d\n", ret);
+
+exit:
+	mutex_unlock(&tz->lock);
+}
+EXPORT_SYMBOL_GPL(thermal_zone_set_trips);
+
 static void update_temperature(struct thermal_zone_device *tz)
 {
 	int temp, ret;
@@ -557,7 +607,8 @@ static void thermal_zone_device_reset(struct thermal_zone_device *tz)
 		pos->initialized = false;
 }
 
-void thermal_zone_device_update(struct thermal_zone_device *tz)
+void thermal_zone_device_update(struct thermal_zone_device *tz,
+				enum thermal_notify_event event)
 {
 	int count;
 
@@ -569,6 +620,10 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
 
 	update_temperature(tz);
 
+	thermal_zone_set_trips(tz);
+
+	tz->notify_event = event;
+
 	for (count = 0; count < tz->trips; count++)
 		handle_thermal_trip(tz, count);
 }
@@ -579,7 +634,7 @@ static void thermal_zone_device_check(struct work_struct *work)
 	struct thermal_zone_device *tz = container_of(work, struct
 						      thermal_zone_device,
 						      poll_queue.work);
-	thermal_zone_device_update(tz);
+	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 }
 
 /* sys I/F for thermal zone */
@@ -703,7 +758,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
 	if (ret)
 		return ret;
 
-	thermal_zone_device_update(tz);
+	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 
 	return count;
 }
@@ -754,6 +809,9 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
 	 */
 	ret = tz->ops->set_trip_hyst(tz, trip, temperature);
 
+	if (!ret)
+		thermal_zone_set_trips(tz);
+
 	return ret ? ret : count;
 }
 
@@ -822,7 +880,7 @@ passive_store(struct device *dev, struct device_attribute *attr,
 
 	tz->forced_passive = state;
 
-	thermal_zone_device_update(tz);
+	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 
 	return count;
 }
@@ -913,7 +971,7 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
 	}
 
 	if (!ret)
-		thermal_zone_device_update(tz);
+		thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 
 	return ret ? ret : count;
 }
@@ -1509,7 +1567,8 @@ __thermal_cooling_device_register(struct device_node *np,
 	mutex_lock(&thermal_list_lock);
 	list_for_each_entry(pos, &thermal_tz_list, node)
 		if (atomic_cmpxchg(&pos->need_update, 1, 0))
-			thermal_zone_device_update(pos);
+			thermal_zone_device_update(pos,
+						   THERMAL_EVENT_UNSPECIFIED);
 	mutex_unlock(&thermal_list_lock);
 
 	return cdev;
@@ -1952,7 +2011,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
 	thermal_zone_device_reset(tz);
 	/* Update the new thermal zone and mark it as already updated. */
 	if (atomic_cmpxchg(&tz->need_update, 1, 0))
-		thermal_zone_device_update(tz);
+		thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 
 	return tz;
 
@@ -2069,6 +2128,36 @@ exit:
 }
 EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name);
 
+/**
+ * thermal_zone_get_slope - return the slope attribute of the thermal zone
+ * @tz: thermal zone device with the slope attribute
+ *
+ * Return: If the thermal zone device has a slope attribute, return it, else
+ * return 1.
+ */
+int thermal_zone_get_slope(struct thermal_zone_device *tz)
+{
+	if (tz && tz->tzp)
+		return tz->tzp->slope;
+	return 1;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_slope);
+
+/**
+ * thermal_zone_get_offset - return the offset attribute of the thermal zone
+ * @tz: thermal zone device with the offset attribute
+ *
+ * Return: If the thermal zone device has an offset attribute, return it, else
+ * return 0.
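+ *
+ * Together with thermal_zone_get_slope(), this lets a driver linearize
+ * a raw reading; a minimal sketch (the milli-degree scaling is an
+ * assumption of this example, not mandated by the core):
+ *
+ *	temp_mC = raw_mC * thermal_zone_get_slope(tz) +
+ *		  thermal_zone_get_offset(tz);
+ */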
+ */ +int thermal_zone_get_offset(struct thermal_zone_device *tz) +{ + if (tz && tz->tzp) + return tz->tzp->offset; + return 0; +} +EXPORT_SYMBOL_GPL(thermal_zone_get_offset); + #ifdef CONFIG_NET static const struct genl_multicast_group thermal_event_mcgrps[] = { { .name = THERMAL_GENL_MCAST_GROUP_NAME, }, @@ -2209,7 +2298,8 @@ static int thermal_pm_notify(struct notifier_block *nb, atomic_set(&in_suspend, 0); list_for_each_entry(tz, &thermal_tz_list, node) { thermal_zone_device_reset(tz); - thermal_zone_device_update(tz); + thermal_zone_device_update(tz, + THERMAL_EVENT_UNSPECIFIED); } break; default: diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index 15c0a9ac2209..0586bd0f2bab 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c @@ -52,7 +52,7 @@ static void ti_thermal_work(struct work_struct *work) struct ti_thermal_data *data = container_of(work, struct ti_thermal_data, thermal_wq); - thermal_zone_device_update(data->ti_thermal); + thermal_zone_device_update(data->ti_thermal, THERMAL_EVENT_UNSPECIFIED); dev_dbg(&data->ti_thermal->device, "updated thermal zone %s\n", data->ti_thermal->type); @@ -205,7 +205,7 @@ static int ti_thermal_set_mode(struct thermal_zone_device *thermal, data->mode = mode; ti_bandgap_write_update_interval(bgp, data->sensor_id, data->ti_thermal->polling_delay); - thermal_zone_device_update(data->ti_thermal); + thermal_zone_device_update(data->ti_thermal, THERMAL_EVENT_UNSPECIFIED); dev_dbg(&thermal->device, "thermal polling set for duration=%d msec\n", data->ti_thermal->polling_delay); @@ -239,7 +239,7 @@ static int ti_thermal_get_trip_temp(struct thermal_zone_device *thermal, return 0; } -static int __ti_thermal_get_trend(void *p, long *trend) +static int __ti_thermal_get_trend(void *p, int trip, enum thermal_trend *trend) { struct ti_thermal_data *data = p; struct ti_bandgap *bgp; @@ -252,22 +252,6 @@ static int __ti_thermal_get_trend(void *p, long *trend) if (ret) return ret; - *trend = tr; - - return 0; -} - -/* Get the temperature trend callback functions for thermal zone */ -static int ti_thermal_get_trend(struct thermal_zone_device *thermal, - int trip, enum thermal_trend *trend) -{ - int ret; - long tr; - - ret = __ti_thermal_get_trend(thermal->devdata, &tr); - if (ret) - return ret; - if (tr > 0) *trend = THERMAL_TREND_RAISING; else if (tr < 0) @@ -278,6 +262,13 @@ static int ti_thermal_get_trend(struct thermal_zone_device *thermal, return 0; } +/* Get the temperature trend callback functions for thermal zone */ +static int ti_thermal_get_trend(struct thermal_zone_device *thermal, + int trip, enum thermal_trend *trend) +{ + return __ti_thermal_get_trend(thermal->devdata, trip, trend); +} + /* Get critical temperature callback functions for thermal zone */ static int ti_thermal_get_crit_temp(struct thermal_zone_device *thermal, int *temp) diff --git a/drivers/thermal/user_space.c b/drivers/thermal/user_space.c index 10adcddc8821..c908150c268d 100644 --- a/drivers/thermal/user_space.c +++ b/drivers/thermal/user_space.c @@ -23,19 +23,30 @@ */ #include <linux/thermal.h> - +#include <linux/slab.h> #include "thermal_core.h" /** * notify_user_space - Notifies user space about thermal events * @tz - thermal_zone_device + * @trip - Trip point index * * This function notifies the user space through UEvents. 
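+ *
+ * A representative uevent payload, matching the keys assembled below
+ * (the values shown are illustrative only):
+ *
+ *	NAME=soc-thermal
+ *	TEMP=75000
+ *	TRIP=1
+ *	EVENT=0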
*/ static int notify_user_space(struct thermal_zone_device *tz, int trip) { + char *thermal_prop[5]; + int i; + mutex_lock(&tz->lock); - kobject_uevent(&tz->device.kobj, KOBJ_CHANGE); + thermal_prop[0] = kasprintf(GFP_KERNEL, "NAME=%s", tz->type); + thermal_prop[1] = kasprintf(GFP_KERNEL, "TEMP=%d", tz->temperature); + thermal_prop[2] = kasprintf(GFP_KERNEL, "TRIP=%d", trip); + thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", tz->notify_event); + thermal_prop[4] = NULL; + kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, thermal_prop); + for (i = 0; i < 4; ++i) + kfree(thermal_prop[i]); mutex_unlock(&tz->lock); return 0; } diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c index 97f0a2bd93ed..95f4c1bcdb4c 100644 --- a/drivers/thermal/x86_pkg_temp_thermal.c +++ b/drivers/thermal/x86_pkg_temp_thermal.c @@ -348,7 +348,8 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) } if (notify) { pr_debug("thermal_zone_device_update\n"); - thermal_zone_device_update(phdev->tzone); + thermal_zone_device_update(phdev->tzone, + THERMAL_EVENT_UNSPECIFIED); } } diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 1e5f529d51a2..063064801ceb 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -1308,11 +1308,6 @@ MODULE_LICENSE ("GPL"); #define PLATFORM_DRIVER ehci_mv_driver #endif -#ifdef CONFIG_MIPS_SEAD3 -#include "ehci-sead3.c" -#define PLATFORM_DRIVER ehci_hcd_sead3_driver -#endif - static int __init ehci_hcd_init(void) { int retval = 0; diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c deleted file mode 100644 index 3d86cc2ffe68..000000000000 --- a/drivers/usb/host/ehci-sead3.c +++ /dev/null @@ -1,185 +0,0 @@ -/* - * MIPS CI13320A EHCI Host Controller driver - * Based on "ehci-au1xxx.c" by K.Boge <karsten.boge@amd.com> - * - * Copyright (C) 2012 MIPS Technologies, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, - * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include <linux/err.h> -#include <linux/platform_device.h> - -static int ehci_sead3_setup(struct usb_hcd *hcd) -{ - int ret; - struct ehci_hcd *ehci = hcd_to_ehci(hcd); - - ehci->caps = hcd->regs + 0x100; - -#ifdef __BIG_ENDIAN - ehci->big_endian_mmio = 1; - ehci->big_endian_desc = 1; -#endif - - ret = ehci_setup(hcd); - if (ret) - return ret; - - ehci->need_io_watchdog = 0; - - /* Set burst length to 16 words. 
*/ - ehci_writel(ehci, 0x1010, &ehci->regs->reserved1[1]); - - return ret; -} - -const struct hc_driver ehci_sead3_hc_driver = { - .description = hcd_name, - .product_desc = "SEAD-3 EHCI", - .hcd_priv_size = sizeof(struct ehci_hcd), - - /* - * generic hardware linkage - */ - .irq = ehci_irq, - .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, - - /* - * basic lifecycle operations - * - */ - .reset = ehci_sead3_setup, - .start = ehci_run, - .stop = ehci_stop, - .shutdown = ehci_shutdown, - - /* - * managing i/o requests and associated device resources - */ - .urb_enqueue = ehci_urb_enqueue, - .urb_dequeue = ehci_urb_dequeue, - .endpoint_disable = ehci_endpoint_disable, - .endpoint_reset = ehci_endpoint_reset, - - /* - * scheduling support - */ - .get_frame_number = ehci_get_frame, - - /* - * root hub support - */ - .hub_status_data = ehci_hub_status_data, - .hub_control = ehci_hub_control, - .bus_suspend = ehci_bus_suspend, - .bus_resume = ehci_bus_resume, - .relinquish_port = ehci_relinquish_port, - .port_handed_over = ehci_port_handed_over, - - .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, -}; - -static int ehci_hcd_sead3_drv_probe(struct platform_device *pdev) -{ - struct usb_hcd *hcd; - struct resource *res; - int ret; - - if (usb_disabled()) - return -ENODEV; - - if (pdev->resource[1].flags != IORESOURCE_IRQ) { - pr_debug("resource[1] is not IORESOURCE_IRQ"); - return -ENOMEM; - } - hcd = usb_create_hcd(&ehci_sead3_hc_driver, &pdev->dev, "SEAD-3"); - if (!hcd) - return -ENOMEM; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(hcd->regs)) { - ret = PTR_ERR(hcd->regs); - goto err1; - } - hcd->rsrc_start = res->start; - hcd->rsrc_len = resource_size(res); - - /* Root hub has integrated TT. 
*/ - hcd->has_tt = 1; - - ret = usb_add_hcd(hcd, pdev->resource[1].start, - IRQF_SHARED); - if (ret == 0) { - platform_set_drvdata(pdev, hcd); - device_wakeup_enable(hcd->self.controller); - return ret; - } - -err1: - usb_put_hcd(hcd); - return ret; -} - -static int ehci_hcd_sead3_drv_remove(struct platform_device *pdev) -{ - struct usb_hcd *hcd = platform_get_drvdata(pdev); - - usb_remove_hcd(hcd); - usb_put_hcd(hcd); - - return 0; -} - -#ifdef CONFIG_PM -static int ehci_hcd_sead3_drv_suspend(struct device *dev) -{ - struct usb_hcd *hcd = dev_get_drvdata(dev); - bool do_wakeup = device_may_wakeup(dev); - - return ehci_suspend(hcd, do_wakeup); -} - -static int ehci_hcd_sead3_drv_resume(struct device *dev) -{ - struct usb_hcd *hcd = dev_get_drvdata(dev); - - ehci_resume(hcd, false); - return 0; -} - -static const struct dev_pm_ops sead3_ehci_pmops = { - .suspend = ehci_hcd_sead3_drv_suspend, - .resume = ehci_hcd_sead3_drv_resume, -}; - -#define SEAD3_EHCI_PMOPS (&sead3_ehci_pmops) - -#else -#define SEAD3_EHCI_PMOPS NULL -#endif - -static struct platform_driver ehci_hcd_sead3_driver = { - .probe = ehci_hcd_sead3_drv_probe, - .remove = ehci_hcd_sead3_drv_remove, - .shutdown = usb_hcd_platform_shutdown, - .driver = { - .name = "sead3-ehci", - .pm = SEAD3_EHCI_PMOPS, - } -}; - -MODULE_ALIAS("platform:sead3-ehci"); diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 88b008fb8a4e..5d3b0db5ce0a 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -284,12 +284,14 @@ config FB_PM2_FIFO_DISCONNECT config FB_ARMCLCD tristate "ARM PrimeCell PL110 support" depends on ARM || ARM64 || COMPILE_TEST - depends on FB && ARM_AMBA + depends on FB && ARM_AMBA && HAS_IOMEM select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT select FB_MODE_HELPERS if OF select VIDEOMODE_HELPERS if OF + select BACKLIGHT_LCD_SUPPORT if OF + select BACKLIGHT_CLASS_DEVICE if OF help This framebuffer device driver is for the ARM PrimeCell PL110 Colour LCD controller. 
ARM PrimeCells provide the building @@ -305,6 +307,8 @@ config PLAT_VERSATILE_CLCD def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR depends on ARM depends on FB_ARMCLCD && FB=y + select REGMAP + select MFD_SYSCON config FB_ACORN bool "Acorn VIDC support" @@ -2183,7 +2187,7 @@ config FB_GOLDFISH config FB_COBALT tristate "Cobalt server LCD frame buffer support" - depends on FB && (MIPS_COBALT || MIPS_SEAD3) + depends on FB && MIPS_COBALT config FB_SH7760 bool "SH7760/SH7763/SH7720/SH7721 LCDC support" @@ -2443,7 +2447,6 @@ config FB_SIMPLE source "drivers/video/fbdev/omap/Kconfig" source "drivers/video/fbdev/omap2/Kconfig" -source "drivers/video/fbdev/exynos/Kconfig" source "drivers/video/fbdev/mmp/Kconfig" config FB_SH_MOBILE_MERAM diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile index f6731867dd26..ee8c81405a7f 100644 --- a/drivers/video/fbdev/Makefile +++ b/drivers/video/fbdev/Makefile @@ -6,8 +6,6 @@ obj-y += core/ -obj-$(CONFIG_EXYNOS_VIDEO) += exynos/ - obj-$(CONFIG_FB_MACMODES) += macmodes.o obj-$(CONFIG_FB_WMT_GE_ROPS) += wmt_ge_rops.o @@ -79,6 +77,7 @@ obj-$(CONFIG_FB_ATMEL) += atmel_lcdfb.o obj-$(CONFIG_FB_PVR2) += pvr2fb.o obj-$(CONFIG_FB_VOODOO1) += sstfb.o obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o +obj-$(CONFIG_ARCH_NOMADIK) += amba-clcd-nomadik.o obj-$(CONFIG_PLAT_VERSATILE_CLCD) += amba-clcd-versatile.o obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o obj-$(CONFIG_FB_68328) += 68328fb.o diff --git a/drivers/video/fbdev/amba-clcd-nomadik.c b/drivers/video/fbdev/amba-clcd-nomadik.c new file mode 100644 index 000000000000..0c06fcaaa6e8 --- /dev/null +++ b/drivers/video/fbdev/amba-clcd-nomadik.c @@ -0,0 +1,259 @@ +#include <linux/amba/bus.h> +#include <linux/amba/clcd.h> +#include <linux/gpio/consumer.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/delay.h> +#include <linux/bitops.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> + +#include "amba-clcd-nomadik.h" + +static struct gpio_desc *grestb; +static struct gpio_desc *scen; +static struct gpio_desc *scl; +static struct gpio_desc *sda; + +static u8 tpg110_readwrite_reg(bool write, u8 address, u8 outval) +{ + int i; + u8 inval = 0; + + /* Assert SCEN */ + gpiod_set_value_cansleep(scen, 1); + ndelay(150); + /* Hammer out the address */ + for (i = 5; i >= 0; i--) { + if (address & BIT(i)) + gpiod_set_value_cansleep(sda, 1); + else + gpiod_set_value_cansleep(sda, 0); + ndelay(150); + /* Send an SCL pulse */ + gpiod_set_value_cansleep(scl, 1); + ndelay(160); + gpiod_set_value_cansleep(scl, 0); + ndelay(160); + } + + if (write) { + /* WRITE */ + gpiod_set_value_cansleep(sda, 0); + } else { + /* READ */ + gpiod_set_value_cansleep(sda, 1); + } + ndelay(150); + /* Send an SCL pulse */ + gpiod_set_value_cansleep(scl, 1); + ndelay(160); + gpiod_set_value_cansleep(scl, 0); + ndelay(160); + + if (!write) + /* HiZ turn-around cycle */ + gpiod_direction_input(sda); + ndelay(150); + /* Send an SCL pulse */ + gpiod_set_value_cansleep(scl, 1); + ndelay(160); + gpiod_set_value_cansleep(scl, 0); + ndelay(160); + + /* Hammer in/out the data */ + for (i = 7; i >= 0; i--) { + int value; + + if (write) { + value = !!(outval & BIT(i)); + gpiod_set_value_cansleep(sda, value); + } else { + value = gpiod_get_value(sda); + if (value) + inval |= BIT(i); + } + ndelay(150); + /* Send an SCL pulse */ + gpiod_set_value_cansleep(scl, 1); + ndelay(160); + gpiod_set_value_cansleep(scl, 0); + ndelay(160); + } + + gpiod_direction_output(sda, 0); + /* Deassert SCEN */ + 
gpiod_set_value_cansleep(scen, 0); + /* Satisfies SCEN pulse width */ + udelay(1); + + return inval; +} + +static u8 tpg110_read_reg(u8 address) +{ + return tpg110_readwrite_reg(false, address, 0); +} + +static void tpg110_write_reg(u8 address, u8 outval) +{ + tpg110_readwrite_reg(true, address, outval); +} + +static void tpg110_startup(struct device *dev) +{ + u8 val; + + dev_info(dev, "TPG110 display enable\n"); + /* De-assert the reset signal */ + gpiod_set_value_cansleep(grestb, 0); + mdelay(1); + dev_info(dev, "de-asserted GRESTB\n"); + + /* Test display communication */ + tpg110_write_reg(0x00, 0x55); + val = tpg110_read_reg(0x00); + if (val == 0x55) + dev_info(dev, "passed communication test\n"); + val = tpg110_read_reg(0x01); + dev_info(dev, "TPG110 chip ID: %d version: %d\n", + val>>4, val&0x0f); + + /* Show display resolution */ + val = tpg110_read_reg(0x02); + val &= 7; + switch (val) { + case 0x0: + dev_info(dev, "IN 400x240 RGB -> OUT 800x480 RGB (dual scan)"); + break; + case 0x1: + dev_info(dev, "IN 480x272 RGB -> OUT 800x480 RGB (dual scan)"); + break; + case 0x4: + dev_info(dev, "480x640 RGB"); + break; + case 0x5: + dev_info(dev, "480x272 RGB"); + break; + case 0x6: + dev_info(dev, "640x480 RGB"); + break; + case 0x7: + dev_info(dev, "800x480 RGB"); + break; + default: + dev_info(dev, "ILLEGAL RESOLUTION"); + break; + } + + val = tpg110_read_reg(0x03); + dev_info(dev, "resolution is controlled by %s\n", + (val & BIT(7)) ? "software" : "hardware"); +} + +static void tpg110_enable(struct clcd_fb *fb) +{ + struct device *dev = &fb->dev->dev; + static bool startup; + u8 val; + + if (!startup) { + tpg110_startup(dev); + startup = true; + } + + /* Take chip out of standby */ + val = tpg110_read_reg(0x03); + val |= BIT(0); + tpg110_write_reg(0x03, val); +} + +static void tpg110_disable(struct clcd_fb *fb) +{ + u8 val; + + dev_info(&fb->dev->dev, "TPG110 display disable\n"); + val = tpg110_read_reg(0x03); + /* Put into standby */ + val &= ~BIT(0); + tpg110_write_reg(0x03, val); +} + +static void tpg110_init(struct device *dev, struct device_node *np, + struct clcd_board *board) +{ + dev_info(dev, "TPG110 display init\n"); + + grestb = devm_get_gpiod_from_child(dev, "grestb", &np->fwnode); + if (IS_ERR(grestb)) { + dev_err(dev, "no GRESTB GPIO\n"); + return; + } + /* This asserts the GRESTB signal, putting the display into reset */ + gpiod_direction_output(grestb, 1); + + scen = devm_get_gpiod_from_child(dev, "scen", &np->fwnode); + if (IS_ERR(scen)) { + dev_err(dev, "no SCEN GPIO\n"); + return; + } + gpiod_direction_output(scen, 0); + scl = devm_get_gpiod_from_child(dev, "scl", &np->fwnode); + if (IS_ERR(scl)) { + dev_err(dev, "no SCL GPIO\n"); + return; + } + gpiod_direction_output(scl, 0); + sda = devm_get_gpiod_from_child(dev, "sda", &np->fwnode); + if (IS_ERR(sda)) { + dev_err(dev, "no SDA GPIO\n"); + return; + } + gpiod_direction_output(sda, 0); + board->enable = tpg110_enable; + board->disable = tpg110_disable; +} + +int nomadik_clcd_init_panel(struct clcd_fb *fb, + struct device_node *endpoint) +{ + struct device_node *panel; + + panel = of_graph_get_remote_port_parent(endpoint); + if (!panel) + return -ENODEV; + + if (of_device_is_compatible(panel, "tpo,tpg110")) + tpg110_init(&fb->dev->dev, panel, fb->board); + else + dev_info(&fb->dev->dev, "unknown panel\n"); + + /* Unknown panel, fall through */ + return 0; +} +EXPORT_SYMBOL_GPL(nomadik_clcd_init_panel); + +#define PMU_CTRL_OFFSET 0x0000 +#define PMU_CTRL_LCDNDIF BIT(26) + +int nomadik_clcd_init_board(struct 
amba_device *adev, + struct clcd_board *board) +{ + struct regmap *pmu_regmap; + + dev_info(&adev->dev, "Nomadik CLCD board init\n"); + pmu_regmap = + syscon_regmap_lookup_by_compatible("stericsson,nomadik-pmu"); + if (IS_ERR(pmu_regmap)) { + dev_err(&adev->dev, "could not find PMU syscon regmap\n"); + return PTR_ERR(pmu_regmap); + } + regmap_update_bits(pmu_regmap, + PMU_CTRL_OFFSET, + PMU_CTRL_LCDNDIF, + 0); + dev_info(&adev->dev, "set PMU mux to CLCD mode\n"); + + return 0; +} +EXPORT_SYMBOL_GPL(nomadik_clcd_init_board); diff --git a/drivers/video/fbdev/amba-clcd-nomadik.h b/drivers/video/fbdev/amba-clcd-nomadik.h new file mode 100644 index 000000000000..50aa9bda69fd --- /dev/null +++ b/drivers/video/fbdev/amba-clcd-nomadik.h @@ -0,0 +1,24 @@ +#ifndef _AMBA_CLCD_NOMADIK_H +#define _AMBA_CLCD_NOMADIK_H + +#include <linux/amba/bus.h> + +#ifdef CONFIG_ARCH_NOMADIK +int nomadik_clcd_init_board(struct amba_device *adev, + struct clcd_board *board); +int nomadik_clcd_init_panel(struct clcd_fb *fb, + struct device_node *endpoint); +#else +static inline int nomadik_clcd_init_board(struct amba_device *adev, + struct clcd_board *board) +{ + return 0; +} +static inline int nomadik_clcd_init_panel(struct clcd_fb *fb, + struct device_node *endpoint) +{ + return 0; +} +#endif + +#endif /* inclusion guard */ diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c index a8a22daa3f9d..19ad8645d93c 100644 --- a/drivers/video/fbdev/amba-clcd-versatile.c +++ b/drivers/video/fbdev/amba-clcd-versatile.c @@ -3,6 +3,12 @@ #include <linux/amba/bus.h> #include <linux/amba/clcd.h> #include <linux/platform_data/video-clcd-versatile.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> +#include <linux/bitops.h> +#include "amba-clcd-versatile.h" static struct clcd_panel vga = { .mode = { @@ -178,3 +184,392 @@ void versatile_clcd_remove_dma(struct clcd_fb *fb) dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base, fb->fb.fix.smem_start); } + +#ifdef CONFIG_OF + +static struct regmap *versatile_syscon_map; +static struct regmap *versatile_ib2_map; + +/* + * We detect the different syscon types from the compatible strings. + */ +enum versatile_clcd { + INTEGRATOR_CLCD_CM, + VERSATILE_CLCD, + REALVIEW_CLCD_EB, + REALVIEW_CLCD_PB1176, + REALVIEW_CLCD_PB11MP, + REALVIEW_CLCD_PBA8, + REALVIEW_CLCD_PBX, +}; + +static const struct of_device_id versatile_clcd_of_match[] = { + { + .compatible = "arm,core-module-integrator", + .data = (void *)INTEGRATOR_CLCD_CM, + }, + { + .compatible = "arm,versatile-sysreg", + .data = (void *)VERSATILE_CLCD, + }, + { + .compatible = "arm,realview-eb-syscon", + .data = (void *)REALVIEW_CLCD_EB, + }, + { + .compatible = "arm,realview-pb1176-syscon", + .data = (void *)REALVIEW_CLCD_PB1176, + }, + { + .compatible = "arm,realview-pb11mp-syscon", + .data = (void *)REALVIEW_CLCD_PB11MP, + }, + { + .compatible = "arm,realview-pba8-syscon", + .data = (void *)REALVIEW_CLCD_PBA8, + }, + { + .compatible = "arm,realview-pbx-syscon", + .data = (void *)REALVIEW_CLCD_PBX, + }, + {}, +}; + +/* + * Core module CLCD control on the Integrator/CP, bits + * 8 thru 19 of the CM_CONTROL register controls a bunch + * of CLCD settings. 
+ */ +#define INTEGRATOR_HDR_CTRL_OFFSET 0x0C +#define INTEGRATOR_CLCD_LCDBIASEN BIT(8) +#define INTEGRATOR_CLCD_LCDBIASUP BIT(9) +#define INTEGRATOR_CLCD_LCDBIASDN BIT(10) +/* Bits 11,12,13 controls the LCD type */ +#define INTEGRATOR_CLCD_LCDMUX_MASK (BIT(11)|BIT(12)|BIT(13)) +#define INTEGRATOR_CLCD_LCDMUX_LCD24 BIT(11) +#define INTEGRATOR_CLCD_LCDMUX_VGA565 BIT(12) +#define INTEGRATOR_CLCD_LCDMUX_SHARP (BIT(11)|BIT(12)) +#define INTEGRATOR_CLCD_LCDMUX_VGA555 BIT(13) +#define INTEGRATOR_CLCD_LCDMUX_VGA24 (BIT(11)|BIT(12)|BIT(13)) +#define INTEGRATOR_CLCD_LCD0_EN BIT(14) +#define INTEGRATOR_CLCD_LCD1_EN BIT(15) +/* R/L flip on Sharp */ +#define INTEGRATOR_CLCD_LCD_STATIC1 BIT(16) +/* U/D flip on Sharp */ +#define INTEGRATOR_CLCD_LCD_STATIC2 BIT(17) +/* No connection on Sharp */ +#define INTEGRATOR_CLCD_LCD_STATIC BIT(18) +/* 0 = 24bit VGA, 1 = 18bit VGA */ +#define INTEGRATOR_CLCD_LCD_N24BITEN BIT(19) + +#define INTEGRATOR_CLCD_MASK (INTEGRATOR_CLCD_LCDBIASEN | \ + INTEGRATOR_CLCD_LCDBIASUP | \ + INTEGRATOR_CLCD_LCDBIASDN | \ + INTEGRATOR_CLCD_LCDMUX_MASK | \ + INTEGRATOR_CLCD_LCD0_EN | \ + INTEGRATOR_CLCD_LCD1_EN | \ + INTEGRATOR_CLCD_LCD_STATIC1 | \ + INTEGRATOR_CLCD_LCD_STATIC2 | \ + INTEGRATOR_CLCD_LCD_STATIC | \ + INTEGRATOR_CLCD_LCD_N24BITEN) + +static void integrator_clcd_enable(struct clcd_fb *fb) +{ + struct fb_var_screeninfo *var = &fb->fb.var; + u32 val; + + dev_info(&fb->dev->dev, "enable Integrator CLCD connectors\n"); + + /* FIXME: really needed? */ + val = INTEGRATOR_CLCD_LCD_STATIC1 | INTEGRATOR_CLCD_LCD_STATIC2 | + INTEGRATOR_CLCD_LCD0_EN | INTEGRATOR_CLCD_LCD1_EN; + if (var->bits_per_pixel <= 8 || + (var->bits_per_pixel == 16 && var->green.length == 5)) + /* Pseudocolor, RGB555, BGR555 */ + val |= INTEGRATOR_CLCD_LCDMUX_VGA555; + else if (fb->fb.var.bits_per_pixel <= 16) + /* truecolor RGB565 */ + val |= INTEGRATOR_CLCD_LCDMUX_VGA565; + else + val = 0; /* no idea for this, don't trust the docs */ + + regmap_update_bits(versatile_syscon_map, + INTEGRATOR_HDR_CTRL_OFFSET, + INTEGRATOR_CLCD_MASK, + val); +} + +/* + * This configuration register in the Versatile and RealView + * family is uniformly present but appears more and more + * unutilized starting with the RealView series. 
+ */ +#define SYS_CLCD 0x50 +#define SYS_CLCD_MODE_MASK (BIT(0)|BIT(1)) +#define SYS_CLCD_MODE_888 0 +#define SYS_CLCD_MODE_5551 BIT(0) +#define SYS_CLCD_MODE_565_R_LSB BIT(1) +#define SYS_CLCD_MODE_565_B_LSB (BIT(0)|BIT(1)) +#define SYS_CLCD_CONNECTOR_MASK (BIT(2)|BIT(3)|BIT(4)|BIT(5)) +#define SYS_CLCD_NLCDIOON BIT(2) +#define SYS_CLCD_VDDPOSSWITCH BIT(3) +#define SYS_CLCD_PWR3V5SWITCH BIT(4) +#define SYS_CLCD_VDDNEGSWITCH BIT(5) +#define SYS_CLCD_TSNSS BIT(6) /* touchscreen enable */ +#define SYS_CLCD_SSPEXP BIT(7) /* SSP expansion enable */ + +/* The Versatile can detect the connected panel type */ +#define SYS_CLCD_CLCDID_MASK (BIT(8)|BIT(9)|BIT(10)|BIT(11)|BIT(12)) +#define SYS_CLCD_ID_SANYO_3_8 (0x00 << 8) +#define SYS_CLCD_ID_SHARP_8_4 (0x01 << 8) +#define SYS_CLCD_ID_EPSON_2_2 (0x02 << 8) +#define SYS_CLCD_ID_SANYO_2_5 (0x07 << 8) +#define SYS_CLCD_ID_VGA (0x1f << 8) + +#define SYS_CLCD_TSNDAV BIT(13) /* data ready from TS */ + +/* IB2 control register for the Versatile daughterboard */ +#define IB2_CTRL 0x00 +#define IB2_CTRL_LCD_SD BIT(1) /* 1 = shut down LCD */ +#define IB2_CTRL_LCD_BL_ON BIT(0) +#define IB2_CTRL_LCD_MASK (BIT(0)|BIT(1)) + +static void versatile_clcd_disable(struct clcd_fb *fb) +{ + dev_info(&fb->dev->dev, "disable Versatile CLCD connectors\n"); + regmap_update_bits(versatile_syscon_map, + SYS_CLCD, + SYS_CLCD_CONNECTOR_MASK, + 0); + + /* If we're on an IB2 daughterboard, turn off display */ + if (versatile_ib2_map) { + dev_info(&fb->dev->dev, "disable IB2 display\n"); + regmap_update_bits(versatile_ib2_map, + IB2_CTRL, + IB2_CTRL_LCD_MASK, + IB2_CTRL_LCD_SD); + } +} + +static void versatile_clcd_enable(struct clcd_fb *fb) +{ + struct fb_var_screeninfo *var = &fb->fb.var; + u32 val = 0; + + dev_info(&fb->dev->dev, "enable Versatile CLCD connectors\n"); + switch (var->green.length) { + case 5: + val |= SYS_CLCD_MODE_5551; + break; + case 6: + if (var->red.offset == 0) + val |= SYS_CLCD_MODE_565_R_LSB; + else + val |= SYS_CLCD_MODE_565_B_LSB; + break; + case 8: + val |= SYS_CLCD_MODE_888; + break; + } + + /* Set up the MUX */ + regmap_update_bits(versatile_syscon_map, + SYS_CLCD, + SYS_CLCD_MODE_MASK, + val); + + /* Then enable the display */ + regmap_update_bits(versatile_syscon_map, + SYS_CLCD, + SYS_CLCD_CONNECTOR_MASK, + SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH); + + /* If we're on an IB2 daughterboard, turn on display */ + if (versatile_ib2_map) { + dev_info(&fb->dev->dev, "enable IB2 display\n"); + regmap_update_bits(versatile_ib2_map, + IB2_CTRL, + IB2_CTRL_LCD_MASK, + IB2_CTRL_LCD_BL_ON); + } +} + +static void versatile_clcd_decode(struct clcd_fb *fb, struct clcd_regs *regs) +{ + clcdfb_decode(fb, regs); + + /* Always clear BGR for RGB565: we do the routing externally */ + if (fb->fb.var.green.length == 6) + regs->cntl &= ~CNTL_BGR; +} + +static void realview_clcd_disable(struct clcd_fb *fb) +{ + dev_info(&fb->dev->dev, "disable RealView CLCD connectors\n"); + regmap_update_bits(versatile_syscon_map, + SYS_CLCD, + SYS_CLCD_CONNECTOR_MASK, + 0); +} + +static void realview_clcd_enable(struct clcd_fb *fb) +{ + dev_info(&fb->dev->dev, "enable RealView CLCD connectors\n"); + regmap_update_bits(versatile_syscon_map, + SYS_CLCD, + SYS_CLCD_CONNECTOR_MASK, + SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH); +} + +struct versatile_panel { + u32 id; + char *compatible; + bool ib2; +}; + +static const struct versatile_panel versatile_panels[] = { + { + .id = SYS_CLCD_ID_VGA, + .compatible = "VGA", + }, + { + .id = SYS_CLCD_ID_SANYO_3_8, + .compatible = 
"sanyo,tm38qv67a02a", + }, + { + .id = SYS_CLCD_ID_SHARP_8_4, + .compatible = "sharp,lq084v1dg21", + }, + { + .id = SYS_CLCD_ID_EPSON_2_2, + .compatible = "epson,l2f50113t00", + }, + { + .id = SYS_CLCD_ID_SANYO_2_5, + .compatible = "sanyo,alr252rgt", + .ib2 = true, + }, +}; + +static void versatile_panel_probe(struct device *dev, + struct device_node *endpoint) +{ + struct versatile_panel const *vpanel = NULL; + struct device_node *panel = NULL; + u32 val; + int ret; + int i; + + /* + * The Versatile CLCD has a panel auto-detection mechanism. + * We use this and look for the compatible panel in the + * device tree. + */ + ret = regmap_read(versatile_syscon_map, SYS_CLCD, &val); + if (ret) { + dev_err(dev, "cannot read CLCD syscon register\n"); + return; + } + val &= SYS_CLCD_CLCDID_MASK; + + /* First find corresponding panel information */ + for (i = 0; i < ARRAY_SIZE(versatile_panels); i++) { + vpanel = &versatile_panels[i]; + + if (val == vpanel->id) { + dev_err(dev, "autodetected panel \"%s\"\n", + vpanel->compatible); + break; + } + } + if (i == ARRAY_SIZE(versatile_panels)) { + dev_err(dev, "could not auto-detect panel\n"); + return; + } + + panel = of_graph_get_remote_port_parent(endpoint); + if (!panel) { + dev_err(dev, "could not locate panel in DT\n"); + return; + } + if (!of_device_is_compatible(panel, vpanel->compatible)) + dev_err(dev, "panel in DT is not compatible with the " + "auto-detected panel, continuing anyway\n"); + + /* + * If we have a Sanyo 2.5" port + * that we're running on an IB2 and proceed to look for the + * IB2 syscon regmap. + */ + if (!vpanel->ib2) + return; + + versatile_ib2_map = syscon_regmap_lookup_by_compatible( + "arm,versatile-ib2-syscon"); + if (IS_ERR(versatile_ib2_map)) { + dev_err(dev, "could not locate IB2 control register\n"); + versatile_ib2_map = NULL; + return; + } +} + +int versatile_clcd_init_panel(struct clcd_fb *fb, + struct device_node *endpoint) +{ + const struct of_device_id *clcd_id; + enum versatile_clcd versatile_clcd_type; + struct device_node *np; + struct regmap *map; + struct device *dev = &fb->dev->dev; + + np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match, + &clcd_id); + if (!np) { + dev_err(dev, "no Versatile syscon node\n"); + return -ENODEV; + } + versatile_clcd_type = (enum versatile_clcd)clcd_id->data; + + map = syscon_node_to_regmap(np); + if (IS_ERR(map)) { + dev_err(dev, "no Versatile syscon regmap\n"); + return PTR_ERR(map); + } + + switch (versatile_clcd_type) { + case INTEGRATOR_CLCD_CM: + versatile_syscon_map = map; + fb->board->enable = integrator_clcd_enable; + /* Override the caps, we have only these */ + fb->board->caps = CLCD_CAP_5551 | CLCD_CAP_RGB565 | + CLCD_CAP_888; + dev_info(dev, "set up callbacks for Integrator PL110\n"); + break; + case VERSATILE_CLCD: + versatile_syscon_map = map; + fb->board->enable = versatile_clcd_enable; + fb->board->disable = versatile_clcd_disable; + fb->board->decode = versatile_clcd_decode; + versatile_panel_probe(dev, endpoint); + dev_info(dev, "set up callbacks for Versatile\n"); + break; + case REALVIEW_CLCD_EB: + case REALVIEW_CLCD_PB1176: + case REALVIEW_CLCD_PB11MP: + case REALVIEW_CLCD_PBA8: + case REALVIEW_CLCD_PBX: + versatile_syscon_map = map; + fb->board->enable = realview_clcd_enable; + fb->board->disable = realview_clcd_disable; + dev_info(dev, "set up callbacks for RealView PL111\n"); + break; + default: + dev_info(dev, "unknown Versatile system controller\n"); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(versatile_clcd_init_panel); +#endif 
diff --git a/drivers/video/fbdev/amba-clcd-versatile.h b/drivers/video/fbdev/amba-clcd-versatile.h new file mode 100644 index 000000000000..1b14359c2cf6 --- /dev/null +++ b/drivers/video/fbdev/amba-clcd-versatile.h @@ -0,0 +1,17 @@ +/* + * Special local versatile callbacks + */ +#include <linux/of.h> +#include <linux/amba/bus.h> +#include <linux/platform_data/video-clcd-versatile.h> + +#if defined(CONFIG_PLAT_VERSATILE_CLCD) && defined(CONFIG_OF) +int versatile_clcd_init_panel(struct clcd_fb *fb, + struct device_node *endpoint); +#else +static inline int versatile_clcd_init_panel(struct clcd_fb *fb, + struct device_node *endpoint) +{ + return 0; +} +#endif diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index 9b158869cb89..ec2671d98abc 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -30,10 +30,14 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_graph.h> +#include <linux/backlight.h> #include <video/display_timing.h> #include <video/of_display_timing.h> #include <video/videomode.h> +#include "amba-clcd-nomadik.h" +#include "amba-clcd-versatile.h" + #define to_clcd(info) container_of(info, struct clcd_fb, fb) /* This is limited to 16 characters when displayed by X startup */ @@ -71,6 +75,11 @@ static void clcdfb_disable(struct clcd_fb *fb) if (fb->board->disable) fb->board->disable(fb); + if (fb->panel->backlight) { + fb->panel->backlight->props.power = FB_BLANK_POWERDOWN; + backlight_update_status(fb->panel->backlight); + } + val = readl(fb->regs + fb->off_cntl); if (val & CNTL_LCDPWR) { val &= ~CNTL_LCDPWR; @@ -117,6 +126,14 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl) writel(cntl, fb->regs + fb->off_cntl); /* + * Turn on backlight + */ + if (fb->panel->backlight) { + fb->panel->backlight->props.power = FB_BLANK_UNBLANK; + backlight_update_status(fb->panel->backlight); + } + + /* * finally, enable the interface. 
*/ if (fb->board->enable) @@ -211,6 +228,15 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var) var->blue.length = 4; } break; + case 24: + if (fb->vendor->packed_24_bit_pixels) { + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + } else { + ret = -EINVAL; + } + break; case 32: /* If we can't do 888, reject */ caps &= CLCD_CAP_888; @@ -297,6 +323,12 @@ static int clcdfb_set_par(struct fb_info *info) clcdfb_disable(fb); + /* Some variants must be clocked here */ + if (fb->vendor->clock_timregs && !fb->clk_enabled) { + fb->clk_enabled = true; + clk_enable(fb->clk); + } + writel(regs.tim0, fb->regs + CLCD_TIM0); writel(regs.tim1, fb->regs + CLCD_TIM1); writel(regs.tim2, fb->regs + CLCD_TIM2); @@ -551,7 +583,7 @@ static int clcdfb_register(struct clcd_fb *fb) #ifdef CONFIG_OF static int clcdfb_of_get_dpi_panel_mode(struct device_node *node, - struct fb_videomode *mode) + struct clcd_panel *clcd_panel) { int err; struct display_timing timing; @@ -563,10 +595,31 @@ static int clcdfb_of_get_dpi_panel_mode(struct device_node *node, videomode_from_timing(&timing, &video); - err = fb_videomode_from_videomode(&video, mode); + err = fb_videomode_from_videomode(&video, &clcd_panel->mode); if (err) return err; + /* Set up some inversion flags */ + if (timing.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE) + clcd_panel->tim2 |= TIM2_IPC; + else if (!(timing.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)) + /* + * To preserve backwards compatibility, the IPC (inverted + * pixel clock) flag needs to be set on any display that + * doesn't explicitly specify that the pixel clock is + * active on the negative or positive edge. + */ + clcd_panel->tim2 |= TIM2_IPC; + + if (timing.flags & DISPLAY_FLAGS_HSYNC_LOW) + clcd_panel->tim2 |= TIM2_IHS; + + if (timing.flags & DISPLAY_FLAGS_VSYNC_LOW) + clcd_panel->tim2 |= TIM2_IVS; + + if (timing.flags & DISPLAY_FLAGS_DE_LOW) + clcd_panel->tim2 |= TIM2_IOE; + return 0; } @@ -576,11 +629,34 @@ static int clcdfb_snprintf_mode(char *buf, int size, struct fb_videomode *mode) mode->refresh); } +static int clcdfb_of_get_backlight(struct device_node *endpoint, + struct clcd_panel *clcd_panel) +{ + struct device_node *panel; + struct device_node *backlight; + + panel = of_graph_get_remote_port_parent(endpoint); + if (!panel) + return -ENODEV; + + /* Look up the optional backlight phandle */ + backlight = of_parse_phandle(panel, "backlight", 0); + if (backlight) { + clcd_panel->backlight = of_find_backlight_by_node(backlight); + of_node_put(backlight); + + if (!clcd_panel->backlight) + return -EPROBE_DEFER; + } + return 0; +} + static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint, - struct fb_videomode *mode) + struct clcd_panel *clcd_panel) { int err; struct device_node *panel; + struct fb_videomode *mode; char *name; int len; @@ -590,11 +666,12 @@ static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint, /* Only directly connected DPI panels supported for now */ if (of_device_is_compatible(panel, "panel-dpi")) - err = clcdfb_of_get_dpi_panel_mode(panel, mode); + err = clcdfb_of_get_dpi_panel_mode(panel, clcd_panel); else err = -ENOENT; if (err) return err; + mode = &clcd_panel->mode; len = clcdfb_snprintf_mode(NULL, 0, mode); name = devm_kzalloc(dev, len + 1, GFP_KERNEL); @@ -616,6 +693,7 @@ static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0) } panels[] = { { 0x110, 1, 7, 13, CLCD_CAP_5551 }, { 0x110, 0, 8, 16, CLCD_CAP_888 }, + { 0x110, 16, 8, 0, CLCD_CAP_888 }, { 0x111, 4, 
14, 20, CLCD_CAP_444 }, { 0x111, 3, 11, 19, CLCD_CAP_444 | CLCD_CAP_5551 }, { 0x111, 3, 10, 19, CLCD_CAP_444 | CLCD_CAP_5551 | @@ -625,8 +703,8 @@ static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0) }; int i; - /* Bypass pixel clock divider, data output on the falling edge */ - fb->panel->tim2 = TIM2_BCD | TIM2_IPC; + /* Bypass pixel clock divider */ + fb->panel->tim2 |= TIM2_BCD; /* TFT display, vert. comp. interrupt at the start of the back porch */ fb->panel->cntl |= CNTL_LCDTFT | CNTL_LCDVCOMP(1); @@ -643,6 +721,49 @@ static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0) fb->panel->caps = panels[i].caps; } + /* + * If we actually physically connected the R lines to B and + * vice versa + */ + if (r0 != 0 && b0 == 0) + fb->panel->bgr_connection = true; + + if (fb->panel->caps && fb->vendor->st_bitmux_control) { + /* + * Set up the special bits for the Nomadik control register + * (other platforms tend to do this through an external + * register). + */ + + /* Offset of the highest used color */ + int maxoff = max3(r0, g0, b0); + /* Most significant bit out, highest used bit */ + int msb = 0; + + if (fb->panel->caps & CLCD_CAP_888) { + msb = maxoff + 8 - 1; + } else if (fb->panel->caps & CLCD_CAP_565) { + msb = maxoff + 5 - 1; + fb->panel->cntl |= CNTL_ST_1XBPP_565; + } else if (fb->panel->caps & CLCD_CAP_5551) { + msb = maxoff + 5 - 1; + fb->panel->cntl |= CNTL_ST_1XBPP_5551; + } else if (fb->panel->caps & CLCD_CAP_444) { + msb = maxoff + 4 - 1; + fb->panel->cntl |= CNTL_ST_1XBPP_444; + } + + /* Send out as many bits as we need */ + if (msb > 17) + fb->panel->cntl |= CNTL_ST_CDWID_24; + else if (msb > 15) + fb->panel->cntl |= CNTL_ST_CDWID_18; + else if (msb > 11) + fb->panel->cntl |= CNTL_ST_CDWID_16; + else + fb->panel->cntl |= CNTL_ST_CDWID_12; + } + return fb->panel->caps ? 0 : -EINVAL; } @@ -658,11 +779,24 @@ static int clcdfb_of_init_display(struct clcd_fb *fb) if (!fb->panel) return -ENOMEM; + /* + * Fetch the panel endpoint. 
+ */ endpoint = of_graph_get_next_endpoint(fb->dev->dev.of_node, NULL); if (!endpoint) return -ENODEV; - err = clcdfb_of_get_mode(&fb->dev->dev, endpoint, &fb->panel->mode); + if (fb->vendor->init_panel) { + err = fb->vendor->init_panel(fb, endpoint); + if (err) + return err; + } + + err = clcdfb_of_get_backlight(endpoint, fb->panel); + if (err) + return err; + + err = clcdfb_of_get_mode(&fb->dev->dev, endpoint, fb->panel); if (err) return err; @@ -693,11 +827,11 @@ static int clcdfb_of_init_display(struct clcd_fb *fb) if (of_property_read_u32_array(endpoint, "arm,pl11x,tft-r0g0b0-pads", - tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) == 0) - return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0], - tft_r0b0g0[1], tft_r0b0g0[2]); + tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0) + return -ENOENT; - return -ENOENT; + return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0], + tft_r0b0g0[1], tft_r0b0g0[2]); } static int clcdfb_of_vram_setup(struct clcd_fb *fb) @@ -818,6 +952,7 @@ static struct clcd_board *clcdfb_of_get_board(struct amba_device *dev) static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id) { struct clcd_board *board = dev_get_platdata(&dev->dev); + struct clcd_vendor_data *vendor = id->data; struct clcd_fb *fb; int ret; @@ -827,6 +962,12 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id) if (!board) return -EINVAL; + if (vendor->init_board) { + ret = vendor->init_board(dev, board); + if (ret) + return ret; + } + ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)); if (ret) goto out; @@ -845,17 +986,18 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id) } fb->dev = dev; + fb->vendor = vendor; fb->board = board; - dev_info(&fb->dev->dev, "PL%03x rev%u at 0x%08llx\n", - amba_part(dev), amba_rev(dev), + dev_info(&fb->dev->dev, "PL%03x designer %02x rev%u at 0x%08llx\n", + amba_part(dev), amba_manf(dev), amba_rev(dev), (unsigned long long)dev->res.start); ret = fb->board->setup(fb); if (ret) goto free_fb; - ret = clcdfb_register(fb); + ret = clcdfb_register(fb); if (ret == 0) { amba_set_drvdata(dev, fb); goto out; @@ -891,10 +1033,30 @@ static int clcdfb_remove(struct amba_device *dev) return 0; } +static struct clcd_vendor_data vendor_arm = { + /* Sets up the versatile board displays */ + .init_panel = versatile_clcd_init_panel, +}; + +static struct clcd_vendor_data vendor_nomadik = { + .clock_timregs = true, + .packed_24_bit_pixels = true, + .st_bitmux_control = true, + .init_board = nomadik_clcd_init_board, + .init_panel = nomadik_clcd_init_panel, +}; + static struct amba_id clcdfb_id_table[] = { { .id = 0x00041110, .mask = 0x000ffffe, + .data = &vendor_arm, + }, + /* ST Electronics Nomadik variant */ + { + .id = 0x00180110, + .mask = 0x00fffffe, + .data = &vendor_nomadik, }, { 0, 0 }, }; diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c index 1b0b233b8b39..1928cb2b5386 100644 --- a/drivers/video/fbdev/arcfb.c +++ b/drivers/video/fbdev/arcfb.c @@ -79,7 +79,7 @@ struct arcfb_par { spinlock_t lock; }; -static struct fb_fix_screeninfo arcfb_fix = { +static const struct fb_fix_screeninfo arcfb_fix = { .id = "arcfb", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_MONO01, @@ -89,7 +89,7 @@ static struct fb_fix_screeninfo arcfb_fix = { .accel = FB_ACCEL_NONE, }; -static struct fb_var_screeninfo arcfb_var = { +static const struct fb_var_screeninfo arcfb_var = { .xres = 128, .yres = 64, .xres_virtual = 128, diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c index 
7e8ddf00ccc2..91eea4583382 100644 --- a/drivers/video/fbdev/asiliantfb.c +++ b/drivers/video/fbdev/asiliantfb.c @@ -474,7 +474,7 @@ static void chips_hw_init(struct fb_info *p) write_fr(chips_init_fr[i].addr, chips_init_fr[i].data); } -static struct fb_fix_screeninfo asiliantfb_fix = { +static const struct fb_fix_screeninfo asiliantfb_fix = { .id = "Asiliant 69000", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, @@ -483,7 +483,7 @@ static struct fb_fix_screeninfo asiliantfb_fix = { .smem_len = 0x200000, /* 2MB */ }; -static struct fb_var_screeninfo asiliantfb_var = { +static const struct fb_var_screeninfo asiliantfb_var = { .xres = 640, .yres = 480, .xres_virtual = 640, diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c index 0a4626886b00..fa07242a78d2 100644 --- a/drivers/video/fbdev/aty/aty128fb.c +++ b/drivers/video/fbdev/aty/aty128fb.c @@ -93,7 +93,7 @@ #ifndef CONFIG_PPC_PMAC /* default mode */ -static struct fb_var_screeninfo default_var = { +static const struct fb_var_screeninfo default_var = { /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */ 640, 480, 640, 480, 0, 0, 8, 0, {0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0}, @@ -104,7 +104,7 @@ static struct fb_var_screeninfo default_var = { #else /* CONFIG_PPC_PMAC */ /* default to 1024x768 at 75Hz on PPC - this will work * on the iMac, the usual 640x480 @ 60Hz doesn't. */ -static struct fb_var_screeninfo default_var = { +static const struct fb_var_screeninfo default_var = { /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */ 1024, 768, 1024, 768, 0, 0, 8, 0, {0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0}, @@ -375,7 +375,7 @@ static const struct aty128_meminfo ddr_sgram = { .name = "64-bit DDR SGRAM", }; -static struct fb_fix_screeninfo aty128fb_fix = { +static const struct fb_fix_screeninfo aty128fb_fix = { .id = "ATY Rage128", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index f34ed47fcaf8..11026e726b68 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -212,7 +212,7 @@ struct pci_mmap_map { unsigned long prot_mask; }; -static struct fb_fix_screeninfo atyfb_fix = { +static const struct fb_fix_screeninfo atyfb_fix = { .id = "ATY Mach64", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, diff --git a/drivers/video/fbdev/aty/radeon_monitor.c b/drivers/video/fbdev/aty/radeon_monitor.c index f1ce229de78d..278b421ab3fe 100644 --- a/drivers/video/fbdev/aty/radeon_monitor.c +++ b/drivers/video/fbdev/aty/radeon_monitor.c @@ -4,7 +4,7 @@ #include "../edid.h" -static struct fb_var_screeninfo radeonfb_default_var = { +static const struct fb_var_screeninfo radeonfb_default_var = { .xres = 640, .yres = 480, .xres_virtual = 640, diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index f9507b1894df..6c2b2ca4a909 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c @@ -43,6 +43,7 @@ #include <linux/ctype.h> #include <linux/dma-mapping.h> #include <linux/slab.h> +#include <linux/uaccess.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1200fb.h> /* platform_data */ diff --git a/drivers/video/fbdev/bfin_adv7393fb.c b/drivers/video/fbdev/bfin_adv7393fb.c index e2d7d039ce3b..542ffaddc6ab 100644 --- a/drivers/video/fbdev/bfin_adv7393fb.c +++ b/drivers/video/fbdev/bfin_adv7393fb.c @@ -375,7 +375,6 @@ static int bfin_adv7393_fb_probe(struct i2c_client *client, { 
int ret = 0; struct proc_dir_entry *entry; - int num_modes = ARRAY_SIZE(known_modes); struct adv7393fb_device *fbdev = NULL; @@ -384,7 +383,7 @@ static int bfin_adv7393_fb_probe(struct i2c_client *client, return -EINVAL; } - if (mode > num_modes) { + if (mode >= ARRAY_SIZE(known_modes)) { dev_err(&client->dev, "mode %d: not supported", mode); return -EFAULT; } @@ -797,7 +796,7 @@ static struct i2c_driver bfin_adv7393_fb_driver = { static int __init bfin_adv7393_fb_driver_init(void) { -#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) +#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI) request_module("i2c-bfin-twi"); #else request_module("i2c-gpio"); diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c index 07675d6f323e..2d3b691f3fc4 100644 --- a/drivers/video/fbdev/cobalt_lcdfb.c +++ b/drivers/video/fbdev/cobalt_lcdfb.c @@ -63,7 +63,6 @@ #define LCD_CUR_POS(x) ((x) & LCD_CUR_POS_MASK) #define LCD_TEXT_POS(x) ((x) | LCD_TEXT_MODE) -#ifdef CONFIG_MIPS_COBALT static inline void lcd_write_control(struct fb_info *info, u8 control) { writel((u32)control << 24, info->screen_base); @@ -83,47 +82,6 @@ static inline u8 lcd_read_data(struct fb_info *info) { return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24; } -#else - -#define LCD_CTL 0x00 -#define LCD_DATA 0x08 -#define CPLD_STATUS 0x10 -#define CPLD_DATA 0x18 - -static inline void cpld_wait(struct fb_info *info) -{ - do { - } while (readl(info->screen_base + CPLD_STATUS) & 1); -} - -static inline void lcd_write_control(struct fb_info *info, u8 control) -{ - cpld_wait(info); - writel(control, info->screen_base + LCD_CTL); -} - -static inline u8 lcd_read_control(struct fb_info *info) -{ - cpld_wait(info); - readl(info->screen_base + LCD_CTL); - cpld_wait(info); - return readl(info->screen_base + CPLD_DATA) & 0xff; -} - -static inline void lcd_write_data(struct fb_info *info, u8 data) -{ - cpld_wait(info); - writel(data, info->screen_base + LCD_DATA); -} - -static inline u8 lcd_read_data(struct fb_info *info) -{ - cpld_wait(info); - readl(info->screen_base + LCD_DATA); - cpld_wait(info); - return readl(info->screen_base + CPLD_DATA) & 0xff; -} -#endif static int lcd_busy_wait(struct fb_info *info) { diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index 924bad45c176..37a37c4d04cb 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -50,9 +50,9 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green, return 1; if (regno < 16) { - red >>= 8; - green >>= 8; - blue >>= 8; + red >>= 16 - info->var.red.length; + green >>= 16 - info->var.green.length; + blue >>= 16 - info->var.blue.length; ((u32 *)(info->pseudo_palette))[regno] = (red << info->var.red.offset) | (green << info->var.green.offset) | diff --git a/drivers/video/fbdev/exynos/Kconfig b/drivers/video/fbdev/exynos/Kconfig deleted file mode 100644 index d916bef94f25..000000000000 --- a/drivers/video/fbdev/exynos/Kconfig +++ /dev/null @@ -1,32 +0,0 @@ -# -# Exynos Video configuration -# - -menuconfig EXYNOS_VIDEO - tristate "Exynos Video driver support" - depends on ARCH_S5PV210 || ARCH_EXYNOS - help - This enables support for EXYNOS Video device. - -if EXYNOS_VIDEO - -# -# MIPI DSI driver -# - -config EXYNOS_MIPI_DSI - tristate "EXYNOS MIPI DSI driver support." - select GENERIC_PHY - help - This enables support for MIPI-DSI device. 
- -config EXYNOS_LCD_S6E8AX0 - tristate "S6E8AX0 MIPI AMOLED LCD Driver" - depends on EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE - depends on (LCD_CLASS_DEVICE = y) - default n - help - If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its - LCD control driver. - -endif # EXYNOS_VIDEO diff --git a/drivers/video/fbdev/exynos/Makefile b/drivers/video/fbdev/exynos/Makefile deleted file mode 100644 index 02d8dc522fea..000000000000 --- a/drivers/video/fbdev/exynos/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -# -# Makefile for the exynos video drivers. -# - -obj-$(CONFIG_EXYNOS_MIPI_DSI) += exynos-mipi-dsi-mod.o - -exynos-mipi-dsi-mod-objs += exynos_mipi_dsi.o exynos_mipi_dsi_common.o \ - exynos_mipi_dsi_lowlevel.o -obj-$(CONFIG_EXYNOS_LCD_S6E8AX0) += s6e8ax0.o diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi.c deleted file mode 100644 index 92e4af3caaf8..000000000000 --- a/drivers/video/fbdev/exynos/exynos_mipi_dsi.c +++ /dev/null @@ -1,574 +0,0 @@ -/* linux/drivers/video/exynos/exynos_mipi_dsi.c - * - * Samsung SoC MIPI-DSIM driver. - * - * Copyright (c) 2012 Samsung Electronics Co., Ltd - * - * InKi Dae, <inki.dae@samsung.com> - * Donghwa Lee, <dh09.lee@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/clk.h> -#include <linux/mutex.h> -#include <linux/wait.h> -#include <linux/fs.h> -#include <linux/mm.h> -#include <linux/fb.h> -#include <linux/ctype.h> -#include <linux/platform_device.h> -#include <linux/io.h> -#include <linux/irq.h> -#include <linux/memory.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/kthread.h> -#include <linux/notifier.h> -#include <linux/phy/phy.h> -#include <linux/regulator/consumer.h> -#include <linux/pm_runtime.h> -#include <linux/err.h> - -#include <video/exynos_mipi_dsim.h> - -#include "exynos_mipi_dsi_common.h" -#include "exynos_mipi_dsi_lowlevel.h" - -struct mipi_dsim_ddi { - int bus_id; - struct list_head list; - struct mipi_dsim_lcd_device *dsim_lcd_dev; - struct mipi_dsim_lcd_driver *dsim_lcd_drv; -}; - -static LIST_HEAD(dsim_ddi_list); - -static DEFINE_MUTEX(mipi_dsim_lock); - -static struct mipi_dsim_platform_data *to_dsim_plat(struct platform_device - *pdev) -{ - return pdev->dev.platform_data; -} - -static struct regulator_bulk_data supplies[] = { - { .supply = "vdd11", }, - { .supply = "vdd18", }, -}; - -static int exynos_mipi_regulator_enable(struct mipi_dsim_device *dsim) -{ - int ret; - - mutex_lock(&dsim->lock); - ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies); - mutex_unlock(&dsim->lock); - - return ret; -} - -static int exynos_mipi_regulator_disable(struct mipi_dsim_device *dsim) -{ - int ret; - - mutex_lock(&dsim->lock); - ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies); - mutex_unlock(&dsim->lock); - - return ret; -} - -/* update all register settings to MIPI DSI controller. */ -static void exynos_mipi_update_cfg(struct mipi_dsim_device *dsim) -{ - /* - * data from Display controller(FIMD) is not transferred in video mode - * but in case of command mode, all settings is not updated to - * registers. - */ - exynos_mipi_dsi_stand_by(dsim, 0); - - exynos_mipi_dsi_init_dsim(dsim); - exynos_mipi_dsi_init_link(dsim); - - exynos_mipi_dsi_set_hs_enable(dsim); - - /* set display timing. 
*/ - exynos_mipi_dsi_set_display_mode(dsim, dsim->dsim_config); - - exynos_mipi_dsi_init_interrupt(dsim); - - /* - * data from Display controller(FIMD) is transferred in video mode - * but in case of command mode, all settings are updated to registers. - */ - exynos_mipi_dsi_stand_by(dsim, 1); -} - -static int exynos_mipi_dsi_early_blank_mode(struct mipi_dsim_device *dsim, - int power) -{ - struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv; - struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev; - - switch (power) { - case FB_BLANK_POWERDOWN: - if (dsim->suspended) - return 0; - - if (client_drv && client_drv->suspend) - client_drv->suspend(client_dev); - - clk_disable(dsim->clock); - - exynos_mipi_regulator_disable(dsim); - - dsim->suspended = true; - - break; - default: - break; - } - - return 0; -} - -static int exynos_mipi_dsi_blank_mode(struct mipi_dsim_device *dsim, int power) -{ - struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv; - struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev; - - switch (power) { - case FB_BLANK_UNBLANK: - if (!dsim->suspended) - return 0; - - /* lcd panel power on. */ - if (client_drv && client_drv->power_on) - client_drv->power_on(client_dev, 1); - - exynos_mipi_regulator_enable(dsim); - - /* enable MIPI-DSI PHY. */ - phy_power_on(dsim->phy); - - clk_enable(dsim->clock); - - exynos_mipi_update_cfg(dsim); - - /* set lcd panel sequence commands. */ - if (client_drv && client_drv->set_sequence) - client_drv->set_sequence(client_dev); - - dsim->suspended = false; - - break; - case FB_BLANK_NORMAL: - /* TODO. */ - break; - default: - break; - } - - return 0; -} - -int exynos_mipi_dsi_register_lcd_device(struct mipi_dsim_lcd_device *lcd_dev) -{ - struct mipi_dsim_ddi *dsim_ddi; - - if (!lcd_dev->name) { - pr_err("dsim_lcd_device name is NULL.\n"); - return -EFAULT; - } - - dsim_ddi = kzalloc(sizeof(struct mipi_dsim_ddi), GFP_KERNEL); - if (!dsim_ddi) { - pr_err("failed to allocate dsim_ddi object.\n"); - return -ENOMEM; - } - - dsim_ddi->dsim_lcd_dev = lcd_dev; - - mutex_lock(&mipi_dsim_lock); - list_add_tail(&dsim_ddi->list, &dsim_ddi_list); - mutex_unlock(&mipi_dsim_lock); - - return 0; -} - -static struct mipi_dsim_ddi *exynos_mipi_dsi_find_lcd_device( - struct mipi_dsim_lcd_driver *lcd_drv) -{ - struct mipi_dsim_ddi *dsim_ddi, *next; - struct mipi_dsim_lcd_device *lcd_dev; - - mutex_lock(&mipi_dsim_lock); - - list_for_each_entry_safe(dsim_ddi, next, &dsim_ddi_list, list) { - if (!dsim_ddi) - goto out; - - lcd_dev = dsim_ddi->dsim_lcd_dev; - if (!lcd_dev) - continue; - - if ((strcmp(lcd_drv->name, lcd_dev->name)) == 0) { - /** - * bus_id would be used to identify - * connected bus. 
- */ - dsim_ddi->bus_id = lcd_dev->bus_id; - mutex_unlock(&mipi_dsim_lock); - - return dsim_ddi; - } - - list_del(&dsim_ddi->list); - kfree(dsim_ddi); - } - -out: - mutex_unlock(&mipi_dsim_lock); - - return NULL; -} - -int exynos_mipi_dsi_register_lcd_driver(struct mipi_dsim_lcd_driver *lcd_drv) -{ - struct mipi_dsim_ddi *dsim_ddi; - - if (!lcd_drv->name) { - pr_err("dsim_lcd_driver name is NULL.\n"); - return -EFAULT; - } - - dsim_ddi = exynos_mipi_dsi_find_lcd_device(lcd_drv); - if (!dsim_ddi) { - pr_err("mipi_dsim_ddi object not found.\n"); - return -EFAULT; - } - - dsim_ddi->dsim_lcd_drv = lcd_drv; - - pr_info("registered panel driver(%s) to mipi-dsi driver.\n", - lcd_drv->name); - - return 0; - -} -EXPORT_SYMBOL_GPL(exynos_mipi_dsi_register_lcd_driver); - -static struct mipi_dsim_ddi *exynos_mipi_dsi_bind_lcd_ddi( - struct mipi_dsim_device *dsim, - const char *name) -{ - struct mipi_dsim_ddi *dsim_ddi, *next; - struct mipi_dsim_lcd_driver *lcd_drv; - struct mipi_dsim_lcd_device *lcd_dev; - int ret; - - mutex_lock(&dsim->lock); - - list_for_each_entry_safe(dsim_ddi, next, &dsim_ddi_list, list) { - lcd_drv = dsim_ddi->dsim_lcd_drv; - lcd_dev = dsim_ddi->dsim_lcd_dev; - if (!lcd_drv || !lcd_dev || - (dsim->id != dsim_ddi->bus_id)) - continue; - - dev_dbg(dsim->dev, "lcd_drv->id = %d, lcd_dev->id = %d\n", - lcd_drv->id, lcd_dev->id); - dev_dbg(dsim->dev, "lcd_dev->bus_id = %d, dsim->id = %d\n", - lcd_dev->bus_id, dsim->id); - - if ((strcmp(lcd_drv->name, name) == 0)) { - lcd_dev->master = dsim; - - lcd_dev->dev.parent = dsim->dev; - dev_set_name(&lcd_dev->dev, "%s", lcd_drv->name); - - ret = device_register(&lcd_dev->dev); - if (ret < 0) { - dev_err(dsim->dev, - "can't register %s, status %d\n", - dev_name(&lcd_dev->dev), ret); - mutex_unlock(&dsim->lock); - - return NULL; - } - - dsim->dsim_lcd_dev = lcd_dev; - dsim->dsim_lcd_drv = lcd_drv; - - mutex_unlock(&dsim->lock); - - return dsim_ddi; - } - } - - mutex_unlock(&dsim->lock); - - return NULL; -} - -/* define MIPI-DSI Master operations. */ -static struct mipi_dsim_master_ops master_ops = { - .cmd_read = exynos_mipi_dsi_rd_data, - .cmd_write = exynos_mipi_dsi_wr_data, - .get_dsim_frame_done = exynos_mipi_dsi_get_frame_done_status, - .clear_dsim_frame_done = exynos_mipi_dsi_clear_frame_done, - .set_early_blank_mode = exynos_mipi_dsi_early_blank_mode, - .set_blank_mode = exynos_mipi_dsi_blank_mode, -}; - -static int exynos_mipi_dsi_probe(struct platform_device *pdev) -{ - struct resource *res; - struct mipi_dsim_device *dsim; - struct mipi_dsim_config *dsim_config; - struct mipi_dsim_platform_data *dsim_pd; - struct mipi_dsim_ddi *dsim_ddi; - int ret = -EINVAL; - - dsim = devm_kzalloc(&pdev->dev, sizeof(struct mipi_dsim_device), - GFP_KERNEL); - if (!dsim) { - dev_err(&pdev->dev, "failed to allocate dsim object.\n"); - return -ENOMEM; - } - - dsim->pd = to_dsim_plat(pdev); - dsim->dev = &pdev->dev; - dsim->id = pdev->id; - - /* get mipi_dsim_platform_data. */ - dsim_pd = (struct mipi_dsim_platform_data *)dsim->pd; - if (dsim_pd == NULL) { - dev_err(&pdev->dev, "failed to get platform data for dsim.\n"); - return -EINVAL; - } - /* get mipi_dsim_config. 
*/ - dsim_config = dsim_pd->dsim_config; - if (dsim_config == NULL) { - dev_err(&pdev->dev, "failed to get dsim config data.\n"); - return -EINVAL; - } - - dsim->dsim_config = dsim_config; - dsim->master_ops = &master_ops; - - mutex_init(&dsim->lock); - - ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(supplies), - supplies); - if (ret) { - dev_err(&pdev->dev, "Failed to get regulators: %d\n", ret); - return ret; - } - - dsim->phy = devm_phy_get(&pdev->dev, "dsim"); - if (IS_ERR(dsim->phy)) - return PTR_ERR(dsim->phy); - - dsim->clock = devm_clk_get(&pdev->dev, "dsim0"); - if (IS_ERR(dsim->clock)) { - dev_err(&pdev->dev, "failed to get dsim clock source\n"); - return -ENODEV; - } - - clk_enable(dsim->clock); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - dsim->reg_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dsim->reg_base)) { - ret = PTR_ERR(dsim->reg_base); - goto error; - } - - mutex_init(&dsim->lock); - - /* bind lcd ddi matched with panel name. */ - dsim_ddi = exynos_mipi_dsi_bind_lcd_ddi(dsim, dsim_pd->lcd_panel_name); - if (!dsim_ddi) { - dev_err(&pdev->dev, "mipi_dsim_ddi object not found.\n"); - ret = -EINVAL; - goto error; - } - - ret = platform_get_irq(pdev, 0); - if (ret < 0) { - dev_err(&pdev->dev, "failed to request dsim irq resource\n"); - goto error; - } - dsim->irq = ret; - - init_completion(&dsim_wr_comp); - init_completion(&dsim_rd_comp); - platform_set_drvdata(pdev, dsim); - - ret = devm_request_irq(&pdev->dev, dsim->irq, - exynos_mipi_dsi_interrupt_handler, - IRQF_SHARED, dev_name(&pdev->dev), dsim); - if (ret != 0) { - dev_err(&pdev->dev, "failed to request dsim irq\n"); - ret = -EINVAL; - goto error; - } - - /* enable interrupts */ - exynos_mipi_dsi_init_interrupt(dsim); - - /* initialize mipi-dsi client(lcd panel). */ - if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->probe) - dsim_ddi->dsim_lcd_drv->probe(dsim_ddi->dsim_lcd_dev); - - /* in case mipi-dsi has been enabled by bootloader */ - if (dsim_pd->enabled) { - exynos_mipi_regulator_enable(dsim); - goto done; - } - - /* lcd panel power on. */ - if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->power_on) - dsim_ddi->dsim_lcd_drv->power_on(dsim_ddi->dsim_lcd_dev, 1); - - exynos_mipi_regulator_enable(dsim); - - /* enable MIPI-DSI PHY. */ - phy_power_on(dsim->phy); - - exynos_mipi_update_cfg(dsim); - - /* set lcd panel sequence commands. */ - if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->set_sequence) - dsim_ddi->dsim_lcd_drv->set_sequence(dsim_ddi->dsim_lcd_dev); - - dsim->suspended = false; - -done: - platform_set_drvdata(pdev, dsim); - - dev_dbg(&pdev->dev, "%s() completed successfully (%s mode)\n", __func__, - dsim_config->e_interface == DSIM_COMMAND ? 
"CPU" : "RGB"); - - return 0; - -error: - clk_disable(dsim->clock); - return ret; -} - -static int exynos_mipi_dsi_remove(struct platform_device *pdev) -{ - struct mipi_dsim_device *dsim = platform_get_drvdata(pdev); - struct mipi_dsim_ddi *dsim_ddi, *next; - struct mipi_dsim_lcd_driver *dsim_lcd_drv; - - clk_disable(dsim->clock); - - list_for_each_entry_safe(dsim_ddi, next, &dsim_ddi_list, list) { - if (dsim_ddi) { - if (dsim->id != dsim_ddi->bus_id) - continue; - - dsim_lcd_drv = dsim_ddi->dsim_lcd_drv; - - if (dsim_lcd_drv->remove) - dsim_lcd_drv->remove(dsim_ddi->dsim_lcd_dev); - - kfree(dsim_ddi); - } - } - - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static int exynos_mipi_dsi_suspend(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct mipi_dsim_device *dsim = platform_get_drvdata(pdev); - struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv; - struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev; - - disable_irq(dsim->irq); - - if (dsim->suspended) - return 0; - - if (client_drv && client_drv->suspend) - client_drv->suspend(client_dev); - - /* disable MIPI-DSI PHY. */ - phy_power_off(dsim->phy); - - clk_disable(dsim->clock); - - exynos_mipi_regulator_disable(dsim); - - dsim->suspended = true; - - return 0; -} - -static int exynos_mipi_dsi_resume(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct mipi_dsim_device *dsim = platform_get_drvdata(pdev); - struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv; - struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev; - - enable_irq(dsim->irq); - - if (!dsim->suspended) - return 0; - - /* lcd panel power on. */ - if (client_drv && client_drv->power_on) - client_drv->power_on(client_dev, 1); - - exynos_mipi_regulator_enable(dsim); - - /* enable MIPI-DSI PHY. */ - phy_power_on(dsim->phy); - - clk_enable(dsim->clock); - - exynos_mipi_update_cfg(dsim); - - /* set lcd panel sequence commands. */ - if (client_drv && client_drv->set_sequence) - client_drv->set_sequence(client_dev); - - dsim->suspended = false; - - return 0; -} -#endif - -static const struct dev_pm_ops exynos_mipi_dsi_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(exynos_mipi_dsi_suspend, exynos_mipi_dsi_resume) -}; - -static struct platform_driver exynos_mipi_dsi_driver = { - .probe = exynos_mipi_dsi_probe, - .remove = exynos_mipi_dsi_remove, - .driver = { - .name = "exynos-mipi-dsim", - .pm = &exynos_mipi_dsi_pm_ops, - }, -}; - -module_platform_driver(exynos_mipi_dsi_driver); - -MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>"); -MODULE_DESCRIPTION("Samsung SoC MIPI-DSI driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c deleted file mode 100644 index 2358a2fbbbcd..000000000000 --- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c +++ /dev/null @@ -1,880 +0,0 @@ -/* linux/drivers/video/exynos/exynos_mipi_dsi_common.c - * - * Samsung SoC MIPI-DSI common driver. - * - * Copyright (c) 2012 Samsung Electronics Co., Ltd - * - * InKi Dae, <inki.dae@samsung.com> - * Donghwa Lee, <dh09.lee@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
-*/ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/mutex.h> -#include <linux/wait.h> -#include <linux/fs.h> -#include <linux/mm.h> -#include <linux/fb.h> -#include <linux/ctype.h> -#include <linux/platform_device.h> -#include <linux/io.h> -#include <linux/memory.h> -#include <linux/delay.h> -#include <linux/irqreturn.h> -#include <linux/kthread.h> - -#include <video/mipi_display.h> -#include <video/exynos_mipi_dsim.h> - -#include "exynos_mipi_dsi_regs.h" -#include "exynos_mipi_dsi_lowlevel.h" -#include "exynos_mipi_dsi_common.h" - -#define MIPI_FIFO_TIMEOUT msecs_to_jiffies(250) -#define MIPI_RX_FIFO_READ_DONE 0x30800002 -#define MIPI_MAX_RX_FIFO 20 -#define MHZ (1000 * 1000) -#define FIN_HZ (24 * MHZ) - -#define DFIN_PLL_MIN_HZ (6 * MHZ) -#define DFIN_PLL_MAX_HZ (12 * MHZ) - -#define DFVCO_MIN_HZ (500 * MHZ) -#define DFVCO_MAX_HZ (1000 * MHZ) - -#define TRY_GET_FIFO_TIMEOUT (5000 * 2) -#define TRY_FIFO_CLEAR (10) - -/* MIPI-DSIM status types. */ -enum { - DSIM_STATE_INIT, /* should be initialized. */ - DSIM_STATE_STOP, /* CPU and LCDC are LP mode. */ - DSIM_STATE_HSCLKEN, /* HS clock was enabled. */ - DSIM_STATE_ULPS -}; - -/* define DSI lane types. */ -enum { - DSIM_LANE_CLOCK = (1 << 0), - DSIM_LANE_DATA0 = (1 << 1), - DSIM_LANE_DATA1 = (1 << 2), - DSIM_LANE_DATA2 = (1 << 3), - DSIM_LANE_DATA3 = (1 << 4) -}; - -static unsigned int dpll_table[15] = { - 100, 120, 170, 220, 270, - 320, 390, 450, 510, 560, - 640, 690, 770, 870, 950 -}; - -irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id) -{ - struct mipi_dsim_device *dsim = dev_id; - unsigned int intsrc, intmsk; - - intsrc = exynos_mipi_dsi_read_interrupt(dsim); - intmsk = exynos_mipi_dsi_read_interrupt_mask(dsim); - intmsk = ~intmsk & intsrc; - - if (intsrc & INTMSK_RX_DONE) { - complete(&dsim_rd_comp); - dev_dbg(dsim->dev, "MIPI INTMSK_RX_DONE\n"); - } - if (intsrc & INTMSK_FIFO_EMPTY) { - complete(&dsim_wr_comp); - dev_dbg(dsim->dev, "MIPI INTMSK_FIFO_EMPTY\n"); - } - - exynos_mipi_dsi_clear_interrupt(dsim, intmsk); - - return IRQ_HANDLED; -} - -/* - * write long packet to mipi dsi slave - * @dsim: mipi dsim device structure. - * @data0: packet data to send. - * @data1: size of packet data - */ -static void exynos_mipi_dsi_long_data_wr(struct mipi_dsim_device *dsim, - const unsigned char *data0, unsigned int data_size) -{ - unsigned int data_cnt = 0, payload = 0; - - /* in case that data count is more then 4 */ - for (data_cnt = 0; data_cnt < data_size; data_cnt += 4) { - /* - * after sending 4bytes per one time, - * send remainder data less then 4. - */ - if ((data_size - data_cnt) < 4) { - if ((data_size - data_cnt) == 3) { - payload = data0[data_cnt] | - data0[data_cnt + 1] << 8 | - data0[data_cnt + 2] << 16; - dev_dbg(dsim->dev, "count = 3 payload = %x, %x %x %x\n", - payload, data0[data_cnt], - data0[data_cnt + 1], - data0[data_cnt + 2]); - } else if ((data_size - data_cnt) == 2) { - payload = data0[data_cnt] | - data0[data_cnt + 1] << 8; - dev_dbg(dsim->dev, - "count = 2 payload = %x, %x %x\n", payload, - data0[data_cnt], - data0[data_cnt + 1]); - } else if ((data_size - data_cnt) == 1) { - payload = data0[data_cnt]; - } - - exynos_mipi_dsi_wr_tx_data(dsim, payload); - /* send 4bytes per one time. 
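
The interrupt handler above services only the sources that are both raised and unmasked: a set bit in INTMSK means the source is masked off, so the pending set is "~intmsk & intsrc". A minimal sketch of that filter (dsim_pending_irqs is a hypothetical name, not part of this driver):

        /* Return the pending, unmasked interrupt bits; a 1 bit in
         * intmsk means the source is masked off (hypothetical helper). */
        static unsigned int dsim_pending_irqs(unsigned int intsrc,
                                              unsigned int intmsk)
        {
                return ~intmsk & intsrc;
        }
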
*/ - } else { - payload = data0[data_cnt] | - data0[data_cnt + 1] << 8 | - data0[data_cnt + 2] << 16 | - data0[data_cnt + 3] << 24; - - dev_dbg(dsim->dev, - "count = 4 payload = %x, %x %x %x %x\n", - payload, *(u8 *)(data0 + data_cnt), - data0[data_cnt + 1], - data0[data_cnt + 2], - data0[data_cnt + 3]); - - exynos_mipi_dsi_wr_tx_data(dsim, payload); - } - } -} - -int exynos_mipi_dsi_wr_data(struct mipi_dsim_device *dsim, unsigned int data_id, - const unsigned char *data0, unsigned int data_size) -{ - unsigned int check_rx_ack = 0; - - if (dsim->state == DSIM_STATE_ULPS) { - dev_err(dsim->dev, "state is ULPS.\n"); - - return -EINVAL; - } - - /* FIXME!!! why does it need this delay? */ - msleep(20); - - mutex_lock(&dsim->lock); - - switch (data_id) { - /* short packet types of packet types for command. */ - case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM: - case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM: - case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM: - case MIPI_DSI_DCS_SHORT_WRITE: - case MIPI_DSI_DCS_SHORT_WRITE_PARAM: - case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE: - exynos_mipi_dsi_wr_tx_header(dsim, data_id, data0[0], data0[1]); - if (check_rx_ack) { - /* process response func should be implemented */ - mutex_unlock(&dsim->lock); - return 0; - } else { - mutex_unlock(&dsim->lock); - return -EINVAL; - } - - /* general command */ - case MIPI_DSI_COLOR_MODE_OFF: - case MIPI_DSI_COLOR_MODE_ON: - case MIPI_DSI_SHUTDOWN_PERIPHERAL: - case MIPI_DSI_TURN_ON_PERIPHERAL: - exynos_mipi_dsi_wr_tx_header(dsim, data_id, data0[0], data0[1]); - if (check_rx_ack) { - /* process response func should be implemented. */ - mutex_unlock(&dsim->lock); - return 0; - } else { - mutex_unlock(&dsim->lock); - return -EINVAL; - } - - /* packet types for video data */ - case MIPI_DSI_V_SYNC_START: - case MIPI_DSI_V_SYNC_END: - case MIPI_DSI_H_SYNC_START: - case MIPI_DSI_H_SYNC_END: - case MIPI_DSI_END_OF_TRANSMISSION: - mutex_unlock(&dsim->lock); - return 0; - - /* long packet type and null packet */ - case MIPI_DSI_NULL_PACKET: - case MIPI_DSI_BLANKING_PACKET: - mutex_unlock(&dsim->lock); - return 0; - case MIPI_DSI_GENERIC_LONG_WRITE: - case MIPI_DSI_DCS_LONG_WRITE: - { - unsigned int size, payload = 0; - reinit_completion(&dsim_wr_comp); - - size = data_size * 4; - - /* if data count is less then 4, then send 3bytes data. */ - if (data_size < 4) { - payload = data0[0] | - data0[1] << 8 | - data0[2] << 16; - - exynos_mipi_dsi_wr_tx_data(dsim, payload); - - dev_dbg(dsim->dev, "count = %d payload = %x,%x %x %x\n", - data_size, payload, data0[0], - data0[1], data0[2]); - - /* in case that data count is more then 4 */ - } else - exynos_mipi_dsi_long_data_wr(dsim, data0, data_size); - - /* put data into header fifo */ - exynos_mipi_dsi_wr_tx_header(dsim, data_id, data_size & 0xff, - (data_size & 0xff00) >> 8); - - if (!wait_for_completion_interruptible_timeout(&dsim_wr_comp, - MIPI_FIFO_TIMEOUT)) { - dev_warn(dsim->dev, "command write timeout.\n"); - mutex_unlock(&dsim->lock); - return -EAGAIN; - } - - if (check_rx_ack) { - /* process response func should be implemented. */ - mutex_unlock(&dsim->lock); - return 0; - } else { - mutex_unlock(&dsim->lock); - return -EINVAL; - } - } - - /* packet typo for video data */ - case MIPI_DSI_PACKED_PIXEL_STREAM_16: - case MIPI_DSI_PACKED_PIXEL_STREAM_18: - case MIPI_DSI_PIXEL_STREAM_3BYTE_18: - case MIPI_DSI_PACKED_PIXEL_STREAM_24: - if (check_rx_ack) { - /* process response func should be implemented. 
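
The long-write path above packs payload bytes into 32-bit FIFO words by hand, with one branch per remainder length (1, 2, 3, or a full 4 bytes). The same packing expressed generically, LSB first as the payload FIFO expects, assuming the kernel u8/u32 types (dsim_pack_payload is an illustrative name, not the driver's API):

        /* Pack up to four payload bytes, least-significant byte first,
         * into one word for the payload FIFO (sketch). */
        static u32 dsim_pack_payload(const u8 *buf, unsigned int len)
        {
                u32 payload = 0;
                unsigned int i;

                for (i = 0; i < len && i < 4; i++)
                        payload |= (u32)buf[i] << (8 * i);

                return payload;
        }
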
*/ - mutex_unlock(&dsim->lock); - return 0; - } else { - mutex_unlock(&dsim->lock); - return -EINVAL; - } - default: - dev_warn(dsim->dev, - "data id %x is not supported current DSI spec.\n", - data_id); - - mutex_unlock(&dsim->lock); - return -EINVAL; - } -} - -static unsigned int exynos_mipi_dsi_long_data_rd(struct mipi_dsim_device *dsim, - unsigned int req_size, unsigned int rx_data, u8 *rx_buf) -{ - unsigned int rcv_pkt, i, j; - u16 rxsize; - - /* for long packet */ - rxsize = (u16)((rx_data & 0x00ffff00) >> 8); - dev_dbg(dsim->dev, "mipi dsi rx size : %d\n", rxsize); - if (rxsize != req_size) { - dev_dbg(dsim->dev, - "received size mismatch received: %d, requested: %d\n", - rxsize, req_size); - goto err; - } - - for (i = 0; i < (rxsize >> 2); i++) { - rcv_pkt = exynos_mipi_dsi_rd_rx_fifo(dsim); - dev_dbg(dsim->dev, "received pkt : %08x\n", rcv_pkt); - for (j = 0; j < 4; j++) { - rx_buf[(i * 4) + j] = - (u8)(rcv_pkt >> (j * 8)) & 0xff; - dev_dbg(dsim->dev, "received value : %02x\n", - (rcv_pkt >> (j * 8)) & 0xff); - } - } - if (rxsize % 4) { - rcv_pkt = exynos_mipi_dsi_rd_rx_fifo(dsim); - dev_dbg(dsim->dev, "received pkt : %08x\n", rcv_pkt); - for (j = 0; j < (rxsize % 4); j++) { - rx_buf[(i * 4) + j] = - (u8)(rcv_pkt >> (j * 8)) & 0xff; - dev_dbg(dsim->dev, "received value : %02x\n", - (rcv_pkt >> (j * 8)) & 0xff); - } - } - - return rxsize; - -err: - return -EINVAL; -} - -static unsigned int exynos_mipi_dsi_response_size(unsigned int req_size) -{ - switch (req_size) { - case 1: - return MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE; - case 2: - return MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE; - default: - return MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE; - } -} - -int exynos_mipi_dsi_rd_data(struct mipi_dsim_device *dsim, unsigned int data_id, - unsigned int data0, unsigned int req_size, u8 *rx_buf) -{ - unsigned int rx_data, rcv_pkt, i; - u8 response = 0; - u16 rxsize; - - if (dsim->state == DSIM_STATE_ULPS) { - dev_err(dsim->dev, "state is ULPS.\n"); - - return -EINVAL; - } - - /* FIXME!!! */ - msleep(20); - - mutex_lock(&dsim->lock); - reinit_completion(&dsim_rd_comp); - exynos_mipi_dsi_rd_tx_header(dsim, - MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, req_size); - - response = exynos_mipi_dsi_response_size(req_size); - - switch (data_id) { - case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM: - case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM: - case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM: - case MIPI_DSI_DCS_READ: - exynos_mipi_dsi_rd_tx_header(dsim, - data_id, data0); - /* process response func should be implemented. 
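
exynos_mipi_dsi_long_data_rd() above is the inverse of the TX packing: each 32-bit RX FIFO word is split back into bytes, LSB first, with a trailing partial word handled separately. One step of that, sketched under the same assumptions (dsim_unpack_rx_word is a hypothetical helper):

        /* Split one RX FIFO word into up to four bytes, LSB first. */
        static void dsim_unpack_rx_word(u32 word, u8 *dst, unsigned int nbytes)
        {
                unsigned int j;

                for (j = 0; j < nbytes && j < 4; j++)
                        dst[j] = (word >> (8 * j)) & 0xff;
        }
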
*/ - break; - default: - dev_warn(dsim->dev, - "data id %x is not supported current DSI spec.\n", - data_id); - - mutex_unlock(&dsim->lock); - return -EINVAL; - } - - if (!wait_for_completion_interruptible_timeout(&dsim_rd_comp, - MIPI_FIFO_TIMEOUT)) { - pr_err("RX done interrupt timeout\n"); - mutex_unlock(&dsim->lock); - return 0; - } - - msleep(20); - - rx_data = exynos_mipi_dsi_rd_rx_fifo(dsim); - - if ((u8)(rx_data & 0xff) != response) { - printk(KERN_ERR - "mipi dsi wrong response rx_data : %x, response:%x\n", - rx_data, response); - goto clear_rx_fifo; - } - - if (req_size <= 2) { - /* for short packet */ - for (i = 0; i < req_size; i++) - rx_buf[i] = (rx_data >> (8 + (i * 8))) & 0xff; - rxsize = req_size; - } else { - /* for long packet */ - rxsize = exynos_mipi_dsi_long_data_rd(dsim, req_size, rx_data, - rx_buf); - if (rxsize != req_size) - goto clear_rx_fifo; - } - - rcv_pkt = exynos_mipi_dsi_rd_rx_fifo(dsim); - - msleep(20); - - if (rcv_pkt != MIPI_RX_FIFO_READ_DONE) { - dev_info(dsim->dev, - "Can't found RX FIFO READ DONE FLAG : %x\n", rcv_pkt); - goto clear_rx_fifo; - } - - mutex_unlock(&dsim->lock); - - return rxsize; - -clear_rx_fifo: - i = 0; - while (1) { - rcv_pkt = exynos_mipi_dsi_rd_rx_fifo(dsim); - if ((rcv_pkt == MIPI_RX_FIFO_READ_DONE) - || (i > MIPI_MAX_RX_FIFO)) - break; - dev_dbg(dsim->dev, - "mipi dsi clear rx fifo : %08x\n", rcv_pkt); - i++; - } - dev_info(dsim->dev, - "mipi dsi rx done count : %d, rcv_pkt : %08x\n", i, rcv_pkt); - - mutex_unlock(&dsim->lock); - - return 0; -} - -static int exynos_mipi_dsi_pll_on(struct mipi_dsim_device *dsim, - unsigned int enable) -{ - int sw_timeout; - - if (enable) { - sw_timeout = 1000; - - exynos_mipi_dsi_enable_pll(dsim, 1); - while (1) { - sw_timeout--; - if (exynos_mipi_dsi_is_pll_stable(dsim)) - return 0; - if (sw_timeout == 0) - return -EINVAL; - } - } else - exynos_mipi_dsi_enable_pll(dsim, 0); - - return 0; -} - -static unsigned long exynos_mipi_dsi_change_pll(struct mipi_dsim_device *dsim, - unsigned int pre_divider, unsigned int main_divider, - unsigned int scaler) -{ - unsigned long dfin_pll, dfvco, dpll_out; - unsigned int i, freq_band = 0xf; - - dfin_pll = (FIN_HZ / pre_divider); - - /****************************************************** - * Serial Clock(=ByteClk X 8) FreqBand[3:0] * - ****************************************************** - * ~ 99.99 MHz 0000 - * 100 ~ 119.99 MHz 0001 - * 120 ~ 159.99 MHz 0010 - * 160 ~ 199.99 MHz 0011 - * 200 ~ 239.99 MHz 0100 - * 140 ~ 319.99 MHz 0101 - * 320 ~ 389.99 MHz 0110 - * 390 ~ 449.99 MHz 0111 - * 450 ~ 509.99 MHz 1000 - * 510 ~ 559.99 MHz 1001 - * 560 ~ 639.99 MHz 1010 - * 640 ~ 689.99 MHz 1011 - * 690 ~ 769.99 MHz 1100 - * 770 ~ 869.99 MHz 1101 - * 870 ~ 949.99 MHz 1110 - * 950 ~ 1000 MHz 1111 - ******************************************************/ - if (dfin_pll < DFIN_PLL_MIN_HZ || dfin_pll > DFIN_PLL_MAX_HZ) { - dev_warn(dsim->dev, "fin_pll range should be 6MHz ~ 12MHz\n"); - exynos_mipi_dsi_enable_afc(dsim, 0, 0); - } else { - if (dfin_pll < 7 * MHZ) - exynos_mipi_dsi_enable_afc(dsim, 1, 0x1); - else if (dfin_pll < 8 * MHZ) - exynos_mipi_dsi_enable_afc(dsim, 1, 0x0); - else if (dfin_pll < 9 * MHZ) - exynos_mipi_dsi_enable_afc(dsim, 1, 0x3); - else if (dfin_pll < 10 * MHZ) - exynos_mipi_dsi_enable_afc(dsim, 1, 0x2); - else if (dfin_pll < 11 * MHZ) - exynos_mipi_dsi_enable_afc(dsim, 1, 0x5); - else - exynos_mipi_dsi_enable_afc(dsim, 1, 0x4); - } - - dfvco = dfin_pll * main_divider; - dev_dbg(dsim->dev, "dfvco = %lu, dfin_pll = %lu, main_divider = %d\n", - 
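
The FreqBand table above maps the PLL output frequency to a 4-bit band code by finding the first band whose upper bound lies above the output, with code 0xf covering 950 MHz up to 1 GHz. A sketch of that scan, reusing dpll_table and MHZ from this file (dsim_freq_band is not a driver function):

        /* Scan the band upper bounds (MHz) for the first one above
         * dpll_out; fall back to the top band. */
        static unsigned int dsim_freq_band(unsigned long dpll_out)
        {
                unsigned int i;

                for (i = 0; i < ARRAY_SIZE(dpll_table); i++)
                        if (dpll_out < dpll_table[i] * MHZ)
                                return i;

                return 0xf;
        }
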
dfvco, dfin_pll, main_divider); - if (dfvco < DFVCO_MIN_HZ || dfvco > DFVCO_MAX_HZ) - dev_warn(dsim->dev, "fvco range should be 500MHz ~ 1000MHz\n"); - - dpll_out = dfvco / (1 << scaler); - dev_dbg(dsim->dev, "dpll_out = %lu, dfvco = %lu, scaler = %d\n", - dpll_out, dfvco, scaler); - - for (i = 0; i < ARRAY_SIZE(dpll_table); i++) { - if (dpll_out < dpll_table[i] * MHZ) { - freq_band = i; - break; - } - } - - dev_dbg(dsim->dev, "freq_band = %d\n", freq_band); - - exynos_mipi_dsi_pll_freq(dsim, pre_divider, main_divider, scaler); - - exynos_mipi_dsi_hs_zero_ctrl(dsim, 0); - exynos_mipi_dsi_prep_ctrl(dsim, 0); - - /* Freq Band */ - exynos_mipi_dsi_pll_freq_band(dsim, freq_band); - - /* Stable time */ - exynos_mipi_dsi_pll_stable_time(dsim, dsim->dsim_config->pll_stable_time); - - /* Enable PLL */ - dev_dbg(dsim->dev, "FOUT of mipi dphy pll is %luMHz\n", - (dpll_out / MHZ)); - - return dpll_out; -} - -static int exynos_mipi_dsi_set_clock(struct mipi_dsim_device *dsim, - unsigned int byte_clk_sel, unsigned int enable) -{ - unsigned int esc_div; - unsigned long esc_clk_error_rate; - unsigned long hs_clk = 0, byte_clk = 0, escape_clk = 0; - - if (enable) { - dsim->e_clk_src = byte_clk_sel; - - /* Escape mode clock and byte clock source */ - exynos_mipi_dsi_set_byte_clock_src(dsim, byte_clk_sel); - - /* DPHY, DSIM Link : D-PHY clock out */ - if (byte_clk_sel == DSIM_PLL_OUT_DIV8) { - hs_clk = exynos_mipi_dsi_change_pll(dsim, - dsim->dsim_config->p, dsim->dsim_config->m, - dsim->dsim_config->s); - if (hs_clk == 0) { - dev_err(dsim->dev, - "failed to get hs clock.\n"); - return -EINVAL; - } - - byte_clk = hs_clk / 8; - exynos_mipi_dsi_enable_pll_bypass(dsim, 0); - exynos_mipi_dsi_pll_on(dsim, 1); - /* DPHY : D-PHY clock out, DSIM link : external clock out */ - } else if (byte_clk_sel == DSIM_EXT_CLK_DIV8) { - dev_warn(dsim->dev, "this project is not support\n"); - dev_warn(dsim->dev, - "external clock source for MIPI DSIM.\n"); - } else if (byte_clk_sel == DSIM_EXT_CLK_BYPASS) { - dev_warn(dsim->dev, "this project is not support\n"); - dev_warn(dsim->dev, - "external clock source for MIPI DSIM\n"); - } - - /* escape clock divider */ - esc_div = byte_clk / (dsim->dsim_config->esc_clk); - dev_dbg(dsim->dev, - "esc_div = %d, byte_clk = %lu, esc_clk = %lu\n", - esc_div, byte_clk, dsim->dsim_config->esc_clk); - if ((byte_clk / esc_div) >= (20 * MHZ) || - (byte_clk / esc_div) > - dsim->dsim_config->esc_clk) - esc_div += 1; - - escape_clk = byte_clk / esc_div; - dev_dbg(dsim->dev, - "escape_clk = %lu, byte_clk = %lu, esc_div = %d\n", - escape_clk, byte_clk, esc_div); - - /* enable escape clock. 
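
The escape-clock setup above derives a prescaler from the byte clock and bumps it once if the resulting escape clock would reach the 20 MHz LP-mode limit or exceed the requested rate. Isolated as a sketch (assumes esc_clk <= byte_clk, as the caller's configuration implies; dsim_esc_div is an illustrative name):

        /* Pick the escape-clock prescaler so that byte_clk / esc_div
         * stays below 20 MHz and at or under the requested esc_clk. */
        static unsigned int dsim_esc_div(unsigned long byte_clk,
                                         unsigned long esc_clk)
        {
                unsigned int esc_div = byte_clk / esc_clk;

                if (byte_clk / esc_div >= 20 * MHZ ||
                    byte_clk / esc_div > esc_clk)
                        esc_div += 1;

                return esc_div;
        }
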
*/ - exynos_mipi_dsi_enable_byte_clock(dsim, 1); - - /* enable byte clk and escape clock */ - exynos_mipi_dsi_set_esc_clk_prs(dsim, 1, esc_div); - /* escape clock on lane */ - exynos_mipi_dsi_enable_esc_clk_on_lane(dsim, - (DSIM_LANE_CLOCK | dsim->data_lane), 1); - - dev_dbg(dsim->dev, "byte clock is %luMHz\n", - (byte_clk / MHZ)); - dev_dbg(dsim->dev, "escape clock that user's need is %lu\n", - (dsim->dsim_config->esc_clk / MHZ)); - dev_dbg(dsim->dev, "escape clock divider is %x\n", esc_div); - dev_dbg(dsim->dev, "escape clock is %luMHz\n", - ((byte_clk / esc_div) / MHZ)); - - if ((byte_clk / esc_div) > escape_clk) { - esc_clk_error_rate = escape_clk / - (byte_clk / esc_div); - dev_warn(dsim->dev, "error rate is %lu over.\n", - (esc_clk_error_rate / 100)); - } else if ((byte_clk / esc_div) < (escape_clk)) { - esc_clk_error_rate = (byte_clk / esc_div) / - escape_clk; - dev_warn(dsim->dev, "error rate is %lu under.\n", - (esc_clk_error_rate / 100)); - } - } else { - exynos_mipi_dsi_enable_esc_clk_on_lane(dsim, - (DSIM_LANE_CLOCK | dsim->data_lane), 0); - exynos_mipi_dsi_set_esc_clk_prs(dsim, 0, 0); - - /* disable escape clock. */ - exynos_mipi_dsi_enable_byte_clock(dsim, 0); - - if (byte_clk_sel == DSIM_PLL_OUT_DIV8) - exynos_mipi_dsi_pll_on(dsim, 0); - } - - return 0; -} - -int exynos_mipi_dsi_init_dsim(struct mipi_dsim_device *dsim) -{ - dsim->state = DSIM_STATE_INIT; - - switch (dsim->dsim_config->e_no_data_lane) { - case DSIM_DATA_LANE_1: - dsim->data_lane = DSIM_LANE_DATA0; - break; - case DSIM_DATA_LANE_2: - dsim->data_lane = DSIM_LANE_DATA0 | DSIM_LANE_DATA1; - break; - case DSIM_DATA_LANE_3: - dsim->data_lane = DSIM_LANE_DATA0 | DSIM_LANE_DATA1 | - DSIM_LANE_DATA2; - break; - case DSIM_DATA_LANE_4: - dsim->data_lane = DSIM_LANE_DATA0 | DSIM_LANE_DATA1 | - DSIM_LANE_DATA2 | DSIM_LANE_DATA3; - break; - default: - dev_info(dsim->dev, "data lane is invalid.\n"); - return -EINVAL; - } - - exynos_mipi_dsi_sw_reset(dsim); - exynos_mipi_dsi_func_reset(dsim); - - exynos_mipi_dsi_dp_dn_swap(dsim, 0); - - return 0; -} - -void exynos_mipi_dsi_init_interrupt(struct mipi_dsim_device *dsim) -{ - unsigned int src = 0; - - src = (INTSRC_SFR_FIFO_EMPTY | INTSRC_RX_DATA_DONE); - exynos_mipi_dsi_set_interrupt(dsim, src, 1); - - src = 0; - src = ~(INTMSK_RX_DONE | INTMSK_FIFO_EMPTY); - exynos_mipi_dsi_set_interrupt_mask(dsim, src, 1); -} - -int exynos_mipi_dsi_enable_frame_done_int(struct mipi_dsim_device *dsim, - unsigned int enable) -{ - /* enable only frame done interrupt */ - exynos_mipi_dsi_set_interrupt_mask(dsim, INTMSK_FRAME_DONE, enable); - - return 0; -} - -void exynos_mipi_dsi_stand_by(struct mipi_dsim_device *dsim, - unsigned int enable) -{ - - /* consider Main display and Sub display. */ - - exynos_mipi_dsi_set_main_stand_by(dsim, enable); -} - -int exynos_mipi_dsi_set_display_mode(struct mipi_dsim_device *dsim, - struct mipi_dsim_config *dsim_config) -{ - struct mipi_dsim_platform_data *dsim_pd; - struct fb_videomode *timing; - - dsim_pd = (struct mipi_dsim_platform_data *)dsim->pd; - timing = (struct fb_videomode *)dsim_pd->lcd_panel_info; - - /* in case of VIDEO MODE (RGB INTERFACE), it sets polarities. 
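
The lane-setup switch in exynos_mipi_dsi_init_dsim() above reduces to a contiguous bitmask: DSIM_LANE_CLOCK is bit 0 and the data lanes occupy the bits above it. A sketch taking the real lane count 1..4 rather than the enum value (dsim_data_lane_mask is hypothetical):

        /* Data lanes DATA0..DATA(n-1) occupy bits [n:1]. */
        static unsigned int dsim_data_lane_mask(unsigned int nr_lanes)
        {
                return ((1U << nr_lanes) - 1) << 1;
        }

For example, nr_lanes = 4 yields 0x1e, i.e. DSIM_LANE_DATA0 | DSIM_LANE_DATA1 | DSIM_LANE_DATA2 | DSIM_LANE_DATA3.
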
*/ - if (dsim_config->e_interface == (u32) DSIM_VIDEO) { - if (dsim_config->auto_vertical_cnt == 0) { - exynos_mipi_dsi_set_main_disp_vporch(dsim, - dsim_config->cmd_allow, - timing->lower_margin, - timing->upper_margin); - exynos_mipi_dsi_set_main_disp_hporch(dsim, - timing->right_margin, - timing->left_margin); - exynos_mipi_dsi_set_main_disp_sync_area(dsim, - timing->vsync_len, - timing->hsync_len); - } - } - - exynos_mipi_dsi_set_main_disp_resol(dsim, timing->xres, - timing->yres); - - exynos_mipi_dsi_display_config(dsim, dsim_config); - - dev_info(dsim->dev, "lcd panel ==> width = %d, height = %d\n", - timing->xres, timing->yres); - - return 0; -} - -int exynos_mipi_dsi_init_link(struct mipi_dsim_device *dsim) -{ - unsigned int time_out = 100; - - switch (dsim->state) { - case DSIM_STATE_INIT: - exynos_mipi_dsi_init_fifo_pointer(dsim, 0x1f); - - /* dsi configuration */ - exynos_mipi_dsi_init_config(dsim); - exynos_mipi_dsi_enable_lane(dsim, DSIM_LANE_CLOCK, 1); - exynos_mipi_dsi_enable_lane(dsim, dsim->data_lane, 1); - - /* set clock configuration */ - exynos_mipi_dsi_set_clock(dsim, dsim->dsim_config->e_byte_clk, 1); - - /* check clock and data lane state are stop state */ - while (!(exynos_mipi_dsi_is_lane_state(dsim))) { - time_out--; - if (time_out == 0) { - dev_err(dsim->dev, - "DSI Master is not stop state.\n"); - dev_err(dsim->dev, - "Check initialization process\n"); - - return -EINVAL; - } - } - if (time_out != 0) { - dev_info(dsim->dev, - "DSI Master driver has been completed.\n"); - dev_info(dsim->dev, "DSI Master state is stop state\n"); - } - - dsim->state = DSIM_STATE_STOP; - - /* BTA sequence counters */ - exynos_mipi_dsi_set_stop_state_counter(dsim, - dsim->dsim_config->stop_holding_cnt); - exynos_mipi_dsi_set_bta_timeout(dsim, - dsim->dsim_config->bta_timeout); - exynos_mipi_dsi_set_lpdr_timeout(dsim, - dsim->dsim_config->rx_timeout); - - return 0; - default: - dev_info(dsim->dev, "DSI Master is already init.\n"); - return 0; - } - - return 0; -} - -int exynos_mipi_dsi_set_hs_enable(struct mipi_dsim_device *dsim) -{ - if (dsim->state != DSIM_STATE_STOP) { - dev_warn(dsim->dev, "DSIM is not in stop state.\n"); - return 0; - } - - if (dsim->e_clk_src == DSIM_EXT_CLK_BYPASS) { - dev_warn(dsim->dev, "clock source is external bypass.\n"); - return 0; - } - - dsim->state = DSIM_STATE_HSCLKEN; - - /* set LCDC and CPU transfer mode to HS. 
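
exynos_mipi_dsi_init_link() above busy-waits, with a bounded counter and no sleep between polls, for the clock and data lanes to report stop state before programming the BTA counters. The polling pattern on its own, keeping the driver's -EINVAL on timeout (dsim_wait_stop_state is not a driver function):

        /* Bounded poll for lane stop state; mirrors the loop above. */
        static int dsim_wait_stop_state(struct mipi_dsim_device *dsim)
        {
                unsigned int time_out = 100;

                while (!exynos_mipi_dsi_is_lane_state(dsim)) {
                        if (--time_out == 0)
                                return -EINVAL;
                }

                return 0;
        }
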
*/ - exynos_mipi_dsi_set_lcdc_transfer_mode(dsim, 0); - exynos_mipi_dsi_set_cpu_transfer_mode(dsim, 0); - exynos_mipi_dsi_enable_hs_clock(dsim, 1); - - return 0; -} - -int exynos_mipi_dsi_set_data_transfer_mode(struct mipi_dsim_device *dsim, - unsigned int mode) -{ - if (mode) { - if (dsim->state != DSIM_STATE_HSCLKEN) { - dev_err(dsim->dev, "HS Clock lane is not enabled.\n"); - return -EINVAL; - } - - exynos_mipi_dsi_set_lcdc_transfer_mode(dsim, 0); - } else { - if (dsim->state == DSIM_STATE_INIT || dsim->state == - DSIM_STATE_ULPS) { - dev_err(dsim->dev, - "DSI Master is not STOP or HSDT state.\n"); - return -EINVAL; - } - - exynos_mipi_dsi_set_cpu_transfer_mode(dsim, 0); - } - - return 0; -} - -int exynos_mipi_dsi_get_frame_done_status(struct mipi_dsim_device *dsim) -{ - return _exynos_mipi_dsi_get_frame_done_status(dsim); -} - -int exynos_mipi_dsi_clear_frame_done(struct mipi_dsim_device *dsim) -{ - _exynos_mipi_dsi_clear_frame_done(dsim); - - return 0; -} - -int exynos_mipi_dsi_fifo_clear(struct mipi_dsim_device *dsim, - unsigned int val) -{ - int try = TRY_FIFO_CLEAR; - - exynos_mipi_dsi_sw_reset_release(dsim); - exynos_mipi_dsi_func_reset(dsim); - - do { - if (exynos_mipi_dsi_get_sw_reset_release(dsim)) { - exynos_mipi_dsi_init_interrupt(dsim); - dev_dbg(dsim->dev, "reset release done.\n"); - return 0; - } - } while (--try); - - dev_err(dsim->dev, "failed to clear dsim fifo.\n"); - return -EAGAIN; -} - -MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>"); -MODULE_DESCRIPTION("Samsung SoC MIPI-DSI common driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.h b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.h deleted file mode 100644 index 412552274df3..000000000000 --- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.h +++ /dev/null @@ -1,46 +0,0 @@ -/* linux/drivers/video/exynos_mipi_dsi_common.h - * - * Header file for Samsung SoC MIPI-DSI common driver. - * - * Copyright (c) 2012 Samsung Electronics Co., Ltd - * - * InKi Dae <inki.dae@samsung.com> - * Donghwa Lee <dh09.lee@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
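
exynos_mipi_dsi_fifo_clear() above applies the same bounded-retry idea after releasing the software reset: re-check the release flag up to TRY_FIFO_CLEAR times, then give up with -EAGAIN. The retry loop in isolation (dsim_wait_reset_release is an illustrative name):

        /* Re-check the reset-release flag a fixed number of times. */
        static int dsim_wait_reset_release(struct mipi_dsim_device *dsim)
        {
                int try = TRY_FIFO_CLEAR;

                do {
                        if (exynos_mipi_dsi_get_sw_reset_release(dsim))
                                return 0;
                } while (--try);

                return -EAGAIN;
        }
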
-*/ - -#ifndef _EXYNOS_MIPI_DSI_COMMON_H -#define _EXYNOS_MIPI_DSI_COMMON_H - -static DECLARE_COMPLETION(dsim_rd_comp); -static DECLARE_COMPLETION(dsim_wr_comp); - -int exynos_mipi_dsi_wr_data(struct mipi_dsim_device *dsim, unsigned int data_id, - const unsigned char *data0, unsigned int data_size); -int exynos_mipi_dsi_rd_data(struct mipi_dsim_device *dsim, unsigned int data_id, - unsigned int data0, unsigned int req_size, u8 *rx_buf); -irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id); -void exynos_mipi_dsi_init_interrupt(struct mipi_dsim_device *dsim); -int exynos_mipi_dsi_init_dsim(struct mipi_dsim_device *dsim); -void exynos_mipi_dsi_stand_by(struct mipi_dsim_device *dsim, - unsigned int enable); -int exynos_mipi_dsi_set_display_mode(struct mipi_dsim_device *dsim, - struct mipi_dsim_config *dsim_info); -int exynos_mipi_dsi_init_link(struct mipi_dsim_device *dsim); -int exynos_mipi_dsi_set_hs_enable(struct mipi_dsim_device *dsim); -int exynos_mipi_dsi_set_data_transfer_mode(struct mipi_dsim_device *dsim, - unsigned int mode); -int exynos_mipi_dsi_enable_frame_done_int(struct mipi_dsim_device *dsim, - unsigned int enable); -int exynos_mipi_dsi_get_frame_done_status(struct mipi_dsim_device *dsim); -int exynos_mipi_dsi_clear_frame_done(struct mipi_dsim_device *dsim); - -extern struct fb_info *registered_fb[FB_MAX] __read_mostly; - -int exynos_mipi_dsi_fifo_clear(struct mipi_dsim_device *dsim, - unsigned int val); - -#endif /* _EXYNOS_MIPI_DSI_COMMON_H */ diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.c deleted file mode 100644 index c148d06540c1..000000000000 --- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.c +++ /dev/null @@ -1,618 +0,0 @@ -/* linux/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c - * - * Samsung SoC MIPI-DSI lowlevel driver. - * - * Copyright (c) 2012 Samsung Electronics Co., Ltd - * - * InKi Dae, <inki.dae@samsung.com> - * Donghwa Lee, <dh09.lee@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
-*/ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/mutex.h> -#include <linux/wait.h> -#include <linux/delay.h> -#include <linux/fs.h> -#include <linux/mm.h> -#include <linux/ctype.h> -#include <linux/platform_device.h> -#include <linux/io.h> - -#include <video/exynos_mipi_dsim.h> - -#include "exynos_mipi_dsi_regs.h" -#include "exynos_mipi_dsi_lowlevel.h" - -void exynos_mipi_dsi_func_reset(struct mipi_dsim_device *dsim) -{ - unsigned int reg; - - reg = readl(dsim->reg_base + EXYNOS_DSIM_SWRST); - - reg |= DSIM_FUNCRST; - - writel(reg, dsim->reg_base + EXYNOS_DSIM_SWRST); -} - -void exynos_mipi_dsi_sw_reset(struct mipi_dsim_device *dsim) -{ - unsigned int reg; - - reg = readl(dsim->reg_base + EXYNOS_DSIM_SWRST); - - reg |= DSIM_SWRST; - - writel(reg, dsim->reg_base + EXYNOS_DSIM_SWRST); -} - -void exynos_mipi_dsi_sw_reset_release(struct mipi_dsim_device *dsim) -{ - unsigned int reg; - - reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC); - - reg |= INTSRC_SW_RST_RELEASE; - - writel(reg, dsim->reg_base + EXYNOS_DSIM_INTSRC); -} - -int exynos_mipi_dsi_get_sw_reset_release(struct mipi_dsim_device *dsim) -{ - return (readl(dsim->reg_base + EXYNOS_DSIM_INTSRC)) & - INTSRC_SW_RST_RELEASE; -} - -unsigned int exynos_mipi_dsi_read_interrupt_mask(struct mipi_dsim_device *dsim) -{ - unsigned int reg; - - reg = readl(dsim->reg_base + EXYNOS_DSIM_INTMSK); - - return reg; -} - -void exynos_mipi_dsi_set_interrupt_mask(struct mipi_dsim_device *dsim, - unsigned int mode, unsigned int mask) -{ - unsigned int reg = 0; - - if (mask) - reg |= mode; - else - reg &= ~mode; - - writel(reg, dsim->reg_base + EXYNOS_DSIM_INTMSK); -} - -void exynos_mipi_dsi_init_fifo_pointer(struct mipi_dsim_device *dsim, - unsigned int cfg) -{ - unsigned int reg; - - reg = readl(dsim->reg_base + EXYNOS_DSIM_FIFOCTRL); - - writel(reg & ~(cfg), dsim->reg_base + EXYNOS_DSIM_FIFOCTRL); - mdelay(10); - reg |= cfg; - - writel(reg, dsim->reg_base + EXYNOS_DSIM_FIFOCTRL); -} - -/* - * this function set PLL P, M and S value in D-PHY - */ -void exynos_mipi_dsi_set_phy_tunning(struct mipi_dsim_device *dsim, - unsigned int value) -{ - writel(DSIM_AFC_CTL(value), dsim->reg_base + EXYNOS_DSIM_PHYACCHR); -} - -void exynos_mipi_dsi_set_main_stand_by(struct mipi_dsim_device *dsim, - unsigned int enable) -{ - unsigned int reg; - - reg = readl(dsim->reg_base + EXYNOS_DSIM_MDRESOL); - - reg &= ~DSIM_MAIN_STAND_BY; - - if (enable) - reg |= DSIM_MAIN_STAND_BY; - - writel(reg, dsim->reg_base + EXYNOS_DSIM_MDRESOL); -} - -void exynos_mipi_dsi_set_main_disp_resol(struct mipi_dsim_device *dsim, - unsigned int width_resol, unsigned int height_resol) -{ - unsigned int reg; - - /* standby should be set after configuration so set to not ready*/ - reg = (readl(dsim->reg_base + EXYNOS_DSIM_MDRESOL)) & - ~(DSIM_MAIN_STAND_BY); - writel(reg, dsim->reg_base + EXYNOS_DSIM_MDRESOL); - - reg &= ~((0x7ff << 16) | (0x7ff << 0)); - reg |= DSIM_MAIN_VRESOL(height_resol) | DSIM_MAIN_HRESOL(width_resol); - - reg |= DSIM_MAIN_STAND_BY; - writel(reg, dsim->reg_base + EXYNOS_DSIM_MDRESOL); -} - -void exynos_mipi_dsi_set_main_disp_vporch(struct mipi_dsim_device *dsim, - unsigned int cmd_allow, unsigned int vfront, unsigned int vback) -{ - unsigned int reg; - - reg = (readl(dsim->reg_base + EXYNOS_DSIM_MVPORCH)) & - ~((DSIM_CMD_ALLOW_MASK) | (DSIM_STABLE_VFP_MASK) | - (DSIM_MAIN_VBP_MASK)); - - reg |= (DSIM_CMD_ALLOW_SHIFT(cmd_allow & 0xf) | - DSIM_STABLE_VFP_SHIFT(vfront & 0x7ff) | - DSIM_MAIN_VBP_SHIFT(vback & 0x7ff)); - - 
writel(reg, dsim->reg_base + EXYNOS_DSIM_MVPORCH); -} - -void exynos_mipi_dsi_set_main_disp_hporch(struct mipi_dsim_device *dsim, - unsigned int front, unsigned int back) -{ - unsigned int reg; - - reg = (readl(dsim->reg_base + EXYNOS_DSIM_MHPORCH)) & - ~((DSIM_MAIN_HFP_MASK) | (DSIM_MAIN_HBP_MASK)); - - reg |= DSIM_MAIN_HFP_SHIFT(front) | DSIM_MAIN_HBP_SHIFT(back); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_MHPORCH); -} - -void exynos_mipi_dsi_set_main_disp_sync_area(struct mipi_dsim_device *dsim, - unsigned int vert, unsigned int hori) -{ - unsigned int reg; - - reg = (readl(dsim->reg_base + EXYNOS_DSIM_MSYNC)) & - ~((DSIM_MAIN_VSA_MASK) | (DSIM_MAIN_HSA_MASK)); - - reg |= (DSIM_MAIN_VSA_SHIFT(vert & 0x3ff) | - DSIM_MAIN_HSA_SHIFT(hori)); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_MSYNC); -} - -void exynos_mipi_dsi_set_sub_disp_resol(struct mipi_dsim_device *dsim, - unsigned int vert, unsigned int hori) -{ - unsigned int reg; - - reg = (readl(dsim->reg_base + EXYNOS_DSIM_SDRESOL)) & - ~(DSIM_SUB_STANDY_MASK); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_SDRESOL); - - reg &= ~(DSIM_SUB_VRESOL_MASK) | ~(DSIM_SUB_HRESOL_MASK); - reg |= (DSIM_SUB_VRESOL_SHIFT(vert & 0x7ff) | - DSIM_SUB_HRESOL_SHIFT(hori & 0x7ff)); - writel(reg, dsim->reg_base + EXYNOS_DSIM_SDRESOL); - - reg |= DSIM_SUB_STANDY_SHIFT(1); - writel(reg, dsim->reg_base + EXYNOS_DSIM_SDRESOL); -} - -void exynos_mipi_dsi_init_config(struct mipi_dsim_device *dsim) -{ - struct mipi_dsim_config *dsim_config = dsim->dsim_config; - - unsigned int cfg = (readl(dsim->reg_base + EXYNOS_DSIM_CONFIG)) & - ~((1 << 28) | (0x1f << 20) | (0x3 << 5)); - - cfg = ((DSIM_AUTO_FLUSH(dsim_config->auto_flush)) | - (DSIM_EOT_DISABLE(dsim_config->eot_disable)) | - (DSIM_AUTO_MODE_SHIFT(dsim_config->auto_vertical_cnt)) | - (DSIM_HSE_MODE_SHIFT(dsim_config->hse)) | - (DSIM_HFP_MODE_SHIFT(dsim_config->hfp)) | - (DSIM_HBP_MODE_SHIFT(dsim_config->hbp)) | - (DSIM_HSA_MODE_SHIFT(dsim_config->hsa)) | - (DSIM_NUM_OF_DATALANE_SHIFT(dsim_config->e_no_data_lane))); - - writel(cfg, dsim->reg_base + EXYNOS_DSIM_CONFIG); -} - -void exynos_mipi_dsi_display_config(struct mipi_dsim_device *dsim, - struct mipi_dsim_config *dsim_config) -{ - u32 reg = (readl(dsim->reg_base + EXYNOS_DSIM_CONFIG)) & - ~((0x3 << 26) | (1 << 25) | (0x3 << 18) | (0x7 << 12) | - (0x3 << 16) | (0x7 << 8)); - - if (dsim_config->e_interface == DSIM_VIDEO) - reg |= (1 << 25); - else if (dsim_config->e_interface == DSIM_COMMAND) - reg &= ~(1 << 25); - else { - dev_err(dsim->dev, "unknown lcd type.\n"); - return; - } - - /* main lcd */ - reg |= ((u8) (dsim_config->e_burst_mode) & 0x3) << 26 | - ((u8) (dsim_config->e_virtual_ch) & 0x3) << 18 | - ((u8) (dsim_config->e_pixel_format) & 0x7) << 12; - - writel(reg, dsim->reg_base + EXYNOS_DSIM_CONFIG); -} - -void exynos_mipi_dsi_enable_lane(struct mipi_dsim_device *dsim, unsigned int lane, - unsigned int enable) -{ - unsigned int reg; - - reg = readl(dsim->reg_base + EXYNOS_DSIM_CONFIG); - - if (enable) - reg |= DSIM_LANE_ENx(lane); - else - reg &= ~DSIM_LANE_ENx(lane); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_CONFIG); -} - - -void exynos_mipi_dsi_set_data_lane_number(struct mipi_dsim_device *dsim, - unsigned int count) -{ - unsigned int cfg; - - /* get the data lane number. 
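
Nearly every helper in this lowlevel file follows one shape: read a register, clear a field, merge in the new value, write it back. Factored out as a sketch (dsim_update_bits is hypothetical; the driver open-codes this per register):

        /* Generic read-modify-write on a DSIM register. */
        static void dsim_update_bits(struct mipi_dsim_device *dsim,
                                     unsigned int offset, u32 mask, u32 val)
        {
                u32 reg = readl(dsim->reg_base + offset);

                reg &= ~mask;
                reg |= val & mask;
                writel(reg, dsim->reg_base + offset);
        }
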
*/ - cfg = DSIM_NUM_OF_DATALANE_SHIFT(count); - - writel(cfg, dsim->reg_base + EXYNOS_DSIM_CONFIG); -} - -void exynos_mipi_dsi_enable_afc(struct mipi_dsim_device *dsim, unsigned int enable, - unsigned int afc_code) -{ - unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_PHYACCHR); - - if (enable) { - reg |= (1 << 14); - reg &= ~(0x7 << 5); - reg |= (afc_code & 0x7) << 5; - } else - reg &= ~(1 << 14); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_PHYACCHR); -} - -void exynos_mipi_dsi_enable_pll_bypass(struct mipi_dsim_device *dsim, - unsigned int enable) -{ - unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) & - ~(DSIM_PLL_BYPASS_SHIFT(0x1)); - - reg |= DSIM_PLL_BYPASS_SHIFT(enable); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL); -} - -void exynos_mipi_dsi_set_pll_pms(struct mipi_dsim_device *dsim, unsigned int p, - unsigned int m, unsigned int s) -{ - unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL); - - reg |= ((p & 0x3f) << 13) | ((m & 0x1ff) << 4) | ((s & 0x7) << 1); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL); -} - -void exynos_mipi_dsi_pll_freq_band(struct mipi_dsim_device *dsim, - unsigned int freq_band) -{ - unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) & - ~(DSIM_FREQ_BAND_SHIFT(0x1f)); - - reg |= DSIM_FREQ_BAND_SHIFT(freq_band & 0x1f); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL); -} - -void exynos_mipi_dsi_pll_freq(struct mipi_dsim_device *dsim, - unsigned int pre_divider, unsigned int main_divider, - unsigned int scaler) -{ - unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) & - ~(0x7ffff << 1); - - reg |= (pre_divider & 0x3f) << 13 | (main_divider & 0x1ff) << 4 | - (scaler & 0x7) << 1; - - writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL); -} - -void exynos_mipi_dsi_pll_stable_time(struct mipi_dsim_device *dsim, - unsigned int lock_time) -{ - writel(lock_time, dsim->reg_base + EXYNOS_DSIM_PLLTMR); -} - -void exynos_mipi_dsi_enable_pll(struct mipi_dsim_device *dsim, unsigned int enable) -{ - unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) & - ~(DSIM_PLL_EN_SHIFT(0x1)); - - reg |= DSIM_PLL_EN_SHIFT(enable & 0x1); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL); -} - -void exynos_mipi_dsi_set_byte_clock_src(struct mipi_dsim_device *dsim, - unsigned int src) -{ - unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) & - ~(DSIM_BYTE_CLK_SRC_SHIFT(0x3)); - - reg |= (DSIM_BYTE_CLK_SRC_SHIFT(src)); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL); -} - -void exynos_mipi_dsi_enable_byte_clock(struct mipi_dsim_device *dsim, - unsigned int enable) -{ - unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) & - ~(DSIM_BYTE_CLKEN_SHIFT(0x1)); - - reg |= DSIM_BYTE_CLKEN_SHIFT(enable); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL); -} - -void exynos_mipi_dsi_set_esc_clk_prs(struct mipi_dsim_device *dsim, - unsigned int enable, unsigned int prs_val) -{ - unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) & - ~(DSIM_ESC_CLKEN_SHIFT(0x1) | 0xffff); - - reg |= DSIM_ESC_CLKEN_SHIFT(enable); - if (enable) - reg |= prs_val; - - writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL); -} - -void exynos_mipi_dsi_enable_esc_clk_on_lane(struct mipi_dsim_device *dsim, - unsigned int lane_sel, unsigned int enable) -{ - unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL); - - if (enable) - reg |= DSIM_LANE_ESC_CLKEN(lane_sel); - else - - reg &= ~DSIM_LANE_ESC_CLKEN(lane_sel); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL); -} - -void 
exynos_mipi_dsi_force_dphy_stop_state(struct mipi_dsim_device *dsim, - unsigned int enable) -{ - unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE)) & - ~(DSIM_FORCE_STOP_STATE_SHIFT(0x1)); - - reg |= (DSIM_FORCE_STOP_STATE_SHIFT(enable & 0x1)); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE); -} - -unsigned int exynos_mipi_dsi_is_lane_state(struct mipi_dsim_device *dsim) -{ - unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_STATUS); - - /** - * check clock and data lane states. - * if MIPI-DSI controller was enabled at bootloader then - * TX_READY_HS_CLK is enabled otherwise STOP_STATE_CLK. - * so it should be checked for two case. - */ - if ((reg & DSIM_STOP_STATE_DAT(0xf)) && - ((reg & DSIM_STOP_STATE_CLK) || - (reg & DSIM_TX_READY_HS_CLK))) - return 1; - - return 0; -} - -void exynos_mipi_dsi_set_stop_state_counter(struct mipi_dsim_device *dsim, - unsigned int cnt_val) -{ - unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE)) & - ~(DSIM_STOP_STATE_CNT_SHIFT(0x7ff)); - - reg |= (DSIM_STOP_STATE_CNT_SHIFT(cnt_val & 0x7ff)); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE); -} - -void exynos_mipi_dsi_set_bta_timeout(struct mipi_dsim_device *dsim, - unsigned int timeout) -{ - unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_TIMEOUT)) & - ~(DSIM_BTA_TOUT_SHIFT(0xff)); - - reg |= (DSIM_BTA_TOUT_SHIFT(timeout)); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_TIMEOUT); -} - -void exynos_mipi_dsi_set_lpdr_timeout(struct mipi_dsim_device *dsim, - unsigned int timeout) -{ - unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_TIMEOUT)) & - ~(DSIM_LPDR_TOUT_SHIFT(0xffff)); - - reg |= (DSIM_LPDR_TOUT_SHIFT(timeout)); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_TIMEOUT); -} - -void exynos_mipi_dsi_set_cpu_transfer_mode(struct mipi_dsim_device *dsim, - unsigned int lp) -{ - unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE); - - reg &= ~DSIM_CMD_LPDT_LP; - - if (lp) - reg |= DSIM_CMD_LPDT_LP; - - writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE); -} - -void exynos_mipi_dsi_set_lcdc_transfer_mode(struct mipi_dsim_device *dsim, - unsigned int lp) -{ - unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE); - - reg &= ~DSIM_TX_LPDT_LP; - - if (lp) - reg |= DSIM_TX_LPDT_LP; - - writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE); -} - -void exynos_mipi_dsi_enable_hs_clock(struct mipi_dsim_device *dsim, - unsigned int enable) -{ - unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) & - ~(DSIM_TX_REQUEST_HSCLK_SHIFT(0x1)); - - reg |= DSIM_TX_REQUEST_HSCLK_SHIFT(enable); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL); -} - -void exynos_mipi_dsi_dp_dn_swap(struct mipi_dsim_device *dsim, - unsigned int swap_en) -{ - unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_PHYACCHR1); - - reg &= ~(0x3 << 0); - reg |= (swap_en & 0x3) << 0; - - writel(reg, dsim->reg_base + EXYNOS_DSIM_PHYACCHR1); -} - -void exynos_mipi_dsi_hs_zero_ctrl(struct mipi_dsim_device *dsim, - unsigned int hs_zero) -{ - unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) & - ~(0xf << 28); - - reg |= ((hs_zero & 0xf) << 28); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL); -} - -void exynos_mipi_dsi_prep_ctrl(struct mipi_dsim_device *dsim, unsigned int prep) -{ - unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) & - ~(0x7 << 20); - - reg |= ((prep & 0x7) << 20); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL); -} - -unsigned int exynos_mipi_dsi_read_interrupt(struct mipi_dsim_device *dsim) -{ - return 
readl(dsim->reg_base + EXYNOS_DSIM_INTSRC); -} - -void exynos_mipi_dsi_clear_interrupt(struct mipi_dsim_device *dsim, - unsigned int src) -{ - unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC); - - reg |= src; - - writel(reg, dsim->reg_base + EXYNOS_DSIM_INTSRC); -} - -void exynos_mipi_dsi_set_interrupt(struct mipi_dsim_device *dsim, - unsigned int src, unsigned int enable) -{ - unsigned int reg = 0; - - if (enable) - reg |= src; - else - reg &= ~src; - - writel(reg, dsim->reg_base + EXYNOS_DSIM_INTSRC); -} - -unsigned int exynos_mipi_dsi_is_pll_stable(struct mipi_dsim_device *dsim) -{ - unsigned int reg; - - reg = readl(dsim->reg_base + EXYNOS_DSIM_STATUS); - - return reg & (1 << 31) ? 1 : 0; -} - -unsigned int exynos_mipi_dsi_get_fifo_state(struct mipi_dsim_device *dsim) -{ - return readl(dsim->reg_base + EXYNOS_DSIM_FIFOCTRL) & ~(0x1f); -} - -void exynos_mipi_dsi_wr_tx_header(struct mipi_dsim_device *dsim, - unsigned int di, unsigned int data0, unsigned int data1) -{ - unsigned int reg = (data1 << 16) | (data0 << 8) | ((di & 0x3f) << 0); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_PKTHDR); -} - -void exynos_mipi_dsi_rd_tx_header(struct mipi_dsim_device *dsim, - unsigned int di, unsigned int data0) -{ - unsigned int reg = (data0 << 8) | (di << 0); - - writel(reg, dsim->reg_base + EXYNOS_DSIM_PKTHDR); -} - -unsigned int exynos_mipi_dsi_rd_rx_fifo(struct mipi_dsim_device *dsim) -{ - return readl(dsim->reg_base + EXYNOS_DSIM_RXFIFO); -} - -unsigned int _exynos_mipi_dsi_get_frame_done_status(struct mipi_dsim_device *dsim) -{ - unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC); - - return (reg & INTSRC_FRAME_DONE) ? 1 : 0; -} - -void _exynos_mipi_dsi_clear_frame_done(struct mipi_dsim_device *dsim) -{ - unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC); - - writel(reg | INTSRC_FRAME_DONE, dsim->reg_base + - EXYNOS_DSIM_INTSRC); -} - -void exynos_mipi_dsi_wr_tx_data(struct mipi_dsim_device *dsim, - unsigned int tx_data) -{ - writel(tx_data, dsim->reg_base + EXYNOS_DSIM_PAYLOAD); -} diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.h b/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.h deleted file mode 100644 index 85460701c7ea..000000000000 --- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.h +++ /dev/null @@ -1,112 +0,0 @@ -/* linux/drivers/video/exynos/exynos_mipi_dsi_lowlevel.h - * - * Header file for Samsung SoC MIPI-DSI lowlevel driver. - * - * Copyright (c) 2012 Samsung Electronics Co., Ltd - * - * InKi Dae <inki.dae@samsung.com> - * Donghwa Lee <dh09.lee@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
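
The header writers above show the packet-header layout used by EXYNOS_DSIM_PKTHDR: the data identifier sits in bits [5:0], followed by the two parameter bytes, which carry the word count for long packets. As a sketch (dsim_pkt_header is not a driver function):

        /* Compose a packet header word: di | data0 << 8 | data1 << 16. */
        static u32 dsim_pkt_header(u8 di, u8 data0, u8 data1)
        {
                return ((u32)data1 << 16) | ((u32)data0 << 8) | (di & 0x3f);
        }
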
-*/ - -#ifndef _EXYNOS_MIPI_DSI_LOWLEVEL_H -#define _EXYNOS_MIPI_DSI_LOWLEVEL_H - -void exynos_mipi_dsi_func_reset(struct mipi_dsim_device *dsim); -void exynos_mipi_dsi_sw_reset(struct mipi_dsim_device *dsim); -void exynos_mipi_dsi_sw_reset_release(struct mipi_dsim_device *dsim); -int exynos_mipi_dsi_get_sw_reset_release(struct mipi_dsim_device *dsim); -void exynos_mipi_dsi_set_interrupt_mask(struct mipi_dsim_device *dsim, - unsigned int mode, unsigned int mask); -void exynos_mipi_dsi_set_data_lane_number(struct mipi_dsim_device *dsim, - unsigned int count); -void exynos_mipi_dsi_init_fifo_pointer(struct mipi_dsim_device *dsim, - unsigned int cfg); -void exynos_mipi_dsi_set_phy_tunning(struct mipi_dsim_device *dsim, - unsigned int value); -void exynos_mipi_dsi_set_phy_tunning(struct mipi_dsim_device *dsim, - unsigned int value); -void exynos_mipi_dsi_set_main_stand_by(struct mipi_dsim_device *dsim, - unsigned int enable); -void exynos_mipi_dsi_set_main_disp_resol(struct mipi_dsim_device *dsim, - unsigned int width_resol, unsigned int height_resol); -void exynos_mipi_dsi_set_main_disp_vporch(struct mipi_dsim_device *dsim, - unsigned int cmd_allow, unsigned int vfront, unsigned int vback); -void exynos_mipi_dsi_set_main_disp_hporch(struct mipi_dsim_device *dsim, - unsigned int front, unsigned int back); -void exynos_mipi_dsi_set_main_disp_sync_area(struct mipi_dsim_device *dsim, - unsigned int vert, unsigned int hori); -void exynos_mipi_dsi_set_sub_disp_resol(struct mipi_dsim_device *dsim, - unsigned int vert, unsigned int hori); -void exynos_mipi_dsi_init_config(struct mipi_dsim_device *dsim); -void exynos_mipi_dsi_display_config(struct mipi_dsim_device *dsim, - struct mipi_dsim_config *dsim_config); -void exynos_mipi_dsi_set_data_lane_number(struct mipi_dsim_device *dsim, - unsigned int count); -void exynos_mipi_dsi_enable_lane(struct mipi_dsim_device *dsim, unsigned int lane, - unsigned int enable); -void exynos_mipi_dsi_enable_afc(struct mipi_dsim_device *dsim, unsigned int enable, - unsigned int afc_code); -void exynos_mipi_dsi_enable_pll_bypass(struct mipi_dsim_device *dsim, - unsigned int enable); -void exynos_mipi_dsi_set_pll_pms(struct mipi_dsim_device *dsim, unsigned int p, - unsigned int m, unsigned int s); -void exynos_mipi_dsi_pll_freq_band(struct mipi_dsim_device *dsim, - unsigned int freq_band); -void exynos_mipi_dsi_pll_freq(struct mipi_dsim_device *dsim, - unsigned int pre_divider, unsigned int main_divider, - unsigned int scaler); -void exynos_mipi_dsi_pll_stable_time(struct mipi_dsim_device *dsim, - unsigned int lock_time); -void exynos_mipi_dsi_enable_pll(struct mipi_dsim_device *dsim, - unsigned int enable); -void exynos_mipi_dsi_set_byte_clock_src(struct mipi_dsim_device *dsim, - unsigned int src); -void exynos_mipi_dsi_enable_byte_clock(struct mipi_dsim_device *dsim, - unsigned int enable); -void exynos_mipi_dsi_set_esc_clk_prs(struct mipi_dsim_device *dsim, - unsigned int enable, unsigned int prs_val); -void exynos_mipi_dsi_enable_esc_clk_on_lane(struct mipi_dsim_device *dsim, - unsigned int lane_sel, unsigned int enable); -void exynos_mipi_dsi_force_dphy_stop_state(struct mipi_dsim_device *dsim, - unsigned int enable); -unsigned int exynos_mipi_dsi_is_lane_state(struct mipi_dsim_device *dsim); -void exynos_mipi_dsi_set_stop_state_counter(struct mipi_dsim_device *dsim, - unsigned int cnt_val); -void exynos_mipi_dsi_set_bta_timeout(struct mipi_dsim_device *dsim, - unsigned int timeout); -void exynos_mipi_dsi_set_lpdr_timeout(struct mipi_dsim_device *dsim, - unsigned 
int timeout); -void exynos_mipi_dsi_set_lcdc_transfer_mode(struct mipi_dsim_device *dsim, - unsigned int lp); -void exynos_mipi_dsi_set_cpu_transfer_mode(struct mipi_dsim_device *dsim, - unsigned int lp); -void exynos_mipi_dsi_enable_hs_clock(struct mipi_dsim_device *dsim, - unsigned int enable); -void exynos_mipi_dsi_dp_dn_swap(struct mipi_dsim_device *dsim, - unsigned int swap_en); -void exynos_mipi_dsi_hs_zero_ctrl(struct mipi_dsim_device *dsim, - unsigned int hs_zero); -void exynos_mipi_dsi_prep_ctrl(struct mipi_dsim_device *dsim, unsigned int prep); -unsigned int exynos_mipi_dsi_read_interrupt(struct mipi_dsim_device *dsim); -unsigned int exynos_mipi_dsi_read_interrupt_mask(struct mipi_dsim_device *dsim); -void exynos_mipi_dsi_clear_interrupt(struct mipi_dsim_device *dsim, - unsigned int src); -void exynos_mipi_dsi_set_interrupt(struct mipi_dsim_device *dsim, - unsigned int src, unsigned int enable); -unsigned int exynos_mipi_dsi_is_pll_stable(struct mipi_dsim_device *dsim); -unsigned int exynos_mipi_dsi_get_fifo_state(struct mipi_dsim_device *dsim); -unsigned int _exynos_mipi_dsi_get_frame_done_status(struct mipi_dsim_device *dsim); -void _exynos_mipi_dsi_clear_frame_done(struct mipi_dsim_device *dsim); -void exynos_mipi_dsi_wr_tx_header(struct mipi_dsim_device *dsim, unsigned int di, - unsigned int data0, unsigned int data1); -void exynos_mipi_dsi_wr_tx_data(struct mipi_dsim_device *dsim, - unsigned int tx_data); -void exynos_mipi_dsi_rd_tx_header(struct mipi_dsim_device *dsim, - unsigned int data0, unsigned int data1); -unsigned int exynos_mipi_dsi_rd_rx_fifo(struct mipi_dsim_device *dsim); - -#endif /* _EXYNOS_MIPI_DSI_LOWLEVEL_H */ diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_regs.h b/drivers/video/fbdev/exynos/exynos_mipi_dsi_regs.h deleted file mode 100644 index 4227106d3fd0..000000000000 --- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_regs.h +++ /dev/null @@ -1,149 +0,0 @@ -/* linux/driver/video/exynos/exynos_mipi_dsi_regs.h - * - * Register definition file for Samsung MIPI-DSIM driver - * - * Copyright (c) 2012 Samsung Electronics Co., Ltd - * - * InKi Dae <inki.dae@samsung.com> - * Donghwa Lee <dh09.lee@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
-*/ - -#ifndef _EXYNOS_MIPI_DSI_REGS_H -#define _EXYNOS_MIPI_DSI_REGS_H - -#define EXYNOS_DSIM_STATUS 0x0 /* Status register */ -#define EXYNOS_DSIM_SWRST 0x4 /* Software reset register */ -#define EXYNOS_DSIM_CLKCTRL 0x8 /* Clock control register */ -#define EXYNOS_DSIM_TIMEOUT 0xc /* Time out register */ -#define EXYNOS_DSIM_CONFIG 0x10 /* Configuration register */ -#define EXYNOS_DSIM_ESCMODE 0x14 /* Escape mode register */ - -/* Main display image resolution register */ -#define EXYNOS_DSIM_MDRESOL 0x18 -#define EXYNOS_DSIM_MVPORCH 0x1c /* Main display Vporch register */ -#define EXYNOS_DSIM_MHPORCH 0x20 /* Main display Hporch register */ -#define EXYNOS_DSIM_MSYNC 0x24 /* Main display sync area register */ - -/* Sub display image resolution register */ -#define EXYNOS_DSIM_SDRESOL 0x28 -#define EXYNOS_DSIM_INTSRC 0x2c /* Interrupt source register */ -#define EXYNOS_DSIM_INTMSK 0x30 /* Interrupt mask register */ -#define EXYNOS_DSIM_PKTHDR 0x34 /* Packet Header FIFO register */ -#define EXYNOS_DSIM_PAYLOAD 0x38 /* Payload FIFO register */ -#define EXYNOS_DSIM_RXFIFO 0x3c /* Read FIFO register */ -#define EXYNOS_DSIM_FIFOTHLD 0x40 /* FIFO threshold level register */ -#define EXYNOS_DSIM_FIFOCTRL 0x44 /* FIFO status and control register */ - -/* FIFO memory AC characteristic register */ -#define EXYNOS_DSIM_PLLCTRL 0x4c /* PLL control register */ -#define EXYNOS_DSIM_PLLTMR 0x50 /* PLL timer register */ -#define EXYNOS_DSIM_PHYACCHR 0x54 /* D-PHY AC characteristic register */ -#define EXYNOS_DSIM_PHYACCHR1 0x58 /* D-PHY AC characteristic register1 */ - -/* DSIM_STATUS */ -#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0) -#define DSIM_STOP_STATE_CLK (1 << 8) -#define DSIM_TX_READY_HS_CLK (1 << 10) - -/* DSIM_SWRST */ -#define DSIM_FUNCRST (1 << 16) -#define DSIM_SWRST (1 << 0) - -/* EXYNOS_DSIM_TIMEOUT */ -#define DSIM_LPDR_TOUT_SHIFT(x) ((x) << 0) -#define DSIM_BTA_TOUT_SHIFT(x) ((x) << 16) - -/* EXYNOS_DSIM_CLKCTRL */ -#define DSIM_LANE_ESC_CLKEN(x) (((x) & 0x1f) << 19) -#define DSIM_BYTE_CLKEN_SHIFT(x) ((x) << 24) -#define DSIM_BYTE_CLK_SRC_SHIFT(x) ((x) << 25) -#define DSIM_PLL_BYPASS_SHIFT(x) ((x) << 27) -#define DSIM_ESC_CLKEN_SHIFT(x) ((x) << 28) -#define DSIM_TX_REQUEST_HSCLK_SHIFT(x) ((x) << 31) - -/* EXYNOS_DSIM_CONFIG */ -#define DSIM_LANE_ENx(x) (((x) & 0x1f) << 0) -#define DSIM_NUM_OF_DATALANE_SHIFT(x) ((x) << 5) -#define DSIM_HSA_MODE_SHIFT(x) ((x) << 20) -#define DSIM_HBP_MODE_SHIFT(x) ((x) << 21) -#define DSIM_HFP_MODE_SHIFT(x) ((x) << 22) -#define DSIM_HSE_MODE_SHIFT(x) ((x) << 23) -#define DSIM_AUTO_MODE_SHIFT(x) ((x) << 24) -#define DSIM_EOT_DISABLE(x) ((x) << 28) -#define DSIM_AUTO_FLUSH(x) ((x) << 29) - -#define DSIM_NUM_OF_DATA_LANE(x) ((x) << DSIM_NUM_OF_DATALANE_SHIFT) - -/* EXYNOS_DSIM_ESCMODE */ -#define DSIM_TX_LPDT_LP (1 << 6) -#define DSIM_CMD_LPDT_LP (1 << 7) -#define DSIM_FORCE_STOP_STATE_SHIFT(x) ((x) << 20) -#define DSIM_STOP_STATE_CNT_SHIFT(x) ((x) << 21) - -/* EXYNOS_DSIM_MDRESOL */ -#define DSIM_MAIN_STAND_BY (1 << 31) -#define DSIM_MAIN_VRESOL(x) (((x) & 0x7ff) << 16) -#define DSIM_MAIN_HRESOL(x) (((x) & 0X7ff) << 0) - -/* EXYNOS_DSIM_MVPORCH */ -#define DSIM_CMD_ALLOW_SHIFT(x) ((x) << 28) -#define DSIM_STABLE_VFP_SHIFT(x) ((x) << 16) -#define DSIM_MAIN_VBP_SHIFT(x) ((x) << 0) -#define DSIM_CMD_ALLOW_MASK (0xf << 28) -#define DSIM_STABLE_VFP_MASK (0x7ff << 16) -#define DSIM_MAIN_VBP_MASK (0x7ff << 0) - -/* EXYNOS_DSIM_MHPORCH */ -#define DSIM_MAIN_HFP_SHIFT(x) ((x) << 16) -#define DSIM_MAIN_HBP_SHIFT(x) ((x) << 0) -#define DSIM_MAIN_HFP_MASK ((0xffff) 
<< 16) -#define DSIM_MAIN_HBP_MASK ((0xffff) << 0) - -/* EXYNOS_DSIM_MSYNC */ -#define DSIM_MAIN_VSA_SHIFT(x) ((x) << 22) -#define DSIM_MAIN_HSA_SHIFT(x) ((x) << 0) -#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22) -#define DSIM_MAIN_HSA_MASK ((0xffff) << 0) - -/* EXYNOS_DSIM_SDRESOL */ -#define DSIM_SUB_STANDY_SHIFT(x) ((x) << 31) -#define DSIM_SUB_VRESOL_SHIFT(x) ((x) << 16) -#define DSIM_SUB_HRESOL_SHIFT(x) ((x) << 0) -#define DSIM_SUB_STANDY_MASK ((0x1) << 31) -#define DSIM_SUB_VRESOL_MASK ((0x7ff) << 16) -#define DSIM_SUB_HRESOL_MASK ((0x7ff) << 0) - -/* EXYNOS_DSIM_INTSRC */ -#define INTSRC_PLL_STABLE (1 << 31) -#define INTSRC_SW_RST_RELEASE (1 << 30) -#define INTSRC_SFR_FIFO_EMPTY (1 << 29) -#define INTSRC_FRAME_DONE (1 << 24) -#define INTSRC_RX_DATA_DONE (1 << 18) - -/* EXYNOS_DSIM_INTMSK */ -#define INTMSK_FIFO_EMPTY (1 << 29) -#define INTMSK_BTA (1 << 25) -#define INTMSK_FRAME_DONE (1 << 24) -#define INTMSK_RX_TIMEOUT (1 << 21) -#define INTMSK_BTA_TIMEOUT (1 << 20) -#define INTMSK_RX_DONE (1 << 18) -#define INTMSK_RX_TE (1 << 17) -#define INTMSK_RX_ACK (1 << 16) -#define INTMSK_RX_ECC_ERR (1 << 15) -#define INTMSK_RX_CRC_ERR (1 << 14) - -/* EXYNOS_DSIM_FIFOCTRL */ -#define SFR_HEADER_EMPTY (1 << 22) - -/* EXYNOS_DSIM_PHYACCHR */ -#define DSIM_AFC_CTL(x) (((x) & 0x7) << 5) - -/* EXYNOS_DSIM_PLLCTRL */ -#define DSIM_PLL_EN_SHIFT(x) ((x) << 23) -#define DSIM_FREQ_BAND_SHIFT(x) ((x) << 24) - -#endif /* _EXYNOS_MIPI_DSI_REGS_H */ diff --git a/drivers/video/fbdev/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c deleted file mode 100644 index de2f3e793786..000000000000 --- a/drivers/video/fbdev/exynos/s6e8ax0.c +++ /dev/null @@ -1,887 +0,0 @@ -/* linux/drivers/video/exynos/s6e8ax0.c - * - * MIPI-DSI based s6e8ax0 AMOLED lcd 4.65 inch panel driver. - * - * Inki Dae, <inki.dae@samsung.com> - * Donghwa Lee, <dh09.lee@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
-*/ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/mutex.h> -#include <linux/wait.h> -#include <linux/ctype.h> -#include <linux/io.h> -#include <linux/delay.h> -#include <linux/irq.h> -#include <linux/interrupt.h> -#include <linux/lcd.h> -#include <linux/fb.h> -#include <linux/backlight.h> -#include <linux/regulator/consumer.h> - -#include <video/mipi_display.h> -#include <video/exynos_mipi_dsim.h> - -#define LDI_MTP_LENGTH 24 -#define DSIM_PM_STABLE_TIME 10 -#define MIN_BRIGHTNESS 0 -#define MAX_BRIGHTNESS 24 -#define GAMMA_TABLE_COUNT 26 - -#define POWER_IS_ON(pwr) ((pwr) == FB_BLANK_UNBLANK) -#define POWER_IS_OFF(pwr) ((pwr) == FB_BLANK_POWERDOWN) -#define POWER_IS_NRM(pwr) ((pwr) == FB_BLANK_NORMAL) - -#define lcd_to_master(a) (a->dsim_dev->master) -#define lcd_to_master_ops(a) ((lcd_to_master(a))->master_ops) - -enum { - DSIM_NONE_STATE = 0, - DSIM_RESUME_COMPLETE = 1, - DSIM_FRAME_DONE = 2, -}; - -struct s6e8ax0 { - struct device *dev; - unsigned int power; - unsigned int id; - unsigned int gamma; - unsigned int acl_enable; - unsigned int cur_acl; - - struct lcd_device *ld; - struct backlight_device *bd; - - struct mipi_dsim_lcd_device *dsim_dev; - struct lcd_platform_data *ddi_pd; - struct mutex lock; - bool enabled; -}; - - -static struct regulator_bulk_data supplies[] = { - { .supply = "vdd3", }, - { .supply = "vci", }, -}; - -static void s6e8ax0_regulator_enable(struct s6e8ax0 *lcd) -{ - int ret = 0; - struct lcd_platform_data *pd = NULL; - - pd = lcd->ddi_pd; - mutex_lock(&lcd->lock); - if (!lcd->enabled) { - ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies); - if (ret) - goto out; - - lcd->enabled = true; - } - msleep(pd->power_on_delay); -out: - mutex_unlock(&lcd->lock); -} - -static void s6e8ax0_regulator_disable(struct s6e8ax0 *lcd) -{ - int ret = 0; - - mutex_lock(&lcd->lock); - if (lcd->enabled) { - ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies); - if (ret) - goto out; - - lcd->enabled = false; - } -out: - mutex_unlock(&lcd->lock); -} - -static const unsigned char s6e8ax0_22_gamma_30[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xf5, 0x00, 0xff, 0xad, 0xaf, - 0xbA, 0xc3, 0xd8, 0xc5, 0x9f, 0xc6, 0x9e, 0xc1, 0xdc, 0xc0, - 0x00, 0x61, 0x00, 0x5a, 0x00, 0x74, -}; - -static const unsigned char s6e8ax0_22_gamma_50[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xe8, 0x1f, 0xf7, 0xad, 0xc0, - 0xb5, 0xc4, 0xdc, 0xc4, 0x9e, 0xc6, 0x9c, 0xbb, 0xd8, 0xbb, - 0x00, 0x70, 0x00, 0x68, 0x00, 0x86, -}; - -static const unsigned char s6e8ax0_22_gamma_60[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xde, 0x1f, 0xef, 0xad, 0xc4, - 0xb3, 0xc3, 0xdd, 0xc4, 0x9e, 0xc6, 0x9c, 0xbc, 0xd6, 0xba, - 0x00, 0x75, 0x00, 0x6e, 0x00, 0x8d, -}; - -static const unsigned char s6e8ax0_22_gamma_70[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xd8, 0x1f, 0xe7, 0xaf, 0xc8, - 0xb4, 0xc4, 0xdd, 0xc3, 0x9d, 0xc6, 0x9c, 0xbb, 0xd6, 0xb9, - 0x00, 0x7a, 0x00, 0x72, 0x00, 0x93, -}; - -static const unsigned char s6e8ax0_22_gamma_80[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xc9, 0x1f, 0xde, 0xae, 0xc9, - 0xb1, 0xc3, 0xdd, 0xc2, 0x9d, 0xc5, 0x9b, 0xbc, 0xd6, 0xbb, - 0x00, 0x7f, 0x00, 0x77, 0x00, 0x99, -}; - -static const unsigned char s6e8ax0_22_gamma_90[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xc7, 0x1f, 0xd9, 0xb0, 0xcc, - 0xb2, 0xc3, 0xdc, 0xc1, 0x9c, 0xc6, 0x9c, 0xbc, 0xd4, 0xb9, - 0x00, 0x83, 0x00, 0x7b, 0x00, 0x9e, -}; - -static const unsigned char s6e8ax0_22_gamma_100[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xbd, 0x80, 0xcd, 0xba, 0xce, - 0xb3, 0xc4, 0xde, 0xc3, 0x9c, 0xc4, 
0x9, 0xb8, 0xd3, 0xb6, - 0x00, 0x88, 0x00, 0x80, 0x00, 0xa5, -}; - -static const unsigned char s6e8ax0_22_gamma_120[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb9, 0x95, 0xc8, 0xb1, 0xcf, - 0xb2, 0xc6, 0xdf, 0xc5, 0x9b, 0xc3, 0x99, 0xb6, 0xd2, 0xb6, - 0x00, 0x8f, 0x00, 0x86, 0x00, 0xac, -}; - -static const unsigned char s6e8ax0_22_gamma_130[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb7, 0xa0, 0xc7, 0xb1, 0xd0, - 0xb2, 0xc4, 0xdd, 0xc3, 0x9a, 0xc3, 0x98, 0xb6, 0xd0, 0xb4, - 0x00, 0x92, 0x00, 0x8a, 0x00, 0xb1, -}; - -static const unsigned char s6e8ax0_22_gamma_140[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb7, 0xa0, 0xc5, 0xb2, 0xd0, - 0xb3, 0xc3, 0xde, 0xc3, 0x9b, 0xc2, 0x98, 0xb6, 0xd0, 0xb4, - 0x00, 0x95, 0x00, 0x8d, 0x00, 0xb5, -}; - -static const unsigned char s6e8ax0_22_gamma_150[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb3, 0xa0, 0xc2, 0xb2, 0xd0, - 0xb2, 0xc1, 0xdd, 0xc2, 0x9b, 0xc2, 0x98, 0xb4, 0xcf, 0xb1, - 0x00, 0x99, 0x00, 0x90, 0x00, 0xba, -}; - -static const unsigned char s6e8ax0_22_gamma_160[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xaf, 0xa5, 0xbf, 0xb0, 0xd0, - 0xb1, 0xc3, 0xde, 0xc2, 0x99, 0xc1, 0x97, 0xb4, 0xce, 0xb1, - 0x00, 0x9c, 0x00, 0x93, 0x00, 0xbe, -}; - -static const unsigned char s6e8ax0_22_gamma_170[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xaf, 0xb5, 0xbf, 0xb1, 0xd1, - 0xb1, 0xc3, 0xde, 0xc3, 0x99, 0xc0, 0x96, 0xb4, 0xce, 0xb1, - 0x00, 0x9f, 0x00, 0x96, 0x00, 0xc2, -}; - -static const unsigned char s6e8ax0_22_gamma_180[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xaf, 0xb7, 0xbe, 0xb3, 0xd2, - 0xb3, 0xc3, 0xde, 0xc2, 0x97, 0xbf, 0x95, 0xb4, 0xcd, 0xb1, - 0x00, 0xa2, 0x00, 0x99, 0x00, 0xc5, -}; - -static const unsigned char s6e8ax0_22_gamma_190[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xaf, 0xb9, 0xbe, 0xb2, 0xd2, - 0xb2, 0xc3, 0xdd, 0xc3, 0x98, 0xbf, 0x95, 0xb2, 0xcc, 0xaf, - 0x00, 0xa5, 0x00, 0x9c, 0x00, 0xc9, -}; - -static const unsigned char s6e8ax0_22_gamma_200[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xaf, 0xb9, 0xbc, 0xb2, 0xd2, - 0xb1, 0xc4, 0xdd, 0xc3, 0x97, 0xbe, 0x95, 0xb1, 0xcb, 0xae, - 0x00, 0xa8, 0x00, 0x9f, 0x00, 0xcd, -}; - -static const unsigned char s6e8ax0_22_gamma_210[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb1, 0xc1, 0xbd, 0xb1, 0xd1, - 0xb1, 0xc2, 0xde, 0xc2, 0x97, 0xbe, 0x94, 0xB0, 0xc9, 0xad, - 0x00, 0xae, 0x00, 0xa4, 0x00, 0xd4, -}; - -static const unsigned char s6e8ax0_22_gamma_220[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb1, 0xc7, 0xbd, 0xb1, 0xd1, - 0xb1, 0xc2, 0xdd, 0xc2, 0x97, 0xbd, 0x94, 0xb0, 0xc9, 0xad, - 0x00, 0xad, 0x00, 0xa2, 0x00, 0xd3, -}; - -static const unsigned char s6e8ax0_22_gamma_230[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb1, 0xc3, 0xbd, 0xb2, 0xd1, - 0xb1, 0xc3, 0xdd, 0xc1, 0x96, 0xbd, 0x94, 0xb0, 0xc9, 0xad, - 0x00, 0xb0, 0x00, 0xa7, 0x00, 0xd7, -}; - -static const unsigned char s6e8ax0_22_gamma_240[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb1, 0xcb, 0xbd, 0xb1, 0xd2, - 0xb1, 0xc3, 0xdD, 0xc2, 0x95, 0xbd, 0x93, 0xaf, 0xc8, 0xab, - 0x00, 0xb3, 0x00, 0xa9, 0x00, 0xdb, -}; - -static const unsigned char s6e8ax0_22_gamma_250[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb3, 0xcc, 0xbe, 0xb0, 0xd2, - 0xb0, 0xc3, 0xdD, 0xc2, 0x94, 0xbc, 0x92, 0xae, 0xc8, 0xab, - 0x00, 0xb6, 0x00, 0xab, 0x00, 0xde, -}; - -static const unsigned char s6e8ax0_22_gamma_260[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb3, 0xd0, 0xbe, 0xaf, 0xd1, - 0xaf, 0xc2, 0xdd, 0xc1, 0x96, 0xbc, 0x93, 0xaf, 0xc8, 0xac, - 0x00, 0xb7, 0x00, 0xad, 0x00, 0xe0, -}; - -static const unsigned char s6e8ax0_22_gamma_270[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb2, 0xcF, 0xbd, 0xb0, 0xd2, - 0xaf, 0xc2, 0xdc, 0xc1, 
0x95, 0xbd, 0x93, 0xae, 0xc6, 0xaa, - 0x00, 0xba, 0x00, 0xb0, 0x00, 0xe4, -}; - -static const unsigned char s6e8ax0_22_gamma_280[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb2, 0xd0, 0xbd, 0xaf, 0xd0, - 0xad, 0xc4, 0xdd, 0xc3, 0x95, 0xbd, 0x93, 0xac, 0xc5, 0xa9, - 0x00, 0xbd, 0x00, 0xb2, 0x00, 0xe7, -}; - -static const unsigned char s6e8ax0_22_gamma_300[] = { - 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb5, 0xd3, 0xbd, 0xb1, 0xd2, - 0xb0, 0xc0, 0xdc, 0xc0, 0x94, 0xba, 0x91, 0xac, 0xc5, 0xa9, - 0x00, 0xc2, 0x00, 0xb7, 0x00, 0xed, -}; - -static const unsigned char *s6e8ax0_22_gamma_table[] = { - s6e8ax0_22_gamma_30, - s6e8ax0_22_gamma_50, - s6e8ax0_22_gamma_60, - s6e8ax0_22_gamma_70, - s6e8ax0_22_gamma_80, - s6e8ax0_22_gamma_90, - s6e8ax0_22_gamma_100, - s6e8ax0_22_gamma_120, - s6e8ax0_22_gamma_130, - s6e8ax0_22_gamma_140, - s6e8ax0_22_gamma_150, - s6e8ax0_22_gamma_160, - s6e8ax0_22_gamma_170, - s6e8ax0_22_gamma_180, - s6e8ax0_22_gamma_190, - s6e8ax0_22_gamma_200, - s6e8ax0_22_gamma_210, - s6e8ax0_22_gamma_220, - s6e8ax0_22_gamma_230, - s6e8ax0_22_gamma_240, - s6e8ax0_22_gamma_250, - s6e8ax0_22_gamma_260, - s6e8ax0_22_gamma_270, - s6e8ax0_22_gamma_280, - s6e8ax0_22_gamma_300, -}; - -static void s6e8ax0_panel_cond(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - - static const unsigned char data_to_send[] = { - 0xf8, 0x3d, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00, 0x3c, 0x7d, - 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00, 0x00, 0x20, 0x04, 0x08, - 0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0, - 0xc8, 0x08, 0x48, 0xc1, 0x00, 0xc1, 0xff, 0xff, 0xc8 - }; - static const unsigned char data_to_send_panel_reverse[] = { - 0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00, 0x3c, 0x7d, - 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00, 0x00, 0x20, 0x04, 0x08, - 0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0, - 0xc1, 0x01, 0x41, 0xc1, 0x00, 0xc1, 0xf6, 0xf6, 0xc1 - }; - - if (lcd->dsim_dev->panel_reverse) - ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, - data_to_send_panel_reverse, - ARRAY_SIZE(data_to_send_panel_reverse)); - else - ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_display_cond(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0xf2, 0x80, 0x03, 0x0d - }; - - ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -/* Gamma 2.2 Setting (200cd, 7500K, 10MPCD) */ -static void s6e8ax0_gamma_cond(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - unsigned int gamma = lcd->bd->props.brightness; - - ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, - s6e8ax0_22_gamma_table[gamma], - GAMMA_TABLE_COUNT); -} - -static void s6e8ax0_gamma_update(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0xf7, 0x03 - }; - - ops->cmd_write(lcd_to_master(lcd), - MIPI_DSI_DCS_SHORT_WRITE_PARAM, data_to_send, - ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_etc_cond1(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0xd1, 0xfe, 0x80, 0x00, 0x01, 0x0b, 0x00, 0x00, 0x40, - 0x0d, 0x00, 0x00 - }; - - ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_etc_cond2(struct s6e8ax0 
*lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0xb6, 0x0c, 0x02, 0x03, 0x32, 0xff, 0x44, 0x44, 0xc0, - 0x00 - }; - - ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_etc_cond3(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0xe1, 0x10, 0x1c, 0x17, 0x08, 0x1d - }; - - ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_etc_cond4(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0xe2, 0xed, 0x07, 0xc3, 0x13, 0x0d, 0x03 - }; - - ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_etc_cond5(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0xf4, 0xcf, 0x0a, 0x12, 0x10, 0x19, 0x33, 0x02 - }; - - ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} -static void s6e8ax0_etc_cond6(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0xe3, 0x40 - }; - - ops->cmd_write(lcd_to_master(lcd), - MIPI_DSI_DCS_SHORT_WRITE_PARAM, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_etc_cond7(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0xe4, 0x00, 0x00, 0x14, 0x80, 0x00, 0x00, 0x00 - }; - - ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_elvss_set(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0xb1, 0x04, 0x00 - }; - - ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_elvss_nvm_set(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0xd9, 0x5c, 0x20, 0x0c, 0x0f, 0x41, 0x00, 0x10, 0x11, - 0x12, 0xd1, 0x00, 0x00, 0x00, 0x00, 0x80, 0xcb, 0xed, - 0x64, 0xaf - }; - - ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_sleep_in(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0x10, 0x00 - }; - - ops->cmd_write(lcd_to_master(lcd), - MIPI_DSI_DCS_SHORT_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_sleep_out(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0x11, 0x00 - }; - - ops->cmd_write(lcd_to_master(lcd), - MIPI_DSI_DCS_SHORT_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_display_on(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0x29, 0x00 - }; - - ops->cmd_write(lcd_to_master(lcd), - MIPI_DSI_DCS_SHORT_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_display_off(struct s6e8ax0 
*lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0x28, 0x00 - }; - - ops->cmd_write(lcd_to_master(lcd), - MIPI_DSI_DCS_SHORT_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_apply_level2_key(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0xf0, 0x5a, 0x5a - }; - - ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_acl_on(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0xc0, 0x01 - }; - - ops->cmd_write(lcd_to_master(lcd), - MIPI_DSI_DCS_SHORT_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -static void s6e8ax0_acl_off(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - static const unsigned char data_to_send[] = { - 0xc0, 0x00 - }; - - ops->cmd_write(lcd_to_master(lcd), - MIPI_DSI_DCS_SHORT_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); -} - -/* Full white 50% reducing setting */ -static void s6e8ax0_acl_ctrl_set(struct s6e8ax0 *lcd) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - /* Full white 50% reducing setting */ - static const unsigned char cutoff_50[] = { - 0xc1, 0x47, 0x53, 0x13, 0x53, 0x00, 0x00, 0x02, 0xcf, - 0x00, 0x00, 0x04, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x08, 0x0f, 0x16, 0x1d, 0x24, 0x2a, 0x31, 0x38, - 0x3f, 0x46 - }; - /* Full white 45% reducing setting */ - static const unsigned char cutoff_45[] = { - 0xc1, 0x47, 0x53, 0x13, 0x53, 0x00, 0x00, 0x02, 0xcf, - 0x00, 0x00, 0x04, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x07, 0x0d, 0x13, 0x19, 0x1f, 0x25, 0x2b, 0x31, - 0x37, 0x3d - }; - /* Full white 40% reducing setting */ - static const unsigned char cutoff_40[] = { - 0xc1, 0x47, 0x53, 0x13, 0x53, 0x00, 0x00, 0x02, 0xcf, - 0x00, 0x00, 0x04, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x06, 0x0c, 0x11, 0x16, 0x1c, 0x21, 0x26, 0x2b, - 0x31, 0x36 - }; - - if (lcd->acl_enable) { - if (lcd->cur_acl == 0) { - if (lcd->gamma == 0 || lcd->gamma == 1) { - s6e8ax0_acl_off(lcd); - dev_dbg(&lcd->ld->dev, - "cur_acl=%d\n", lcd->cur_acl); - } else - s6e8ax0_acl_on(lcd); - } - switch (lcd->gamma) { - case 0: /* 30cd */ - s6e8ax0_acl_off(lcd); - lcd->cur_acl = 0; - break; - case 1 ... 3: /* 50cd ~ 90cd */ - ops->cmd_write(lcd_to_master(lcd), - MIPI_DSI_DCS_LONG_WRITE, - cutoff_40, - ARRAY_SIZE(cutoff_40)); - lcd->cur_acl = 40; - break; - case 4 ... 7: /* 120cd ~ 210cd */ - ops->cmd_write(lcd_to_master(lcd), - MIPI_DSI_DCS_LONG_WRITE, - cutoff_45, - ARRAY_SIZE(cutoff_45)); - lcd->cur_acl = 45; - break; - case 8 ... 
10: /* 220cd ~ 300cd */ - ops->cmd_write(lcd_to_master(lcd), - MIPI_DSI_DCS_LONG_WRITE, - cutoff_50, - ARRAY_SIZE(cutoff_50)); - lcd->cur_acl = 50; - break; - default: - break; - } - } else { - s6e8ax0_acl_off(lcd); - lcd->cur_acl = 0; - dev_dbg(&lcd->ld->dev, "cur_acl = %d\n", lcd->cur_acl); - } -} - -static void s6e8ax0_read_id(struct s6e8ax0 *lcd, u8 *mtp_id) -{ - unsigned int ret; - unsigned int addr = 0xd1; /* MTP ID */ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - - ret = ops->cmd_read(lcd_to_master(lcd), - MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM, - addr, 3, mtp_id); -} - -static int s6e8ax0_panel_init(struct s6e8ax0 *lcd) -{ - s6e8ax0_apply_level2_key(lcd); - s6e8ax0_sleep_out(lcd); - msleep(1); - s6e8ax0_panel_cond(lcd); - s6e8ax0_display_cond(lcd); - s6e8ax0_gamma_cond(lcd); - s6e8ax0_gamma_update(lcd); - - s6e8ax0_etc_cond1(lcd); - s6e8ax0_etc_cond2(lcd); - s6e8ax0_etc_cond3(lcd); - s6e8ax0_etc_cond4(lcd); - s6e8ax0_etc_cond5(lcd); - s6e8ax0_etc_cond6(lcd); - s6e8ax0_etc_cond7(lcd); - - s6e8ax0_elvss_nvm_set(lcd); - s6e8ax0_elvss_set(lcd); - - s6e8ax0_acl_ctrl_set(lcd); - s6e8ax0_acl_on(lcd); - - /* if ID3 value is not 33h, branch private elvss mode */ - msleep(lcd->ddi_pd->power_on_delay); - - return 0; -} - -static int s6e8ax0_update_gamma_ctrl(struct s6e8ax0 *lcd, int brightness) -{ - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - - ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, - s6e8ax0_22_gamma_table[brightness], - ARRAY_SIZE(s6e8ax0_22_gamma_table)); - - /* update gamma table. */ - s6e8ax0_gamma_update(lcd); - lcd->gamma = brightness; - - return 0; -} - -static int s6e8ax0_gamma_ctrl(struct s6e8ax0 *lcd, int gamma) -{ - s6e8ax0_update_gamma_ctrl(lcd, gamma); - - return 0; -} - -static int s6e8ax0_set_power(struct lcd_device *ld, int power) -{ - struct s6e8ax0 *lcd = lcd_get_data(ld); - struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); - int ret = 0; - - if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN && - power != FB_BLANK_NORMAL) { - dev_err(lcd->dev, "power value should be 0, 1 or 4.\n"); - return -EINVAL; - } - - if ((power == FB_BLANK_UNBLANK) && ops->set_blank_mode) { - /* LCD power on */ - if ((POWER_IS_ON(power) && POWER_IS_OFF(lcd->power)) - || (POWER_IS_ON(power) && POWER_IS_NRM(lcd->power))) { - ret = ops->set_blank_mode(lcd_to_master(lcd), power); - if (!ret && lcd->power != power) - lcd->power = power; - } - } else if ((power == FB_BLANK_POWERDOWN) && ops->set_early_blank_mode) { - /* LCD power off */ - if ((POWER_IS_OFF(power) && POWER_IS_ON(lcd->power)) || - (POWER_IS_ON(lcd->power) && POWER_IS_NRM(power))) { - ret = ops->set_early_blank_mode(lcd_to_master(lcd), - power); - if (!ret && lcd->power != power) - lcd->power = power; - } - } - - return ret; -} - -static int s6e8ax0_get_power(struct lcd_device *ld) -{ - struct s6e8ax0 *lcd = lcd_get_data(ld); - - return lcd->power; -} - -static int s6e8ax0_set_brightness(struct backlight_device *bd) -{ - int ret = 0, brightness = bd->props.brightness; - struct s6e8ax0 *lcd = bl_get_data(bd); - - if (brightness < MIN_BRIGHTNESS || - brightness > bd->props.max_brightness) { - dev_err(lcd->dev, "lcd brightness should be %d to %d.\n", - MIN_BRIGHTNESS, MAX_BRIGHTNESS); - return -EINVAL; - } - - ret = s6e8ax0_gamma_ctrl(lcd, brightness); - if (ret) { - dev_err(&bd->dev, "lcd brightness setting failed.\n"); - return -EIO; - } - - return ret; -} - -static struct lcd_ops s6e8ax0_lcd_ops = { - .set_power = s6e8ax0_set_power, - .get_power = s6e8ax0_get_power, -}; 
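A detail worth noting in the deleted brightness path above: s6e8ax0_gamma_cond() writes GAMMA_TABLE_COUNT (26) bytes per level, while s6e8ax0_update_gamma_ctrl() passed ARRAY_SIZE(s6e8ax0_22_gamma_table) — the number of table entries (25), not the payload length — as the write size. A hypothetical sketch of the intended lookup pattern, with write_fn standing in for the driver's ops->cmd_write() and the table/level bounds mirroring the driver's own range check:

    #include <stddef.h>
    #include <errno.h>

    #define GAMMA_TABLE_COUNT 26	/* bytes per gamma payload, as in the driver */

    /*
     * Hypothetical sketch only: the backlight level indexes a sorted table
     * of DCS long-write gamma payloads, one payload per brightness step.
     */
    static int set_gamma_sketch(unsigned int level,
    			    const unsigned char *const *table,
    			    unsigned int entries,
    			    int (*write_fn)(const unsigned char *buf, size_t len))
    {
    	if (level >= entries)
    		return -EINVAL;		/* out of range: reject, as the driver does */

    	/* send the 26-byte curve; the caller then latches it with 0xf7 0x03 */
    	return write_fn(table[level], GAMMA_TABLE_COUNT);
    }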
- -static const struct backlight_ops s6e8ax0_backlight_ops = { - .update_status = s6e8ax0_set_brightness, -}; - -static void s6e8ax0_power_on(struct mipi_dsim_lcd_device *dsim_dev, int power) -{ - struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); - - msleep(lcd->ddi_pd->power_on_delay); - - /* lcd power on */ - if (power) - s6e8ax0_regulator_enable(lcd); - else - s6e8ax0_regulator_disable(lcd); - - msleep(lcd->ddi_pd->reset_delay); - - /* lcd reset */ - if (lcd->ddi_pd->reset) - lcd->ddi_pd->reset(lcd->ld); - msleep(5); -} - -static void s6e8ax0_set_sequence(struct mipi_dsim_lcd_device *dsim_dev) -{ - struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); - - s6e8ax0_panel_init(lcd); - s6e8ax0_display_on(lcd); - - lcd->power = FB_BLANK_UNBLANK; -} - -static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev) -{ - struct s6e8ax0 *lcd; - int ret; - u8 mtp_id[3] = {0, }; - - lcd = devm_kzalloc(&dsim_dev->dev, sizeof(struct s6e8ax0), GFP_KERNEL); - if (!lcd) { - dev_err(&dsim_dev->dev, "failed to allocate s6e8ax0 structure.\n"); - return -ENOMEM; - } - - lcd->dsim_dev = dsim_dev; - lcd->ddi_pd = (struct lcd_platform_data *)dsim_dev->platform_data; - lcd->dev = &dsim_dev->dev; - - mutex_init(&lcd->lock); - - ret = devm_regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies); - if (ret) { - dev_err(lcd->dev, "Failed to get regulators: %d\n", ret); - return ret; - } - - lcd->ld = devm_lcd_device_register(lcd->dev, "s6e8ax0", lcd->dev, lcd, - &s6e8ax0_lcd_ops); - if (IS_ERR(lcd->ld)) { - dev_err(lcd->dev, "failed to register lcd ops.\n"); - return PTR_ERR(lcd->ld); - } - - lcd->bd = devm_backlight_device_register(lcd->dev, "s6e8ax0-bl", - lcd->dev, lcd, &s6e8ax0_backlight_ops, NULL); - if (IS_ERR(lcd->bd)) { - dev_err(lcd->dev, "failed to register backlight ops.\n"); - return PTR_ERR(lcd->bd); - } - - lcd->bd->props.max_brightness = MAX_BRIGHTNESS; - lcd->bd->props.brightness = MAX_BRIGHTNESS; - - s6e8ax0_read_id(lcd, mtp_id); - if (mtp_id[0] == 0x00) - dev_err(lcd->dev, "read id failed\n"); - - dev_info(lcd->dev, "Read ID : %x, %x, %x\n", - mtp_id[0], mtp_id[1], mtp_id[2]); - - if (mtp_id[2] == 0x33) - dev_info(lcd->dev, - "ID-3 is 0xff does not support dynamic elvss\n"); - else - dev_info(lcd->dev, - "ID-3 is 0x%x support dynamic elvss\n", mtp_id[2]); - - lcd->acl_enable = 1; - lcd->cur_acl = 0; - - dev_set_drvdata(&dsim_dev->dev, lcd); - - dev_dbg(lcd->dev, "probed s6e8ax0 panel driver.\n"); - - return 0; -} - -static int __maybe_unused s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev) -{ - struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); - - s6e8ax0_sleep_in(lcd); - msleep(lcd->ddi_pd->power_off_delay); - s6e8ax0_display_off(lcd); - - s6e8ax0_regulator_disable(lcd); - - return 0; -} - -static int __maybe_unused s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev) -{ - struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); - - s6e8ax0_sleep_out(lcd); - msleep(lcd->ddi_pd->power_on_delay); - - s6e8ax0_regulator_enable(lcd); - s6e8ax0_set_sequence(dsim_dev); - - return 0; -} - -static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = { - .name = "s6e8ax0", - .id = -1, - - .power_on = s6e8ax0_power_on, - .set_sequence = s6e8ax0_set_sequence, - .probe = s6e8ax0_probe, - .suspend = IS_ENABLED(CONFIG_PM) ? s6e8ax0_suspend : NULL, - .resume = IS_ENABLED(CONFIG_PM) ? 
s6e8ax0_resume : NULL, -}; - -static int s6e8ax0_init(void) -{ - exynos_mipi_dsi_register_lcd_driver(&s6e8ax0_dsim_ddi_driver); - - return 0; -} - -static void s6e8ax0_exit(void) -{ - return; -} - -module_init(s6e8ax0_init); -module_exit(s6e8ax0_exit); - -MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>"); -MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); -MODULE_DESCRIPTION("MIPI-DSI based s6e8ax0 AMOLED LCD Panel Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/fbdev/hecubafb.c b/drivers/video/fbdev/hecubafb.c index e4031ef39491..8577195cb533 100644 --- a/drivers/video/fbdev/hecubafb.c +++ b/drivers/video/fbdev/hecubafb.c @@ -47,7 +47,7 @@ #define DPY_W 600 #define DPY_H 800 -static struct fb_fix_screeninfo hecubafb_fix = { +static const struct fb_fix_screeninfo hecubafb_fix = { .id = "hecubafb", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_MONO01, @@ -58,7 +58,7 @@ static struct fb_fix_screeninfo hecubafb_fix = { .accel = FB_ACCEL_NONE, }; -static struct fb_var_screeninfo hecubafb_var = { +static const struct fb_var_screeninfo hecubafb_var = { .xres = DPY_W, .yres = DPY_H, .xres_virtual = DPY_W, diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c index 15d3ccff2965..463028543173 100644 --- a/drivers/video/fbdev/hgafb.c +++ b/drivers/video/fbdev/hgafb.c @@ -106,7 +106,7 @@ static DEFINE_SPINLOCK(hga_reg_lock); /* Framebuffer driver structures */ -static struct fb_var_screeninfo hga_default_var = { +static const struct fb_var_screeninfo hga_default_var = { .xres = 720, .yres = 348, .xres_virtual = 720, diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c index cf5ccd0f2252..7bc5f6056c77 100644 --- a/drivers/video/fbdev/i740fb.c +++ b/drivers/video/fbdev/i740fb.c @@ -82,7 +82,7 @@ struct i740fb_par { #define DACSPEED24_SD 128 #define DACSPEED32 86 -static struct fb_fix_screeninfo i740fb_fix = { +static const struct fb_fix_screeninfo i740fb_fix = { .id = "i740fb", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c index 025b882a4826..483ab2592d0c 100644 --- a/drivers/video/fbdev/i810/i810_main.c +++ b/drivers/video/fbdev/i810/i810_main.c @@ -1691,7 +1691,7 @@ static int i810_alloc_agp_mem(struct fb_info *info) if (!(par->i810_gtt.i810_cursor_memory = agp_allocate_memory(bridge, par->cursor_heap.size >> 12, AGP_PHYSICAL_MEMORY))) { - printk("i810fb_alloc_cursormem: can't allocate" + printk("i810fb_alloc_cursormem: can't allocate " "cursor memory\n"); agp_backend_release(bridge); return -ENOMEM; diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c index bf207444ba0c..ff2a5d2023e1 100644 --- a/drivers/video/fbdev/intelfb/intelfbdrv.c +++ b/drivers/video/fbdev/intelfb/intelfbdrv.c @@ -1301,11 +1301,6 @@ static int intelfb_check_var(struct fb_var_screeninfo *var, break; } - if (v.xoffset < 0) - v.xoffset = 0; - if (v.yoffset < 0) - v.yoffset = 0; - if (v.xoffset > v.xres_virtual - v.xres) v.xoffset = v.xres_virtual - v.xres; if (v.yoffset > v.yres_virtual - v.yres) diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c index 5bb01533271e..f77478fb3d14 100644 --- a/drivers/video/fbdev/kyro/fbdev.c +++ b/drivers/video/fbdev/kyro/fbdev.c @@ -44,7 +44,7 @@ static struct fb_fix_screeninfo kyro_fix = { .accel = FB_ACCEL_NONE, }; -static struct fb_var_screeninfo kyro_var = { +static const struct fb_var_screeninfo kyro_var = { /* 640x480, 16bpp @ 60 Hz */ .xres = 640, .yres = 
480, diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c index 195ad7cac1ba..68fa037d8cbc 100644 --- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c +++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c @@ -372,7 +372,7 @@ static int Ti3026_init(struct matrox_fb_info *minfo, struct my_timming *m) DBG(__func__) - memcpy(hw->DACreg, MGADACbpp32, sizeof(hw->DACreg)); + memcpy(hw->DACreg, MGADACbpp32, sizeof(MGADACbpp32)); switch (minfo->fbcon.var.bits_per_pixel) { case 4: hw->DACreg[POS3026_XLATCHCTRL] = TVP3026_XLATCHCTRL_16_1; /* or _8_1, they are same */ hw->DACreg[POS3026_XTRUECOLORCTRL] = TVP3026_XTRUECOLORCTRL_PSEUDOCOLOR; diff --git a/drivers/video/fbdev/matrox/matroxfb_g450.c b/drivers/video/fbdev/matrox/matroxfb_g450.c index cff0546ea6fd..f108ae66fc83 100644 --- a/drivers/video/fbdev/matrox/matroxfb_g450.c +++ b/drivers/video/fbdev/matrox/matroxfb_g450.c @@ -433,7 +433,7 @@ static void cve2_init_TVdata(int norm, struct mavenregs* data, const struct outp 0x00, /* 3E written multiple times */ 0x00, /* 3F not written */ } }; - static struct mavenregs ntscregs = { { + static const struct mavenregs ntscregs = { { 0x21, 0xF0, 0x7C, 0x1F, /* 00: chroma subcarrier */ 0x00, 0x00, /* test */ diff --git a/drivers/video/fbdev/mb862xx/mb862xx-i2c.c b/drivers/video/fbdev/mb862xx/mb862xx-i2c.c index c87e17afb3e2..ba96c44f2761 100644 --- a/drivers/video/fbdev/mb862xx/mb862xx-i2c.c +++ b/drivers/video/fbdev/mb862xx/mb862xx-i2c.c @@ -157,17 +157,10 @@ static struct i2c_adapter mb862xx_i2c_adapter = { int mb862xx_i2c_init(struct mb862xxfb_par *par) { - int ret; - mb862xx_i2c_adapter.algo_data = par; par->adap = &mb862xx_i2c_adapter; - ret = i2c_add_adapter(par->adap); - if (ret < 0) { - dev_err(par->dev, "failed to add %s\n", - mb862xx_i2c_adapter.name); - } - return ret; + return i2c_add_adapter(par->adap); } void mb862xx_i2c_exit(struct mb862xxfb_par *par) diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c index f91b1db262b0..8778e01cebac 100644 --- a/drivers/video/fbdev/mx3fb.c +++ b/drivers/video/fbdev/mx3fb.c @@ -845,7 +845,7 @@ static int __set_par(struct fb_info *fbi, bool lock) if (fbi->var.sync & FB_SYNC_SHARP_MODE) mode = IPU_PANEL_SHARP_TFT; - dev_dbg(fbi->device, "pixclock = %ul Hz\n", + dev_dbg(fbi->device, "pixclock = %u Hz\n", (u32) (PICOS2KHZ(fbi->var.pixclock) * 1000UL)); if (sdc_init_panel(mx3fb, mode, diff --git a/drivers/video/fbdev/mxsfb.c b/drivers/video/fbdev/mxsfb.c index 4e6608ceac09..7846f0e8bbbb 100644 --- a/drivers/video/fbdev/mxsfb.c +++ b/drivers/video/fbdev/mxsfb.c @@ -800,6 +800,7 @@ static int mxsfb_init_fbinfo(struct mxsfb_info *host, struct fb_videomode *vmode) { int ret; + struct device *dev = &host->pdev->dev; struct fb_info *fb_info = &host->fb_info; struct fb_var_screeninfo *var = &fb_info->var; dma_addr_t fb_phys; @@ -825,12 +826,10 @@ static int mxsfb_init_fbinfo(struct mxsfb_info *host, /* Memory allocation for framebuffer */ fb_size = SZ_2M; - fb_virt = alloc_pages_exact(fb_size, GFP_DMA); + fb_virt = dma_alloc_wc(dev, PAGE_ALIGN(fb_size), &fb_phys, GFP_KERNEL); if (!fb_virt) return -ENOMEM; - fb_phys = virt_to_phys(fb_virt); - fb_info->fix.smem_start = fb_phys; fb_info->screen_base = fb_virt; fb_info->screen_size = fb_info->fix.smem_len = fb_size; @@ -843,9 +842,11 @@ static int mxsfb_init_fbinfo(struct mxsfb_info *host, static void mxsfb_free_videomem(struct mxsfb_info *host) { + struct device *dev = &host->pdev->dev; struct fb_info *fb_info = &host->fb_info; - 
free_pages_exact(fb_info->screen_base, fb_info->fix.smem_len); + dma_free_wc(dev, fb_info->screen_size, fb_info->screen_base, + fb_info->fix.smem_start); } static const struct platform_device_id mxsfb_devtype[] = { diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index fb60a8f0cc94..906c6e75c260 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c @@ -625,6 +625,21 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) if (address == OF_BAD_ADDR && addr_prop) address = (u64)addr_prop; if (address != OF_BAD_ADDR) { +#ifdef CONFIG_PCI + const __be32 *vidp, *didp; + u32 vid, did; + struct pci_dev *pdev; + + vidp = of_get_property(dp, "vendor-id", NULL); + didp = of_get_property(dp, "device-id", NULL); + if (vidp && didp) { + vid = be32_to_cpup(vidp); + did = be32_to_cpup(didp); + pdev = pci_get_device(vid, did, NULL); + if (!pdev || pci_enable_device(pdev)) + return; + } +#endif /* kludge for valkyrie */ if (strcmp(dp->name, "valkyrie") == 0) address += 0x1000; diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c index 0e4cee9a8d79..c81f150589e1 100644 --- a/drivers/video/fbdev/omap/lcd_mipid.c +++ b/drivers/video/fbdev/omap/lcd_mipid.c @@ -60,7 +60,6 @@ struct mipid_device { struct mutex mutex; struct lcd_panel panel; - struct workqueue_struct *esd_wq; struct delayed_work esd_work; void (*esd_check)(struct mipid_device *m); }; @@ -390,7 +389,7 @@ static void ls041y3_esd_check(struct mipid_device *md) static void mipid_esd_start_check(struct mipid_device *md) { if (md->esd_check != NULL) - queue_delayed_work(md->esd_wq, &md->esd_work, + schedule_delayed_work(&md->esd_work, MIPID_ESD_CHECK_PERIOD); } @@ -476,11 +475,6 @@ static int mipid_init(struct lcd_panel *panel, struct mipid_device *md = to_mipid_device(panel); md->fbdev = fbdev; - md->esd_wq = create_singlethread_workqueue("mipid_esd"); - if (md->esd_wq == NULL) { - dev_err(&md->spi->dev, "can't create ESD workqueue\n"); - return -ENOMEM; - } INIT_DELAYED_WORK(&md->esd_work, mipid_esd_work); mutex_init(&md->mutex); @@ -500,7 +494,6 @@ static void mipid_cleanup(struct lcd_panel *panel) if (md->enabled) mipid_esd_stop_check(md); - destroy_workqueue(md->esd_wq); } static struct lcd_panel mipid_panel = { diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c index b58012b82b6f..8b810696a42b 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c @@ -75,8 +75,6 @@ struct panel_drv_data { bool intro_printed; - struct workqueue_struct *workqueue; - bool ulps_enabled; unsigned ulps_timeout; struct delayed_work ulps_work; @@ -232,7 +230,7 @@ static int dsicm_set_update_window(struct panel_drv_data *ddata, static void dsicm_queue_ulps_work(struct panel_drv_data *ddata) { if (ddata->ulps_timeout > 0) - queue_delayed_work(ddata->workqueue, &ddata->ulps_work, + schedule_delayed_work(&ddata->ulps_work, msecs_to_jiffies(ddata->ulps_timeout)); } @@ -1244,11 +1242,6 @@ static int dsicm_probe(struct platform_device *pdev) dev_dbg(dev, "Using GPIO TE\n"); } - ddata->workqueue = create_singlethread_workqueue("dsicm_wq"); - if (ddata->workqueue == NULL) { - dev_err(dev, "can't create workqueue\n"); - return -ENOMEM; - } INIT_DELAYED_WORK(&ddata->ulps_work, dsicm_ulps_work); dsicm_hw_reset(ddata); @@ -1262,7 +1255,7 @@ static int dsicm_probe(struct platform_device *pdev) dev, ddata, &dsicm_bl_ops, &props); if 
(IS_ERR(bldev)) { r = PTR_ERR(bldev); - goto err_bl; + goto err_reg; } ddata->bldev = bldev; @@ -1285,8 +1278,6 @@ static int dsicm_probe(struct platform_device *pdev) err_sysfs_create: if (bldev != NULL) backlight_device_unregister(bldev); -err_bl: - destroy_workqueue(ddata->workqueue); err_reg: return r; } @@ -1316,7 +1307,6 @@ static int __exit dsicm_remove(struct platform_device *pdev) omap_dss_put_device(ddata->in); dsicm_cancel_ulps_work(ddata); - destroy_workqueue(ddata->workqueue); /* reset, to be sure that the panel is in a valid state */ dsicm_hw_reset(ddata); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c index 3691bde4ce0a..a864608c5df1 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c @@ -644,6 +644,7 @@ int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask, { int r; + long time_left; DECLARE_COMPLETION_ONSTACK(completion); r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion, @@ -652,15 +653,15 @@ int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask, if (r) return r; - timeout = wait_for_completion_interruptible_timeout(&completion, + time_left = wait_for_completion_interruptible_timeout(&completion, timeout); omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask); - if (timeout == 0) + if (time_left == 0) return -ETIMEDOUT; - if (timeout == -ERESTARTSYS) + if (time_left == -ERESTARTSYS) return -ERESTARTSYS; return 0; diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c index 9e4800a4e3d1..30d49f3800b3 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c @@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev) { struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); struct regulator *vdds_dsi; - int r; if (dsi->vdds_dsi_reg != NULL) return 0; @@ -1180,13 +1179,6 @@ static int dsi_regulator_init(struct platform_device *dsidev) return PTR_ERR(vdds_dsi); } - r = regulator_set_voltage(vdds_dsi, 1800000, 1800000); - if (r) { - devm_regulator_put(vdds_dsi); - DSSERR("can't set the DSI regulator voltage\n"); - return r; - } - dsi->vdds_dsi_reg = vdds_dsi; return 0; @@ -5348,7 +5340,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) dsi->phy_base = devm_ioremap(&dsidev->dev, res->start, resource_size(res)); - if (!dsi->proto_base) { + if (!dsi->phy_base) { DSSERR("can't ioremap DSI PHY\n"); return -ENOMEM; } @@ -5368,7 +5360,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) dsi->pll_base = devm_ioremap(&dsidev->dev, res->start, resource_size(res)); - if (!dsi->proto_base) { + if (!dsi->pll_base) { DSSERR("can't ioremap DSI PLL\n"); return -ENOMEM; } diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c index 926a6f20dbb2..156a254705ea 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c @@ -100,7 +100,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) static int hdmi_init_regulator(void) { - int r; struct regulator *reg; if (hdmi.vdda_reg != NULL) @@ -114,13 +113,6 @@ static int hdmi_init_regulator(void) return PTR_ERR(reg); } - r = regulator_set_voltage(reg, 1800000, 1800000); - if (r) { - devm_regulator_put(reg); - DSSWARN("can't set the regulator voltage\n"); - return r; - } - hdmi.vdda_reg = reg; 
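The dsi.c and hdmi4.c hunks above (hdmi5.c repeats the pattern just below) stop hard-coding a 1.8 V request from the driver and merely cache the regulator handle; the supply's voltage constraints are left to the platform or device-tree regulator setup. A minimal sketch of the resulting get-once init shape — the "vdda" supply name is illustrative, not taken from the patch:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    /*
     * Sketch: cache a regulator handle on first use without imposing a
     * voltage from the driver. Mirrors the shape of the hunks above.
     */
    static int init_supply_sketch(struct device *dev, struct regulator **cached)
    {
    	struct regulator *reg;

    	if (*cached)
    		return 0;	/* already initialised */

    	reg = devm_regulator_get(dev, "vdda");
    	if (IS_ERR(reg))
    		return PTR_ERR(reg);	/* may be -EPROBE_DEFER */

    	/* no regulator_set_voltage(): constraints come from board/DT */
    	*cached = reg;
    	return 0;
    }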
return 0; diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c index 0ee829a165c3..4da36bcab977 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c @@ -119,7 +119,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) static int hdmi_init_regulator(void) { - int r; struct regulator *reg; if (hdmi.vdda_reg != NULL) @@ -131,13 +130,6 @@ static int hdmi_init_regulator(void) return PTR_ERR(reg); } - r = regulator_set_voltage(reg, 1800000, 1800000); - if (r) { - devm_regulator_put(reg); - DSSWARN("can't set the regulator voltage\n"); - return r; - } - hdmi.vdda_reg = reg; return 0; diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c index aa8d28880912..1a4070f719c2 100644 --- a/drivers/video/fbdev/pm2fb.c +++ b/drivers/video/fbdev/pm2fb.c @@ -113,7 +113,7 @@ static struct fb_fix_screeninfo pm2fb_fix = { /* * Default video mode. In case the modedb doesn't work. */ -static struct fb_var_screeninfo pm2fb_var = { +static const struct fb_var_screeninfo pm2fb_var = { /* "640x480, 8 bpp @ 60 Hz */ .xres = 640, .yres = 480, diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c index 2c0487f4f805..ef73f14d7ba0 100644 --- a/drivers/video/fbdev/pxafb.c +++ b/drivers/video/fbdev/pxafb.c @@ -2125,7 +2125,7 @@ static int of_get_pxafb_display(struct device *dev, struct device_node *disp, timings = of_get_display_timings(disp); if (!timings) - goto out; + return -EINVAL; ret = -ENOMEM; info->modes = kmalloc_array(timings->num_timings, @@ -2186,6 +2186,7 @@ static int of_get_pxafb_mode_info(struct device *dev, ret = of_property_read_u32(np, "bus-width", &bus_width); if (ret) { dev_err(dev, "no bus-width specified: %d\n", ret); + of_node_put(np); return ret; } diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c index 96aa46dc696c..5d6179ef0298 100644 --- a/drivers/video/fbdev/s1d13xxxfb.c +++ b/drivers/video/fbdev/s1d13xxxfb.c @@ -83,7 +83,7 @@ static const char *s1d13xxxfb_prod_names[] = { /* * here we define the default struct fb_fix_screeninfo */ -static struct fb_fix_screeninfo s1d13xxxfb_fix = { +static const struct fb_fix_screeninfo s1d13xxxfb_fix = { .id = S1D_FBID, .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, @@ -929,7 +929,7 @@ static int s1d13xxxfb_suspend(struct platform_device *dev, pm_message_t state) s1dfb->disp_save = kmalloc(info->fix.smem_len, GFP_KERNEL); if (!s1dfb->disp_save) { - printk(KERN_ERR PFX "no memory to save screen"); + printk(KERN_ERR PFX "no memory to save screen\n"); return -ENOMEM; } diff --git a/drivers/video/fbdev/s3c2410fb.c b/drivers/video/fbdev/s3c2410fb.c index 0dd86be36afb..a67e4567e656 100644 --- a/drivers/video/fbdev/s3c2410fb.c +++ b/drivers/video/fbdev/s3c2410fb.c @@ -767,7 +767,7 @@ static irqreturn_t s3c2410fb_irq(int irq, void *dev_id) return IRQ_HANDLED; } -#ifdef CONFIG_CPU_FREQ +#ifdef CONFIG_ARM_S3C24XX_CPUFREQ static int s3c2410fb_cpufreq_transition(struct notifier_block *nb, unsigned long val, void *data) diff --git a/drivers/video/fbdev/s3c2410fb.h b/drivers/video/fbdev/s3c2410fb.h index 47a17bd23011..cdd11e2f8859 100644 --- a/drivers/video/fbdev/s3c2410fb.h +++ b/drivers/video/fbdev/s3c2410fb.h @@ -32,7 +32,7 @@ struct s3c2410fb_info { unsigned long clk_rate; unsigned int palette_ready; -#ifdef CONFIG_CPU_FREQ +#ifdef CONFIG_ARM_S3C24XX_CPUFREQ struct notifier_block freq_transition; #endif diff --git a/drivers/video/fbdev/savage/savagefb_driver.c 
b/drivers/video/fbdev/savage/savagefb_driver.c index 6c77ab09b0b2..c30a91c1137c 100644 --- a/drivers/video/fbdev/savage/savagefb_driver.c +++ b/drivers/video/fbdev/savage/savagefb_driver.c @@ -1660,7 +1660,7 @@ static struct fb_ops savagefb_ops = { /* --------------------------------------------------------------------- */ -static struct fb_var_screeninfo savagefb_var800x600x8 = { +static const struct fb_var_screeninfo savagefb_var800x600x8 = { .accel_flags = FB_ACCELF_TEXT, .xres = 800, .yres = 600, diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c index e9cf19977285..61f799a515dc 100644 --- a/drivers/video/fbdev/simplefb.c +++ b/drivers/video/fbdev/simplefb.c @@ -33,14 +33,14 @@ #include <linux/parser.h> #include <linux/regulator/consumer.h> -static struct fb_fix_screeninfo simplefb_fix = { +static const struct fb_fix_screeninfo simplefb_fix = { .id = "simple", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .accel = FB_ACCEL_NONE, }; -static struct fb_var_screeninfo simplefb_var = { +static const struct fb_var_screeninfo simplefb_var = { .height = -1, .width = -1, .activate = FB_ACTIVATE_NOW, @@ -74,8 +74,14 @@ static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, return 0; } +struct simplefb_par; +static void simplefb_clocks_destroy(struct simplefb_par *par); +static void simplefb_regulators_destroy(struct simplefb_par *par); + static void simplefb_destroy(struct fb_info *info) { + simplefb_regulators_destroy(info->par); + simplefb_clocks_destroy(info->par); if (info->screen_base) iounmap(info->screen_base); } @@ -487,11 +493,8 @@ error_fb_release: static int simplefb_remove(struct platform_device *pdev) { struct fb_info *info = platform_get_drvdata(pdev); - struct simplefb_par *par = info->par; unregister_framebuffer(info); - simplefb_regulators_destroy(par); - simplefb_clocks_destroy(par); framebuffer_release(info); return 0; diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c index 86ae1d4556fc..73cb4ffff3c5 100644 --- a/drivers/video/fbdev/sm712fb.c +++ b/drivers/video/fbdev/sm712fb.c @@ -56,7 +56,7 @@ struct smtcfb_info { void __iomem *smtc_regbaseaddress; /* Memory Map IO starting address */ -static struct fb_var_screeninfo smtcfb_var = { +static const struct fb_var_screeninfo smtcfb_var = { .xres = 1024, .yres = 600, .xres_virtual = 1024, diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c index 9279e5f6696e..ec2e7e353685 100644 --- a/drivers/video/fbdev/smscufx.c +++ b/drivers/video/fbdev/smscufx.c @@ -1761,10 +1761,8 @@ error: static void ufx_usb_disconnect(struct usb_interface *interface) { struct ufx_data *dev; - struct fb_info *info; dev = usb_get_intfdata(interface); - info = dev->info; pr_debug("USB disconnect starting\n"); diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c index a9c45c89b15e..2925d5ce8d3e 100644 --- a/drivers/video/fbdev/ssd1307fb.c +++ b/drivers/video/fbdev/ssd1307fb.c @@ -64,7 +64,7 @@ struct ssd1307fb_par { u32 contrast; u32 dclk_div; u32 dclk_frq; - struct ssd1307fb_deviceinfo *device_info; + const struct ssd1307fb_deviceinfo *device_info; struct i2c_client *client; u32 height; struct fb_info *info; @@ -84,7 +84,7 @@ struct ssd1307fb_array { u8 data[0]; }; -static struct fb_fix_screeninfo ssd1307fb_fix = { +static const struct fb_fix_screeninfo ssd1307fb_fix = { .id = "Solomon SSD1307", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_MONO10, @@ -94,7 +94,7 @@ static struct fb_fix_screeninfo ssd1307fb_fix = { 
.accel = FB_ACCEL_NONE, }; -static struct fb_var_screeninfo ssd1307fb_var = { +static const struct fb_var_screeninfo ssd1307fb_var = { .bits_per_pixel = 1, }; @@ -559,8 +559,7 @@ static int ssd1307fb_probe(struct i2c_client *client, par->info = info; par->client = client; - par->device_info = (struct ssd1307fb_deviceinfo *)of_match_device( - ssd1307fb_of_match, &client->dev)->data; + par->device_info = of_device_get_match_data(&client->dev); par->reset = of_get_named_gpio(client->dev.of_node, "reset-gpios", 0); diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c index 621fa441a6db..d5fa313806fe 100644 --- a/drivers/video/fbdev/tdfxfb.c +++ b/drivers/video/fbdev/tdfxfb.c @@ -82,7 +82,7 @@ #define VOODOO3_MAX_PIXCLOCK 300000 #define VOODOO5_MAX_PIXCLOCK 350000 -static struct fb_fix_screeninfo tdfx_fix = { +static const struct fb_fix_screeninfo tdfx_fix = { .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, .ypanstep = 1, @@ -90,7 +90,7 @@ static struct fb_fix_screeninfo tdfx_fix = { .accel = FB_ACCEL_3DFX_BANSHEE }; -static struct fb_var_screeninfo tdfx_var = { +static const struct fb_var_screeninfo tdfx_var = { /* "640x480, 8 bpp @ 60 Hz */ .xres = 640, .yres = 480, diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c index 178ae93b7ebd..98af9e02959b 100644 --- a/drivers/video/fbdev/uvesafb.c +++ b/drivers/video/fbdev/uvesafb.c @@ -33,7 +33,7 @@ static struct cb_id uvesafb_cn_id = { static char v86d_path[PATH_MAX] = "/sbin/v86d"; static char v86d_started; /* has v86d been started by uvesafb? */ -static struct fb_fix_screeninfo uvesafb_fix = { +static const struct fb_fix_screeninfo uvesafb_fix = { .id = "VESA VGA", .type = FB_TYPE_PACKED_PIXELS, .accel = FB_ACCEL_NONE, diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c index b9c2f81fb6b9..da653a080394 100644 --- a/drivers/video/fbdev/vfb.c +++ b/drivers/video/fbdev/vfb.c @@ -35,76 +35,23 @@ static void *videomemory; static u_long videomemorysize = VIDEOMEMSIZE; module_param(videomemorysize, ulong, 0); +MODULE_PARM_DESC(videomemorysize, "RAM available to frame buffer (in bytes)"); -/********************************************************************** - * - * Memory management - * - **********************************************************************/ -static void *rvmalloc(unsigned long size) -{ - void *mem; - unsigned long adr; - - size = PAGE_ALIGN(size); - mem = vmalloc_32(size); - if (!mem) - return NULL; - - /* - * VFB must clear memory to prevent kernel info - * leakage into userspace - * VGA-based drivers MUST NOT clear memory if - * they want to be able to take over vgacon - */ - - memset(mem, 0, size); - adr = (unsigned long) mem; - while (size > 0) { - SetPageReserved(vmalloc_to_page((void *)adr)); - adr += PAGE_SIZE; - size -= PAGE_SIZE; - } - - return mem; -} - -static void rvfree(void *mem, unsigned long size) -{ - unsigned long adr; - - if (!mem) - return; - - adr = (unsigned long) mem; - while ((long) size > 0) { - ClearPageReserved(vmalloc_to_page((void *)adr)); - adr += PAGE_SIZE; - size -= PAGE_SIZE; - } - vfree(mem); -} +static char *mode_option = NULL; +module_param(mode_option, charp, 0); +MODULE_PARM_DESC(mode_option, "Preferred video mode (e.g. 
640x480-8@60)"); -static struct fb_var_screeninfo vfb_default = { +static const struct fb_videomode vfb_default = { .xres = 640, .yres = 480, - .xres_virtual = 640, - .yres_virtual = 480, - .bits_per_pixel = 8, - .red = { 0, 8, 0 }, - .green = { 0, 8, 0 }, - .blue = { 0, 8, 0 }, - .activate = FB_ACTIVATE_TEST, - .height = -1, - .width = -1, - .pixclock = 20000, - .left_margin = 64, - .right_margin = 64, - .upper_margin = 32, - .lower_margin = 32, - .hsync_len = 64, - .vsync_len = 2, - .vmode = FB_VMODE_NONINTERLACED, + .pixclock = 20000, + .left_margin = 64, + .right_margin = 64, + .upper_margin = 32, + .lower_margin = 32, + .hsync_len = 64, + .vsync_len = 2, + .vmode = FB_VMODE_NONINTERLACED, }; static struct fb_fix_screeninfo vfb_fix = { @@ -119,6 +66,7 @@ static struct fb_fix_screeninfo vfb_fix = { static bool vfb_enable __initdata = 0; /* disabled by default */ module_param(vfb_enable, bool, 0); +MODULE_PARM_DESC(vfb_enable, "Enable Virtual FB driver"); static int vfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info); @@ -421,35 +369,7 @@ static int vfb_pan_display(struct fb_var_screeninfo *var, static int vfb_mmap(struct fb_info *info, struct vm_area_struct *vma) { - unsigned long start = vma->vm_start; - unsigned long size = vma->vm_end - vma->vm_start; - unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; - unsigned long page, pos; - - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) - return -EINVAL; - if (size > info->fix.smem_len) - return -EINVAL; - if (offset > info->fix.smem_len - size) - return -EINVAL; - - pos = (unsigned long)info->fix.smem_start + offset; - - while (size > 0) { - page = vmalloc_to_pfn((void *)pos); - if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) { - return -EAGAIN; - } - start += PAGE_SIZE; - pos += PAGE_SIZE; - if (size > PAGE_SIZE) - size -= PAGE_SIZE; - else - size = 0; - } - - return 0; - + return remap_vmalloc_range(vma, (void *)info->fix.smem_start, vma->vm_pgoff); } #ifndef MODULE @@ -477,6 +397,8 @@ static int __init vfb_setup(char *options) /* Test disable for backwards compatibility */ if (!strcmp(this_opt, "disable")) vfb_enable = 0; + else + mode_option = this_opt; } return 1; } @@ -489,12 +411,13 @@ static int __init vfb_setup(char *options) static int vfb_probe(struct platform_device *dev) { struct fb_info *info; + unsigned int size = PAGE_ALIGN(videomemorysize); int retval = -ENOMEM; /* * For real video cards we use ioremap. 
*/ - if (!(videomemory = rvmalloc(videomemorysize))) + if (!(videomemory = vmalloc_32_user(size))) return retval; info = framebuffer_alloc(sizeof(u32) * 256, &dev->dev); @@ -504,11 +427,13 @@ static int vfb_probe(struct platform_device *dev) info->screen_base = (char __iomem *)videomemory; info->fbops = &vfb_ops; - retval = fb_find_mode(&info->var, info, NULL, - NULL, 0, NULL, 8); + if (!fb_find_mode(&info->var, info, mode_option, + NULL, 0, &vfb_default, 8)){ + fb_err(info, "Unable to find usable video mode.\n"); + retval = -EINVAL; + goto err1; + } - if (!retval || (retval == 4)) - info->var = vfb_default; vfb_fix.smem_start = (unsigned long) videomemory; vfb_fix.smem_len = videomemorysize; info->fix = vfb_fix; @@ -533,7 +458,7 @@ err2: err1: framebuffer_release(info); err: - rvfree(videomemory, videomemorysize); + vfree(videomemory); return retval; } @@ -543,7 +468,7 @@ static int vfb_remove(struct platform_device *dev) if (info) { unregister_framebuffer(info); - rvfree(videomemory, videomemorysize); + vfree(videomemory); fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c index 283d335a759f..5f0690c8fc93 100644 --- a/drivers/video/fbdev/vga16fb.c +++ b/drivers/video/fbdev/vga16fb.c @@ -85,7 +85,7 @@ static struct fb_var_screeninfo vga16fb_defined = { }; /* name should not depend on EGA/VGA */ -static struct fb_fix_screeninfo vga16fb_fix = { +static const struct fb_fix_screeninfo vga16fb_fix = { .id = "VGA16 VGA", .smem_start = VGA_FB_PHYS, .smem_len = VGA_FB_PHYS_LEN, diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 50dbaa805658..fdd3228e0678 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -1844,4 +1844,53 @@ config USBPCWATCHDOG Most people will say N. +comment "Watchdog Pretimeout Governors" + +config WATCHDOG_PRETIMEOUT_GOV + bool "Enable watchdog pretimeout governors" + help + The option allows to select watchdog pretimeout governors. + +if WATCHDOG_PRETIMEOUT_GOV + +choice + prompt "Default Watchdog Pretimeout Governor" + default WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC + help + This option selects a default watchdog pretimeout governor. + The governor takes its action, if a watchdog is capable + to report a pretimeout event. + +config WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP + bool "noop" + select WATCHDOG_PRETIMEOUT_GOV_NOOP + help + Use noop watchdog pretimeout governor by default. If noop + governor is selected by a user, write a short message to + the kernel log buffer and don't do any system changes. + +config WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC + bool "panic" + select WATCHDOG_PRETIMEOUT_GOV_PANIC + help + Use panic watchdog pretimeout governor by default, if + a watchdog pretimeout event happens, consider that + a watchdog feeder is dead and reboot is unavoidable. + +endchoice + +config WATCHDOG_PRETIMEOUT_GOV_NOOP + tristate "Noop watchdog pretimeout governor" + help + Noop watchdog pretimeout governor, only an informational + message is added to kernel log buffer. + +config WATCHDOG_PRETIMEOUT_GOV_PANIC + tristate "Panic watchdog pretimeout governor" + help + Panic watchdog pretimeout governor, on watchdog pretimeout + event put the kernel into panic. + +endif # WATCHDOG_PRETIMEOUT_GOV + endif # WATCHDOG diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index cba00430151b..caa9f4aa492a 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -3,9 +3,15 @@ # # The WatchDog Timer Driver Core. 
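The vfb.c rewrite above drops the hand-rolled rvmalloc()/rvfree() helpers (a page-reserving vmalloc plus a manual remap_pfn_range() loop in mmap) in favour of vmalloc_32_user() and a single remap_vmalloc_range() call, which zeroes the memory and performs the size/offset validation itself. A condensed sketch of the resulting allocate/map/free trio, under that reading:

    #include <linux/fb.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /*
     * Sketch: vmalloc-backed framebuffer memory after the vfb.c change.
     * vmalloc_32_user() returns zeroed memory safe to map to userspace;
     * remap_vmalloc_range() checks the vma against the area by itself.
     */
    static void *alloc_fbmem_sketch(unsigned long size)
    {
    	return vmalloc_32_user(PAGE_ALIGN(size));
    }

    static int mmap_fbmem_sketch(struct fb_info *info, struct vm_area_struct *vma)
    {
    	/* fix.smem_start holds the vmalloc address in this driver */
    	return remap_vmalloc_range(vma, (void *)info->fix.smem_start,
    				   vma->vm_pgoff);
    }

    static void free_fbmem_sketch(void *mem)
    {
    	vfree(mem);	/* no ClearPageReserved() walk needed any more */
    }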
-watchdog-objs += watchdog_core.o watchdog_dev.o obj-$(CONFIG_WATCHDOG_CORE) += watchdog.o +watchdog-objs += watchdog_core.o watchdog_dev.o + +watchdog-$(CONFIG_WATCHDOG_PRETIMEOUT_GOV) += watchdog_pretimeout.o + +obj-$(CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP) += pretimeout_noop.o +obj-$(CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC) += pretimeout_panic.o + # Only one watchdog can succeed. We probe the ISA/PCI/USB based # watchdog-cards first, then the architecture specific watchdog # drivers and then the architecture independent "softdog" driver. diff --git a/drivers/watchdog/asm9260_wdt.c b/drivers/watchdog/asm9260_wdt.c index c9686b2fdafd..d0b59ba0f661 100644 --- a/drivers/watchdog/asm9260_wdt.c +++ b/drivers/watchdog/asm9260_wdt.c @@ -389,7 +389,6 @@ MODULE_DEVICE_TABLE(of, asm9260_wdt_of_match); static struct platform_driver asm9260_wdt_driver = { .driver = { .name = "asm9260-wdt", - .owner = THIS_MODULE, .of_match_table = asm9260_wdt_of_match, }, .probe = asm9260_wdt_probe, diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c index 835d310081e1..e2209bf5fa8a 100644 --- a/drivers/watchdog/ath79_wdt.c +++ b/drivers/watchdog/ath79_wdt.c @@ -35,6 +35,7 @@ #include <linux/err.h> #include <linux/of.h> #include <linux/of_platform.h> +#include <linux/uaccess.h> #define DRIVER_NAME "ath79-wdt" diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c index 4245b65d645c..e238df4d75a2 100644 --- a/drivers/watchdog/bcm7038_wdt.c +++ b/drivers/watchdog/bcm7038_wdt.c @@ -107,7 +107,7 @@ static struct watchdog_info bcm7038_wdt_info = { WDIOF_MAGICCLOSE }; -static struct watchdog_ops bcm7038_wdt_ops = { +static const struct watchdog_ops bcm7038_wdt_ops = { .owner = THIS_MODULE, .start = bcm7038_wdt_start, .stop = bcm7038_wdt_stop, diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c index 4dda9024e229..98acef72334d 100644 --- a/drivers/watchdog/cadence_wdt.c +++ b/drivers/watchdog/cadence_wdt.c @@ -269,7 +269,7 @@ static struct watchdog_info cdns_wdt_info = { }; /* Watchdog Core Ops */ -static struct watchdog_ops cdns_wdt_ops = { +static const struct watchdog_ops cdns_wdt_ops = { .owner = THIS_MODULE, .start = cdns_wdt_start, .stop = cdns_wdt_stop, @@ -424,8 +424,10 @@ static int __maybe_unused cdns_wdt_suspend(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct cdns_wdt *wdt = platform_get_drvdata(pdev); - cdns_wdt_stop(&wdt->cdns_wdt_device); - clk_disable_unprepare(wdt->clk); + if (watchdog_active(&wdt->cdns_wdt_device)) { + cdns_wdt_stop(&wdt->cdns_wdt_device); + clk_disable_unprepare(wdt->clk); + } return 0; } @@ -442,12 +444,14 @@ static int __maybe_unused cdns_wdt_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct cdns_wdt *wdt = platform_get_drvdata(pdev); - ret = clk_prepare_enable(wdt->clk); - if (ret) { - dev_err(dev, "unable to enable clock\n"); - return ret; + if (watchdog_active(&wdt->cdns_wdt_device)) { + ret = clk_prepare_enable(wdt->clk); + if (ret) { + dev_err(dev, "unable to enable clock\n"); + return ret; + } + cdns_wdt_start(&wdt->cdns_wdt_device); } - cdns_wdt_start(&wdt->cdns_wdt_device); return 0; } diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index 2acb51cf5504..3c6a3de13a1b 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -54,6 +54,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " struct dw_wdt { void __iomem *regs; struct clk *clk; + unsigned long rate; struct notifier_block 
restart_handler; struct watchdog_device wdd; }; @@ -72,7 +73,7 @@ static inline int dw_wdt_top_in_seconds(struct dw_wdt *dw_wdt, unsigned top) * There are 16 possible timeout values in 0..15 where the number of * cycles is 2 ^ (16 + i) and the watchdog counts down. */ - return (1U << (16 + top)) / clk_get_rate(dw_wdt->clk); + return (1U << (16 + top)) / dw_wdt->rate; } static int dw_wdt_get_top(struct dw_wdt *dw_wdt) @@ -163,7 +164,7 @@ static unsigned int dw_wdt_get_timeleft(struct watchdog_device *wdd) struct dw_wdt *dw_wdt = to_dw_wdt(wdd); return readl(dw_wdt->regs + WDOG_CURRENT_COUNT_REG_OFFSET) / - clk_get_rate(dw_wdt->clk); + dw_wdt->rate; } static const struct watchdog_info dw_wdt_ident = { @@ -231,6 +232,12 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) if (ret) return ret; + dw_wdt->rate = clk_get_rate(dw_wdt->clk); + if (dw_wdt->rate == 0) { + ret = -EINVAL; + goto out_disable_clk; + } + wdd = &dw_wdt->wdd; wdd->info = &dw_wdt_ident; wdd->ops = &dw_wdt_ops; diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 8f89bd8a826a..70c7194e2810 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -39,7 +39,7 @@ #include <asm/nmi.h> #include <asm/frame.h> -#define HPWDT_VERSION "1.3.3" +#define HPWDT_VERSION "1.4.0" #define SECS_TO_TICKS(secs) ((secs) * 1000 / 128) #define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000) #define HPWDT_MAX_TIMER TICKS_TO_SECS(65535) @@ -814,7 +814,8 @@ static int hpwdt_init_one(struct pci_dev *dev, * not run on a legacy ASM box. * So we only support the G5 ProLiant servers and higher. */ - if (dev->subsystem_vendor != PCI_VENDOR_ID_HP) { + if (dev->subsystem_vendor != PCI_VENDOR_ID_HP && + dev->subsystem_vendor != PCI_VENDOR_ID_HP_3PAR) { dev_warn(&dev->dev, "This server does not have an iLO2+ ASIC.\n"); return -ENODEV; @@ -823,7 +824,8 @@ static int hpwdt_init_one(struct pci_dev *dev, /* * Ignore all auxilary iLO devices with the following PCI ID */ - if (dev->subsystem_device == 0x1979) + if (dev->subsystem_vendor == PCI_VENDOR_ID_HP && + dev->subsystem_device == 0x1979) return -ENODEV; if (pci_enable_device(dev)) { diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 54cab189a763..06fcb6c8c917 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -629,7 +629,7 @@ static int iTCO_wdt_resume_noirq(struct device *dev) return 0; } -static struct dev_pm_ops iTCO_wdt_pm = { +static const struct dev_pm_ops iTCO_wdt_pm = { .suspend_noirq = iTCO_wdt_suspend_noirq, .resume_noirq = iTCO_wdt_resume_noirq, }; diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index 62f346bb4348..4874b0f18650 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c @@ -24,6 +24,7 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/init.h> +#include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> @@ -37,18 +38,23 @@ #define IMX2_WDT_WCR 0x00 /* Control Register */ #define IMX2_WDT_WCR_WT (0xFF << 8) /* -> Watchdog Timeout Field */ -#define IMX2_WDT_WCR_WDA (1 << 5) /* -> External Reset WDOG_B */ -#define IMX2_WDT_WCR_SRS (1 << 4) /* -> Software Reset Signal */ -#define IMX2_WDT_WCR_WRE (1 << 3) /* -> WDOG Reset Enable */ -#define IMX2_WDT_WCR_WDE (1 << 2) /* -> Watchdog Enable */ -#define IMX2_WDT_WCR_WDZST (1 << 0) /* -> Watchdog timer Suspend */ +#define IMX2_WDT_WCR_WDA BIT(5) /* -> External Reset WDOG_B */ +#define IMX2_WDT_WCR_SRS BIT(4) /* -> Software Reset Signal */ +#define 
IMX2_WDT_WCR_WRE BIT(3) /* -> WDOG Reset Enable */ +#define IMX2_WDT_WCR_WDE BIT(2) /* -> Watchdog Enable */ +#define IMX2_WDT_WCR_WDZST BIT(0) /* -> Watchdog timer Suspend */ #define IMX2_WDT_WSR 0x02 /* Service Register */ #define IMX2_WDT_SEQ1 0x5555 /* -> service sequence 1 */ #define IMX2_WDT_SEQ2 0xAAAA /* -> service sequence 2 */ #define IMX2_WDT_WRSR 0x04 /* Reset Status Register */ -#define IMX2_WDT_WRSR_TOUT (1 << 1) /* -> Reset due to Timeout */ +#define IMX2_WDT_WRSR_TOUT BIT(1) /* -> Reset due to Timeout */ + +#define IMX2_WDT_WICR 0x06 /* Interrupt Control Register */ +#define IMX2_WDT_WICR_WIE BIT(15) /* -> Interrupt Enable */ +#define IMX2_WDT_WICR_WTIS BIT(14) /* -> Interrupt Status */ +#define IMX2_WDT_WICR_WICT 0xFF /* -> Interrupt Count Timeout */ #define IMX2_WDT_WMCR 0x08 /* Misc Register */ @@ -80,6 +86,12 @@ static const struct watchdog_info imx2_wdt_info = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, }; +static const struct watchdog_info imx2_wdt_pretimeout_info = { + .identity = "imx2+ watchdog", + .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | + WDIOF_PRETIMEOUT, +}; + static int imx2_wdt_restart(struct watchdog_device *wdog, unsigned long action, void *data) { @@ -169,6 +181,35 @@ static int imx2_wdt_set_timeout(struct watchdog_device *wdog, return 0; } +static int imx2_wdt_set_pretimeout(struct watchdog_device *wdog, + unsigned int new_pretimeout) +{ + struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); + + if (new_pretimeout >= IMX2_WDT_MAX_TIME) + return -EINVAL; + + wdog->pretimeout = new_pretimeout; + + regmap_update_bits(wdev->regmap, IMX2_WDT_WICR, + IMX2_WDT_WICR_WIE | IMX2_WDT_WICR_WICT, + IMX2_WDT_WICR_WIE | (new_pretimeout << 1)); + return 0; +} + +static irqreturn_t imx2_wdt_isr(int irq, void *wdog_arg) +{ + struct watchdog_device *wdog = wdog_arg; + struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); + + regmap_write_bits(wdev->regmap, IMX2_WDT_WICR, + IMX2_WDT_WICR_WTIS, IMX2_WDT_WICR_WTIS); + + watchdog_notify_pretimeout(wdog); + + return IRQ_HANDLED; +} + static int imx2_wdt_start(struct watchdog_device *wdog) { struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); @@ -188,6 +229,7 @@ static const struct watchdog_ops imx2_wdt_ops = { .start = imx2_wdt_start, .ping = imx2_wdt_ping, .set_timeout = imx2_wdt_set_timeout, + .set_pretimeout = imx2_wdt_set_pretimeout, .restart = imx2_wdt_restart, }; @@ -236,6 +278,12 @@ static int __init imx2_wdt_probe(struct platform_device *pdev) wdog->max_hw_heartbeat_ms = IMX2_WDT_MAX_TIME * 1000; wdog->parent = &pdev->dev; + ret = platform_get_irq(pdev, 0); + if (ret > 0) + if (!devm_request_irq(&pdev->dev, ret, imx2_wdt_isr, 0, + dev_name(&pdev->dev), wdog)) + wdog->info = &imx2_wdt_pretimeout_info; + ret = clk_prepare_enable(wdev->clk); if (ret) return ret; diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c index 5bf931ce1353..8e302d0e346c 100644 --- a/drivers/watchdog/kempld_wdt.c +++ b/drivers/watchdog/kempld_wdt.c @@ -430,7 +430,7 @@ static struct watchdog_info kempld_wdt_info = { WDIOF_PRETIMEOUT }; -static struct watchdog_ops kempld_wdt_ops = { +static const struct watchdog_ops kempld_wdt_ops = { .owner = THIS_MODULE, .start = kempld_wdt_start, .stop = kempld_wdt_stop, diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c index 4a2290f900a8..d5735c12067d 100644 --- a/drivers/watchdog/mt7621_wdt.c +++ b/drivers/watchdog/mt7621_wdt.c @@ -139,7 +139,6 @@ static int mt7621_wdt_probe(struct 
platform_device *pdev) if (!IS_ERR(mt7621_wdt_reset)) reset_control_deassert(mt7621_wdt_reset); - mt7621_wdt_dev.dev = &pdev->dev; mt7621_wdt_dev.bootstatus = mt7621_wdt_bootcause(); watchdog_init_timeout(&mt7621_wdt_dev, mt7621_wdt_dev.max_timeout, diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c index b2e1b4cbbdc1..fae7fe929ea3 100644 --- a/drivers/watchdog/of_xilinx_wdt.c +++ b/drivers/watchdog/of_xilinx_wdt.c @@ -10,6 +10,7 @@ * 2 of the License, or (at your option) any later version. */ +#include <linux/clk.h> #include <linux/err.h> #include <linux/module.h> #include <linux/types.h> @@ -45,6 +46,7 @@ struct xwdt_device { u32 wdt_interval; spinlock_t spinlock; struct watchdog_device xilinx_wdt_wdd; + struct clk *clk; }; static int xilinx_wdt_start(struct watchdog_device *wdd) @@ -195,16 +197,30 @@ static int xwdt_probe(struct platform_device *pdev) spin_lock_init(&xdev->spinlock); watchdog_set_drvdata(xilinx_wdt_wdd, xdev); + xdev->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(xdev->clk)) { + if (PTR_ERR(xdev->clk) == -ENOENT) + xdev->clk = NULL; + else + return PTR_ERR(xdev->clk); + } + + rc = clk_prepare_enable(xdev->clk); + if (rc) { + dev_err(&pdev->dev, "unable to enable clock\n"); + return rc; + } + rc = xwdt_selftest(xdev); if (rc == XWT_TIMER_FAILED) { dev_err(&pdev->dev, "SelfTest routine error\n"); - return rc; + goto err_clk_disable; } rc = watchdog_register_device(xilinx_wdt_wdd); if (rc) { dev_err(&pdev->dev, "Cannot register watchdog (err=%d)\n", rc); - return rc; + goto err_clk_disable; } dev_info(&pdev->dev, "Xilinx Watchdog Timer at %p with timeout %ds\n", @@ -213,6 +229,10 @@ static int xwdt_probe(struct platform_device *pdev) platform_set_drvdata(pdev, xdev); return 0; +err_clk_disable: + clk_disable_unprepare(xdev->clk); + + return rc; } static int xwdt_remove(struct platform_device *pdev) @@ -220,6 +240,7 @@ static int xwdt_remove(struct platform_device *pdev) struct xwdt_device *xdev = platform_get_drvdata(pdev); watchdog_unregister_device(&xdev->xilinx_wdt_wdd); + clk_disable_unprepare(xdev->clk); return 0; } diff --git a/drivers/watchdog/pretimeout_noop.c b/drivers/watchdog/pretimeout_noop.c new file mode 100644 index 000000000000..85f5299d197c --- /dev/null +++ b/drivers/watchdog/pretimeout_noop.c @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2015-2016 Mentor Graphics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include <linux/module.h> +#include <linux/printk.h> +#include <linux/watchdog.h> + +#include "watchdog_pretimeout.h" + +/** + * pretimeout_noop - No operation on watchdog pretimeout event + * @wdd - watchdog_device + * + * This function prints a message about pretimeout to kernel log. 
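+ *
+ * For illustration, a governor is just a named callback bundled in a
+ * struct watchdog_governor; a hypothetical "verbose" governor (not part
+ * of this series) would mirror the shape of this file:
+ *
+ *	static void pretimeout_verbose(struct watchdog_device *wdd)
+ *	{
+ *		pr_crit("watchdog%d: pretimeout, %u s before reset\n",
+ *			wdd->id, wdd->pretimeout);
+ *	}
+ *
+ *	static struct watchdog_governor watchdog_gov_verbose = {
+ *		.name		= "verbose",
+ *		.pretimeout	= pretimeout_verbose,
+ *	};
+ *
+ * registered with watchdog_register_governor() from module init.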
+ */ +static void pretimeout_noop(struct watchdog_device *wdd) +{ + pr_alert("watchdog%d: pretimeout event\n", wdd->id); +} + +static struct watchdog_governor watchdog_gov_noop = { + .name = "noop", + .pretimeout = pretimeout_noop, +}; + +static int __init watchdog_gov_noop_register(void) +{ + return watchdog_register_governor(&watchdog_gov_noop); +} + +static void __exit watchdog_gov_noop_unregister(void) +{ + watchdog_unregister_governor(&watchdog_gov_noop); +} +module_init(watchdog_gov_noop_register); +module_exit(watchdog_gov_noop_unregister); + +MODULE_AUTHOR("Vladimir Zapolskiy <vladimir_zapolskiy@mentor.com>"); +MODULE_DESCRIPTION("Noop watchdog pretimeout governor"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/pretimeout_panic.c b/drivers/watchdog/pretimeout_panic.c new file mode 100644 index 000000000000..0c197a1c97f4 --- /dev/null +++ b/drivers/watchdog/pretimeout_panic.c @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2015-2016 Mentor Graphics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/watchdog.h> + +#include "watchdog_pretimeout.h" + +/** + * pretimeout_panic - Panic on watchdog pretimeout event + * @wdd - watchdog_device + * + * Panic if the watchdog has not been fed by the pretimeout event. + */ +static void pretimeout_panic(struct watchdog_device *wdd) +{ + panic("watchdog pretimeout event\n"); +} + +static struct watchdog_governor watchdog_gov_panic = { + .name = "panic", + .pretimeout = pretimeout_panic, +}; + +static int __init watchdog_gov_panic_register(void) +{ + return watchdog_register_governor(&watchdog_gov_panic); +} + +static void __exit watchdog_gov_panic_unregister(void) +{ + watchdog_unregister_governor(&watchdog_gov_panic); +} +module_init(watchdog_gov_panic_register); +module_exit(watchdog_gov_panic_unregister); + +MODULE_AUTHOR("Vladimir Zapolskiy <vladimir_zapolskiy@mentor.com>"); +MODULE_DESCRIPTION("Panic watchdog pretimeout governor"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c index d1c12278cb6a..0805ee2acd7a 100644 --- a/drivers/watchdog/rn5t618_wdt.c +++ b/drivers/watchdog/rn5t618_wdt.c @@ -136,7 +136,7 @@ static struct watchdog_info rn5t618_wdt_info = { .identity = DRIVER_NAME, }; -static struct watchdog_ops rn5t618_wdt_ops = { +static const struct watchdog_ops rn5t618_wdt_ops = { .owner = THIS_MODULE, .start = rn5t618_wdt_start, .stop = rn5t618_wdt_stop, diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c index 1967919ae743..14b4fd428fff 100644 --- a/drivers/watchdog/rt2880_wdt.c +++ b/drivers/watchdog/rt2880_wdt.c @@ -158,7 +158,6 @@ static int rt288x_wdt_probe(struct platform_device *pdev) rt288x_wdt_freq = clk_get_rate(rt288x_wdt_clk) / RALINK_WDT_PRESCALE; - rt288x_wdt_dev.dev = &pdev->dev; rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause(); rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq); rt288x_wdt_dev.parent = &pdev->dev; diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c index b067edf246df..c7bdc986dca1 100644 --- a/drivers/watchdog/softdog.c +++ b/drivers/watchdog/softdog.c @@ -72,10 +72,27 @@ static void softdog_fire(unsigned long data) static struct timer_list softdog_ticktock = TIMER_INITIALIZER(softdog_fire, 0, 0); +static struct
watchdog_device softdog_dev; + +static void softdog_pretimeout(unsigned long data) +{ + watchdog_notify_pretimeout(&softdog_dev); +} + +static struct timer_list softdog_preticktock = + TIMER_INITIALIZER(softdog_pretimeout, 0, 0); + static int softdog_ping(struct watchdog_device *w) { if (!mod_timer(&softdog_ticktock, jiffies + (w->timeout * HZ))) __module_get(THIS_MODULE); + + if (w->pretimeout) + mod_timer(&softdog_preticktock, jiffies + + (w->timeout - w->pretimeout) * HZ); + else + del_timer(&softdog_preticktock); + return 0; } @@ -84,15 +101,18 @@ static int softdog_stop(struct watchdog_device *w) if (del_timer(&softdog_ticktock)) module_put(THIS_MODULE); + del_timer(&softdog_preticktock); + return 0; } static struct watchdog_info softdog_info = { .identity = "Software Watchdog", - .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | + WDIOF_PRETIMEOUT, }; -static struct watchdog_ops softdog_ops = { +static const struct watchdog_ops softdog_ops = { .owner = THIS_MODULE, .start = softdog_ping, .stop = softdog_stop, diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c index 14e9badf2bfa..e6100e447dd8 100644 --- a/drivers/watchdog/st_lpc_wdt.c +++ b/drivers/watchdog/st_lpc_wdt.c @@ -52,27 +52,6 @@ struct st_wdog { bool warm_reset; }; -static struct st_wdog_syscfg stid127_syscfg = { - .reset_type_reg = 0x004, - .reset_type_mask = BIT(2), - .enable_reg = 0x000, - .enable_mask = BIT(2), -}; - -static struct st_wdog_syscfg stih415_syscfg = { - .reset_type_reg = 0x0B8, - .reset_type_mask = BIT(6), - .enable_reg = 0x0B4, - .enable_mask = BIT(7), -}; - -static struct st_wdog_syscfg stih416_syscfg = { - .reset_type_reg = 0x88C, - .reset_type_mask = BIT(6), - .enable_reg = 0x888, - .enable_mask = BIT(7), -}; - static struct st_wdog_syscfg stih407_syscfg = { .enable_reg = 0x204, .enable_mask = BIT(19), @@ -83,18 +62,6 @@ static const struct of_device_id st_wdog_match[] = { .compatible = "st,stih407-lpc", .data = &stih407_syscfg, }, - { - .compatible = "st,stih416-lpc", - .data = &stih416_syscfg, - }, - { - .compatible = "st,stih415-lpc", - .data = &stih415_syscfg, - }, - { - .compatible = "st,stid127-lpc", - .data = &stid127_syscfg, - }, {}, }; MODULE_DEVICE_TABLE(of, st_wdog_match); diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c index 9ec57608da82..2d53c3f9394f 100644 --- a/drivers/watchdog/tegra_wdt.c +++ b/drivers/watchdog/tegra_wdt.c @@ -178,7 +178,7 @@ static const struct watchdog_info tegra_wdt_info = { .identity = "Tegra Watchdog", }; -static struct watchdog_ops tegra_wdt_ops = { +static const struct watchdog_ops tegra_wdt_ops = { .owner = THIS_MODULE, .start = tegra_wdt_start, .stop = tegra_wdt_stop, diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c index c2da880292bc..6f7a9deb27d0 100644 --- a/drivers/watchdog/txx9wdt.c +++ b/drivers/watchdog/txx9wdt.c @@ -112,7 +112,7 @@ static int __init txx9wdt_probe(struct platform_device *dev) txx9_imclk = NULL; goto exit; } - ret = clk_enable(txx9_imclk); + ret = clk_prepare_enable(txx9_imclk); if (ret) { clk_put(txx9_imclk); txx9_imclk = NULL; @@ -144,7 +144,7 @@ static int __init txx9wdt_probe(struct platform_device *dev) return 0; exit: if (txx9_imclk) { - clk_disable(txx9_imclk); + clk_disable_unprepare(txx9_imclk); clk_put(txx9_imclk); } return ret; @@ -153,7 +153,7 @@ exit: static int __exit txx9wdt_remove(struct platform_device *dev) { watchdog_unregister_device(&txx9wdt); - 
clk_disable(txx9_imclk); + clk_disable_unprepare(txx9_imclk); clk_put(txx9_imclk); return 0; } diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c index 09e8003039dc..ef2ecaf53a14 100644 --- a/drivers/watchdog/w83627hf_wdt.c +++ b/drivers/watchdog/w83627hf_wdt.c @@ -302,7 +302,7 @@ static struct watchdog_info wdt_info = { .identity = "W83627HF Watchdog", }; -static struct watchdog_ops wdt_ops = { +static const struct watchdog_ops wdt_ops = { .owner = THIS_MODULE, .start = wdt_start, .stop = wdt_stop, diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c index 6abb83cd7681..74265b2f806c 100644 --- a/drivers/watchdog/watchdog_core.c +++ b/drivers/watchdog/watchdog_core.c @@ -349,7 +349,7 @@ int devm_watchdog_register_device(struct device *dev, struct watchdog_device **rcwdd; int ret; - rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*wdd), + rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*rcwdd), GFP_KERNEL); if (!rcwdd) return -ENOMEM; diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 040bf8382f46..32930a073a12 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -49,6 +49,7 @@ #include <linux/uaccess.h> /* For copy_to_user/put_user/... */ #include "watchdog_core.h" +#include "watchdog_pretimeout.h" /* * struct watchdog_core_data - watchdog core internal data @@ -335,10 +336,14 @@ static int watchdog_set_timeout(struct watchdog_device *wdd, if (watchdog_timeout_invalid(wdd, timeout)) return -EINVAL; - if (wdd->ops->set_timeout) + if (wdd->ops->set_timeout) { err = wdd->ops->set_timeout(wdd, timeout); - else + } else { wdd->timeout = timeout; + /* Disable pretimeout if it doesn't fit the new timeout */ + if (wdd->pretimeout >= wdd->timeout) + wdd->pretimeout = 0; + } watchdog_update_worker(wdd); @@ -346,6 +351,31 @@ static int watchdog_set_timeout(struct watchdog_device *wdd, } /* + * watchdog_set_pretimeout: set the watchdog timer pretimeout + * @wdd: the watchdog device to set the timeout for + * @timeout: pretimeout to set in seconds + */ + +static int watchdog_set_pretimeout(struct watchdog_device *wdd, + unsigned int timeout) +{ + int err = 0; + + if (!(wdd->info->options & WDIOF_PRETIMEOUT)) + return -EOPNOTSUPP; + + if (watchdog_pretimeout_invalid(wdd, timeout)) + return -EINVAL; + + if (wdd->ops->set_pretimeout) + err = wdd->ops->set_pretimeout(wdd, timeout); + else + wdd->pretimeout = timeout; + + return err; +} + +/* * watchdog_get_timeleft: wrapper to get the time left before a reboot * @wdd: the watchdog device to get the remaining time from * @timeleft: the time that's left @@ -429,6 +459,15 @@ static ssize_t timeout_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(timeout); +static ssize_t pretimeout_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct watchdog_device *wdd = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", wdd->pretimeout); +} +static DEVICE_ATTR_RO(pretimeout); + static ssize_t identity_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -450,6 +489,36 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(state); +static ssize_t pretimeout_available_governors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return watchdog_pretimeout_available_governors_get(buf); +} +static DEVICE_ATTR_RO(pretimeout_available_governors); + +static ssize_t 
pretimeout_governor_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct watchdog_device *wdd = dev_get_drvdata(dev); + + return watchdog_pretimeout_governor_get(wdd, buf); +} + +static ssize_t pretimeout_governor_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct watchdog_device *wdd = dev_get_drvdata(dev); + int ret = watchdog_pretimeout_governor_set(wdd, buf); + + if (!ret) + ret = count; + + return ret; +} +static DEVICE_ATTR_RW(pretimeout_governor); + static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr, int n) { @@ -459,6 +528,14 @@ static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr, if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft) mode = 0; + else if (attr == &dev_attr_pretimeout.attr && + !(wdd->info->options & WDIOF_PRETIMEOUT)) + mode = 0; + else if ((attr == &dev_attr_pretimeout_governor.attr || + attr == &dev_attr_pretimeout_available_governors.attr) && + (!(wdd->info->options & WDIOF_PRETIMEOUT) || + !IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV))) + mode = 0; return mode; } @@ -466,10 +543,13 @@ static struct attribute *wdt_attrs[] = { &dev_attr_state.attr, &dev_attr_identity.attr, &dev_attr_timeout.attr, + &dev_attr_pretimeout.attr, &dev_attr_timeleft.attr, &dev_attr_bootstatus.attr, &dev_attr_status.attr, &dev_attr_nowayout.attr, + &dev_attr_pretimeout_governor.attr, + &dev_attr_pretimeout_available_governors.attr, NULL, }; @@ -646,6 +726,16 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd, break; err = put_user(val, p); break; + case WDIOC_SETPRETIMEOUT: + if (get_user(val, p)) { + err = -EFAULT; + break; + } + err = watchdog_set_pretimeout(wdd, val); + break; + case WDIOC_GETPRETIMEOUT: + err = put_user(wdd->pretimeout, p); + break; default: err = -ENOTTY; break; @@ -937,6 +1027,12 @@ int watchdog_dev_register(struct watchdog_device *wdd) return PTR_ERR(dev); } + ret = watchdog_register_pretimeout(wdd); + if (ret) { + device_destroy(&watchdog_class, devno); + watchdog_cdev_unregister(wdd); + } + return ret; } @@ -950,6 +1046,7 @@ int watchdog_dev_register(struct watchdog_device *wdd) void watchdog_dev_unregister(struct watchdog_device *wdd) { + watchdog_unregister_pretimeout(wdd); device_destroy(&watchdog_class, wdd->wd_data->cdev.dev); watchdog_cdev_unregister(wdd); } diff --git a/drivers/watchdog/watchdog_pretimeout.c b/drivers/watchdog/watchdog_pretimeout.c new file mode 100644 index 000000000000..9db07bfb4334 --- /dev/null +++ b/drivers/watchdog/watchdog_pretimeout.c @@ -0,0 +1,220 @@ +/* + * Copyright (C) 2015-2016 Mentor Graphics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/watchdog.h> + +#include "watchdog_pretimeout.h" + +/* Default watchdog pretimeout governor */ +static struct watchdog_governor *default_gov; + +/* The spinlock protects default_gov, wdd->gov and pretimeout_list */ +static DEFINE_SPINLOCK(pretimeout_lock); + +/* List of watchdog devices, which can generate a pretimeout event */ +static LIST_HEAD(pretimeout_list); + +struct watchdog_pretimeout { + struct watchdog_device *wdd; + struct list_head entry; +}; + +/* The mutex protects governor list and serializes external interfaces */ +static DEFINE_MUTEX(governor_lock); + +/* List of the registered watchdog pretimeout governors */ +static LIST_HEAD(governor_list); + +struct governor_priv { + struct watchdog_governor *gov; + struct list_head entry; +}; + +static struct governor_priv *find_governor_by_name(const char *gov_name) +{ + struct governor_priv *priv; + + list_for_each_entry(priv, &governor_list, entry) + if (sysfs_streq(gov_name, priv->gov->name)) + return priv; + + return NULL; +} + +int watchdog_pretimeout_available_governors_get(char *buf) +{ + struct governor_priv *priv; + int count = 0; + + mutex_lock(&governor_lock); + + list_for_each_entry(priv, &governor_list, entry) + count += sprintf(buf + count, "%s\n", priv->gov->name); + + mutex_unlock(&governor_lock); + + return count; +} + +int watchdog_pretimeout_governor_get(struct watchdog_device *wdd, char *buf) +{ + int count = 0; + + spin_lock_irq(&pretimeout_lock); + if (wdd->gov) + count = sprintf(buf, "%s\n", wdd->gov->name); + spin_unlock_irq(&pretimeout_lock); + + return count; +} + +int watchdog_pretimeout_governor_set(struct watchdog_device *wdd, + const char *buf) +{ + struct governor_priv *priv; + + mutex_lock(&governor_lock); + + priv = find_governor_by_name(buf); + if (!priv) { + mutex_unlock(&governor_lock); + return -EINVAL; + } + + spin_lock_irq(&pretimeout_lock); + wdd->gov = priv->gov; + spin_unlock_irq(&pretimeout_lock); + + mutex_unlock(&governor_lock); + + return 0; +} + +void watchdog_notify_pretimeout(struct watchdog_device *wdd) +{ + unsigned long flags; + + spin_lock_irqsave(&pretimeout_lock, flags); + if (!wdd->gov) { + spin_unlock_irqrestore(&pretimeout_lock, flags); + return; + } + + wdd->gov->pretimeout(wdd); + spin_unlock_irqrestore(&pretimeout_lock, flags); +} +EXPORT_SYMBOL_GPL(watchdog_notify_pretimeout); + +int watchdog_register_governor(struct watchdog_governor *gov) +{ + struct watchdog_pretimeout *p; + struct governor_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + mutex_lock(&governor_lock); + + if (find_governor_by_name(gov->name)) { + mutex_unlock(&governor_lock); + kfree(priv); + return -EBUSY; + } + + priv->gov = gov; + list_add(&priv->entry, &governor_list); + + if (!strncmp(gov->name, WATCHDOG_PRETIMEOUT_DEFAULT_GOV, + WATCHDOG_GOV_NAME_MAXLEN)) { + spin_lock_irq(&pretimeout_lock); + default_gov = gov; + + list_for_each_entry(p, &pretimeout_list, entry) + if (!p->wdd->gov) + p->wdd->gov = default_gov; + spin_unlock_irq(&pretimeout_lock); + } + + mutex_unlock(&governor_lock); + + return 0; +} +EXPORT_SYMBOL(watchdog_register_governor); + +void watchdog_unregister_governor(struct watchdog_governor *gov) +{ + struct watchdog_pretimeout *p; + struct governor_priv *priv, *t; + + mutex_lock(&governor_lock); + + list_for_each_entry_safe(priv, t, &governor_list, entry) { + if (priv->gov == gov) { + list_del(&priv->entry); 
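+			/*
+			 * Only the bookkeeping node allocated in
+			 * watchdog_register_governor() is freed below; the
+			 * governor struct itself is static data owned by
+			 * the governor module.
+			 */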
+ kfree(priv); + break; + } + } + + spin_lock_irq(&pretimeout_lock); + list_for_each_entry(p, &pretimeout_list, entry) + if (p->wdd->gov == gov) + p->wdd->gov = default_gov; + spin_unlock_irq(&pretimeout_lock); + + mutex_unlock(&governor_lock); +} +EXPORT_SYMBOL(watchdog_unregister_governor); + +int watchdog_register_pretimeout(struct watchdog_device *wdd) +{ + struct watchdog_pretimeout *p; + + if (!(wdd->info->options & WDIOF_PRETIMEOUT)) + return 0; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; + + spin_lock_irq(&pretimeout_lock); + list_add(&p->entry, &pretimeout_list); + p->wdd = wdd; + wdd->gov = default_gov; + spin_unlock_irq(&pretimeout_lock); + + return 0; +} + +void watchdog_unregister_pretimeout(struct watchdog_device *wdd) +{ + struct watchdog_pretimeout *p, *t; + + if (!(wdd->info->options & WDIOF_PRETIMEOUT)) + return; + + spin_lock_irq(&pretimeout_lock); + wdd->gov = NULL; + + list_for_each_entry_safe(p, t, &pretimeout_list, entry) { + if (p->wdd == wdd) { + list_del(&p->entry); + break; + } + } + spin_unlock_irq(&pretimeout_lock); + + kfree(p); +} diff --git a/drivers/watchdog/watchdog_pretimeout.h b/drivers/watchdog/watchdog_pretimeout.h new file mode 100644 index 000000000000..a5a32b39c56d --- /dev/null +++ b/drivers/watchdog/watchdog_pretimeout.h @@ -0,0 +1,60 @@ +#ifndef __WATCHDOG_PRETIMEOUT_H +#define __WATCHDOG_PRETIMEOUT_H + +#define WATCHDOG_GOV_NAME_MAXLEN 20 + +struct watchdog_device; + +struct watchdog_governor { + const char name[WATCHDOG_GOV_NAME_MAXLEN]; + void (*pretimeout)(struct watchdog_device *wdd); +}; + +#if IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV) +/* Interfaces to watchdog pretimeout governors */ +int watchdog_register_governor(struct watchdog_governor *gov); +void watchdog_unregister_governor(struct watchdog_governor *gov); + +/* Interfaces to watchdog_dev.c */ +int watchdog_register_pretimeout(struct watchdog_device *wdd); +void watchdog_unregister_pretimeout(struct watchdog_device *wdd); +int watchdog_pretimeout_available_governors_get(char *buf); +int watchdog_pretimeout_governor_get(struct watchdog_device *wdd, char *buf); +int watchdog_pretimeout_governor_set(struct watchdog_device *wdd, + const char *buf); + +#if IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP) +#define WATCHDOG_PRETIMEOUT_DEFAULT_GOV "noop" +#elif IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC) +#define WATCHDOG_PRETIMEOUT_DEFAULT_GOV "panic" +#endif + +#else +static inline int watchdog_register_pretimeout(struct watchdog_device *wdd) +{ + return 0; +} + +static inline void watchdog_unregister_pretimeout(struct watchdog_device *wdd) +{ +} + +static inline int watchdog_pretimeout_available_governors_get(char *buf) +{ + return -EINVAL; +} + +static inline int watchdog_pretimeout_governor_get(struct watchdog_device *wdd, + char *buf) +{ + return -EINVAL; +} + +static inline int watchdog_pretimeout_governor_set(struct watchdog_device *wdd, + const char *buf) +{ + return -EINVAL; +} +#endif + +#endif diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c index fa1efef3c96e..b4e0cea5a64e 100644 --- a/drivers/watchdog/ziirave_wdt.c +++ b/drivers/watchdog/ziirave_wdt.c @@ -18,7 +18,10 @@ * GNU General Public License for more details. 
*/ +#include <linux/delay.h> #include <linux/i2c.h> +#include <linux/ihex.h> +#include <linux/firmware.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> @@ -36,6 +39,8 @@ #define ZIIRAVE_STATE_OFF 0x1 #define ZIIRAVE_STATE_ON 0x2 +#define ZIIRAVE_FW_NAME "ziirave_wdt.fw" + static char *ziirave_reasons[] = {"power cycle", "hw watchdog", NULL, NULL, "host request", NULL, "illegal configuration", "illegal instruction", "illegal trap", @@ -50,12 +55,35 @@ static char *ziirave_reasons[] = {"power cycle", "hw watchdog", NULL, NULL, #define ZIIRAVE_WDT_PING 0x9 #define ZIIRAVE_WDT_RESET_DURATION 0xa +#define ZIIRAVE_FIRM_PKT_TOTAL_SIZE 20 +#define ZIIRAVE_FIRM_PKT_DATA_SIZE 16 +#define ZIIRAVE_FIRM_FLASH_MEMORY_START 0x1600 +#define ZIIRAVE_FIRM_FLASH_MEMORY_END 0x2bbf + +/* Received and ready for next Download packet. */ +#define ZIIRAVE_FIRM_DOWNLOAD_ACK 1 +/* Currently writing to flash. Retry Download status in a moment! */ +#define ZIIRAVE_FIRM_DOWNLOAD_BUSY 2 + +/* Wait for ACK timeout in ms */ +#define ZIIRAVE_FIRM_WAIT_FOR_ACK_TIMEOUT 50 + +/* Firmware commands */ +#define ZIIRAVE_CMD_DOWNLOAD_START 0x10 +#define ZIIRAVE_CMD_DOWNLOAD_END 0x11 +#define ZIIRAVE_CMD_DOWNLOAD_SET_READ_ADDR 0x12 +#define ZIIRAVE_CMD_DOWNLOAD_READ_BYTE 0x13 +#define ZIIRAVE_CMD_RESET_PROCESSOR 0x0b +#define ZIIRAVE_CMD_JUMP_TO_BOOTLOADER 0x0c +#define ZIIRAVE_CMD_DOWNLOAD_PACKET 0x0e + struct ziirave_wdt_rev { unsigned char major; unsigned char minor; }; struct ziirave_wdt_data { + struct mutex sysfs_mutex; struct watchdog_device wdd; struct ziirave_wdt_rev bootloader_rev; struct ziirave_wdt_rev firmware_rev; @@ -146,6 +174,293 @@ static unsigned int ziirave_wdt_get_timeleft(struct watchdog_device *wdd) return ret; } +static int ziirave_firm_wait_for_ack(struct watchdog_device *wdd) +{ + struct i2c_client *client = to_i2c_client(wdd->parent); + int ret; + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(ZIIRAVE_FIRM_WAIT_FOR_ACK_TIMEOUT); + do { + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + + usleep_range(5000, 10000); + + ret = i2c_smbus_read_byte(client); + if (ret < 0) { + dev_err(&client->dev, "Failed to read byte\n"); + return ret; + } + } while (ret == ZIIRAVE_FIRM_DOWNLOAD_BUSY); + + return ret == ZIIRAVE_FIRM_DOWNLOAD_ACK ? 0 : -EIO; +} + +static int ziirave_firm_set_read_addr(struct watchdog_device *wdd, u16 addr) +{ + struct i2c_client *client = to_i2c_client(wdd->parent); + u8 address[2]; + + address[0] = addr & 0xff; + address[1] = (addr >> 8) & 0xff; + + return i2c_smbus_write_block_data(client, + ZIIRAVE_CMD_DOWNLOAD_SET_READ_ADDR, + ARRAY_SIZE(address), address); +} + +static int ziirave_firm_write_block_data(struct watchdog_device *wdd, + u8 command, u8 length, const u8 *data, + bool wait_for_ack) +{ + struct i2c_client *client = to_i2c_client(wdd->parent); + int ret; + + ret = i2c_smbus_write_block_data(client, command, length, data); + if (ret) { + dev_err(&client->dev, + "Failed to send command 0x%02x: %d\n", command, ret); + return ret; + } + + if (wait_for_ack) + ret = ziirave_firm_wait_for_ack(wdd); + + return ret; +} + +static int ziirave_firm_write_byte(struct watchdog_device *wdd, u8 command, + u8 byte, bool wait_for_ack) +{ + return ziirave_firm_write_block_data(wdd, command, 1, &byte, + wait_for_ack); +} + +/* + * ziirave_firm_write_pkt() - Build and write a firmware packet + * + * A packet to send to the firmware is composed by following bytes: + * Length | Addr0 | Addr1 | Data0 .. 
Data15 | Checksum | + * Where, + * Length: A data byte containing the length of the data. + * Addr0: Low byte of the address. + * Addr1: High byte of the address. + * Data0 .. Data15: Array of 16 bytes of data. + * Checksum: Checksum byte to verify data integrity. + */ +static int ziirave_firm_write_pkt(struct watchdog_device *wdd, + const struct ihex_binrec *rec) +{ + struct i2c_client *client = to_i2c_client(wdd->parent); + u8 i, checksum = 0, packet[ZIIRAVE_FIRM_PKT_TOTAL_SIZE]; + int ret; + u16 addr; + + memset(packet, 0, ARRAY_SIZE(packet)); + + /* Packet length */ + packet[0] = (u8)be16_to_cpu(rec->len); + /* Packet address */ + addr = (be32_to_cpu(rec->addr) & 0xffff) >> 1; + packet[1] = addr & 0xff; + packet[2] = (addr & 0xff00) >> 8; + + /* Packet data */ + if (be16_to_cpu(rec->len) > ZIIRAVE_FIRM_PKT_DATA_SIZE) + return -EMSGSIZE; + memcpy(packet + 3, rec->data, be16_to_cpu(rec->len)); + + /* Packet checksum */ + for (i = 0; i < ZIIRAVE_FIRM_PKT_TOTAL_SIZE - 1; i++) + checksum += packet[i]; + packet[ZIIRAVE_FIRM_PKT_TOTAL_SIZE - 1] = checksum; + + ret = ziirave_firm_write_block_data(wdd, ZIIRAVE_CMD_DOWNLOAD_PACKET, + ARRAY_SIZE(packet), packet, true); + if (ret) + dev_err(&client->dev, + "Failed to write firmware packet at address 0x%04x: %d\n", + addr, ret); + + return ret; +} + +static int ziirave_firm_verify(struct watchdog_device *wdd, + const struct firmware *fw) +{ + struct i2c_client *client = to_i2c_client(wdd->parent); + const struct ihex_binrec *rec; + int i, ret; + u8 data[ZIIRAVE_FIRM_PKT_DATA_SIZE]; + u16 addr; + + for (rec = (void *)fw->data; rec; rec = ihex_next_binrec(rec)) { + /* Zero length marks end of records */ + if (!be16_to_cpu(rec->len)) + break; + + addr = (be32_to_cpu(rec->addr) & 0xffff) >> 1; + if (addr < ZIIRAVE_FIRM_FLASH_MEMORY_START || + addr > ZIIRAVE_FIRM_FLASH_MEMORY_END) + continue; + + ret = ziirave_firm_set_read_addr(wdd, addr); + if (ret) { + dev_err(&client->dev, + "Failed to send SET_READ_ADDR command: %d\n", + ret); + return ret; + } + + for (i = 0; i < ARRAY_SIZE(data); i++) { + ret = i2c_smbus_read_byte_data(client, + ZIIRAVE_CMD_DOWNLOAD_READ_BYTE); + if (ret < 0) { + dev_err(&client->dev, + "Failed to READ DATA: %d\n", ret); + return ret; + } + data[i] = ret; + } + + if (memcmp(data, rec->data, be16_to_cpu(rec->len))) { + dev_err(&client->dev, + "Firmware mismatch at address 0x%04x\n", addr); + return -EINVAL; + } + } + + return 0; +} + +static int ziirave_firm_upload(struct watchdog_device *wdd, + const struct firmware *fw) +{ + struct i2c_client *client = to_i2c_client(wdd->parent); + int ret, words_till_page_break; + const struct ihex_binrec *rec; + struct ihex_binrec *rec_new; + + ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_JUMP_TO_BOOTLOADER, 1, + false); + if (ret) + return ret; + + msleep(500); + + ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_DOWNLOAD_START, 1, true); + if (ret) + return ret; + + msleep(500); + + for (rec = (void *)fw->data; rec; rec = ihex_next_binrec(rec)) { + /* Zero length marks end of records */ + if (!be16_to_cpu(rec->len)) + break; + + /* Check max data size */ + if (be16_to_cpu(rec->len) > ZIIRAVE_FIRM_PKT_DATA_SIZE) { + dev_err(&client->dev, "Firmware packet too long (%d)\n", + be16_to_cpu(rec->len)); + return -EMSGSIZE; + } + + /* Calculate words till page break */ + words_till_page_break = (64 - ((be32_to_cpu(rec->addr) >> 1) & + 0x3f)); + if ((be16_to_cpu(rec->len) >> 1) > words_till_page_break) { + /* + * Data in passes page boundary, so we need to split in + * two blocks of data. 
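+ * For example (a hypothetical record): a 16-byte record at byte
+ * address 0x2078 starts 60 words into a 64-word flash page, since
+ * (0x2078 >> 1) & 0x3f == 60, so words_till_page_break is 4 and the
+ * eight data words are sent as two 4-word (8-byte) packets.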
Create a packet with the first + * block of data. + */ + rec_new = kzalloc(sizeof(struct ihex_binrec) + + (words_till_page_break << 1), + GFP_KERNEL); + if (!rec_new) + return -ENOMEM; + + rec_new->len = cpu_to_be16(words_till_page_break << 1); + rec_new->addr = rec->addr; + memcpy(rec_new->data, rec->data, + be16_to_cpu(rec_new->len)); + + ret = ziirave_firm_write_pkt(wdd, rec_new); + kfree(rec_new); + if (ret) + return ret; + + /* Create a packet with the second block of data */ + rec_new = kzalloc(sizeof(struct ihex_binrec) + + be16_to_cpu(rec->len) - + (words_till_page_break << 1), + GFP_KERNEL); + if (!rec_new) + return -ENOMEM; + + /* Remaining bytes */ + rec_new->len = rec->len - + cpu_to_be16(words_till_page_break << 1); + + rec_new->addr = cpu_to_be32(be32_to_cpu(rec->addr) + + (words_till_page_break << 1)); + + memcpy(rec_new->data, + rec->data + (words_till_page_break << 1), + be16_to_cpu(rec_new->len)); + + ret = ziirave_firm_write_pkt(wdd, rec_new); + kfree(rec_new); + if (ret) + return ret; + } else { + ret = ziirave_firm_write_pkt(wdd, rec); + if (ret) + return ret; + } + } + + /* For end of download, the length field will be set to 0 */ + rec_new = kzalloc(sizeof(struct ihex_binrec) + 1, GFP_KERNEL); + if (!rec_new) + return -ENOMEM; + + ret = ziirave_firm_write_pkt(wdd, rec_new); + kfree(rec_new); + if (ret) { + dev_err(&client->dev, "Failed to send EMPTY packet: %d\n", ret); + return ret; + } + + /* This sleep seems to be required */ + msleep(20); + + /* Start firmware verification */ + ret = ziirave_firm_verify(wdd, fw); + if (ret) { + dev_err(&client->dev, + "Failed to verify firmware: %d\n", ret); + return ret; + } + + /* End download operation */ + ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_DOWNLOAD_END, 1, false); + if (ret) + return ret; + + /* Reset the processor */ + ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_RESET_PROCESSOR, 1, + false); + if (ret) + return ret; + + msleep(500); + + return 0; +} + static const struct watchdog_info ziirave_wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .identity = "Zodiac RAVE Watchdog", @@ -166,9 +481,18 @@ static ssize_t ziirave_wdt_sysfs_show_firm(struct device *dev, { struct i2c_client *client = to_i2c_client(dev->parent); struct ziirave_wdt_data *w_priv = i2c_get_clientdata(client); + int ret; + + ret = mutex_lock_interruptible(&w_priv->sysfs_mutex); + if (ret) + return ret; + + ret = sprintf(buf, "02.%02u.%02u", w_priv->firmware_rev.major, + w_priv->firmware_rev.minor); - return sprintf(buf, "02.%02u.%02u", w_priv->firmware_rev.major, - w_priv->firmware_rev.minor); + mutex_unlock(&w_priv->sysfs_mutex); + + return ret; } static DEVICE_ATTR(firmware_version, S_IRUGO, ziirave_wdt_sysfs_show_firm, @@ -180,9 +504,18 @@ static ssize_t ziirave_wdt_sysfs_show_boot(struct device *dev, { struct i2c_client *client = to_i2c_client(dev->parent); struct ziirave_wdt_data *w_priv = i2c_get_clientdata(client); + int ret; - return sprintf(buf, "01.%02u.%02u", w_priv->bootloader_rev.major, - w_priv->bootloader_rev.minor); + ret = mutex_lock_interruptible(&w_priv->sysfs_mutex); + if (ret) + return ret; + + ret = sprintf(buf, "01.%02u.%02u", w_priv->bootloader_rev.major, + w_priv->bootloader_rev.minor); + + mutex_unlock(&w_priv->sysfs_mutex); + + return ret; } static DEVICE_ATTR(bootloader_version, S_IRUGO, ziirave_wdt_sysfs_show_boot, @@ -194,17 +527,81 @@ static ssize_t ziirave_wdt_sysfs_show_reason(struct device *dev, { struct i2c_client *client = to_i2c_client(dev->parent); struct ziirave_wdt_data 
*w_priv = i2c_get_clientdata(client); + int ret; + + ret = mutex_lock_interruptible(&w_priv->sysfs_mutex); + if (ret) + return ret; + + ret = sprintf(buf, "%s", ziirave_reasons[w_priv->reset_reason]); - return sprintf(buf, "%s", ziirave_reasons[w_priv->reset_reason]); + mutex_unlock(&w_priv->sysfs_mutex); + + return ret; } static DEVICE_ATTR(reset_reason, S_IRUGO, ziirave_wdt_sysfs_show_reason, NULL); +static ssize_t ziirave_wdt_sysfs_store_firm(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev->parent); + struct ziirave_wdt_data *w_priv = i2c_get_clientdata(client); + const struct firmware *fw; + int err; + + err = request_ihex_firmware(&fw, ZIIRAVE_FW_NAME, dev); + if (err) { + dev_err(&client->dev, "Failed to request ihex firmware\n"); + return err; + } + + err = mutex_lock_interruptible(&w_priv->sysfs_mutex); + if (err) + goto release_firmware; + + err = ziirave_firm_upload(&w_priv->wdd, fw); + if (err) { + dev_err(&client->dev, "The firmware update failed: %d\n", err); + goto unlock_mutex; + } + + /* Update firmware version */ + err = ziirave_wdt_revision(client, &w_priv->firmware_rev, + ZIIRAVE_WDT_FIRM_VER_MAJOR); + if (err) { + dev_err(&client->dev, "Failed to read firmware version: %d\n", + err); + goto unlock_mutex; + } + + dev_info(&client->dev, "Firmware updated to version 02.%02u.%02u\n", + w_priv->firmware_rev.major, w_priv->firmware_rev.minor); + + /* Restore the watchdog timeout */ + err = ziirave_wdt_set_timeout(&w_priv->wdd, w_priv->wdd.timeout); + if (err) + dev_err(&client->dev, "Failed to set timeout: %d\n", err); + +unlock_mutex: + mutex_unlock(&w_priv->sysfs_mutex); + +release_firmware: + release_firmware(fw); + + return err ? err : count; +} + +static DEVICE_ATTR(update_firmware, S_IWUSR, NULL, + ziirave_wdt_sysfs_store_firm); + static struct attribute *ziirave_wdt_attrs[] = { &dev_attr_firmware_version.attr, &dev_attr_bootloader_version.attr, &dev_attr_reset_reason.attr, + &dev_attr_update_firmware.attr, NULL }; ATTRIBUTE_GROUPS(ziirave_wdt); @@ -252,6 +649,8 @@ static int ziirave_wdt_probe(struct i2c_client *client, if (!w_priv) return -ENOMEM; + mutex_init(&w_priv->sysfs_mutex); + w_priv->wdd.info = &ziirave_wdt_info; w_priv->wdd.ops = &ziirave_wdt_ops; w_priv->wdd.min_timeout = ZIIRAVE_TIMEOUT_MIN; |
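The pretimeout plumbing added in this series is driven from userspace through the long-standing WDIOC_SETPRETIMEOUT/WDIOC_GETPRETIMEOUT ioctls, now wired up in watchdog_dev.c above, plus the new pretimeout_governor sysfs attribute. A minimal sketch, assuming a device at /dev/watchdog0; the 60 s/10 s values are arbitrary:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int fd, timeout = 60, pretimeout = 10;

		fd = open("/dev/watchdog0", O_WRONLY);
		if (fd < 0)
			return 1;

		/* 60 s timeout; the governor fires 10 s before expiry */
		ioctl(fd, WDIOC_SETTIMEOUT, &timeout);
		ioctl(fd, WDIOC_SETPRETIMEOUT, &pretimeout);

		ioctl(fd, WDIOC_GETPRETIMEOUT, &pretimeout);
		printf("pretimeout: %d s\n", pretimeout);

		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* feed the dog */

		write(fd, "V", 1);	/* magic close (WDIOF_MAGICCLOSE) */
		close(fd);
		return 0;
	}

The governor is then selected with, e.g., "echo panic > /sys/class/watchdog/watchdog0/pretimeout_governor", which lands in pretimeout_governor_store() shown above.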