Diffstat (limited to 'drivers/pci/host')
24 files changed, 2659 insertions, 461 deletions
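A recurring change in this series converts several host drivers (pci-aardvark.c, pci-host-common.c, pci-versatile.c, pcie-designware.c, pcie-rcar.c) from resource_list_for_each_entry() to the _safe variant, so that an I/O window whose pci_remap_iospace() call fails can be dropped from the resource list while it is being walked. A minimal sketch of the resulting pattern follows; the helper name parse_io_windows and its parameters are illustrative, not taken from any of the drivers below:

#include <linux/pci.h>
#include <linux/resource_ext.h>

/*
 * Sketch of the safe-iteration pattern used throughout this series.
 * Destroying the current entry inside the loop body is only legal with
 * the _safe iterator, which caches the next entry before the body runs.
 */
static void parse_io_windows(struct device *dev, struct list_head *resources,
			     resource_size_t iobase)
{
	struct resource_entry *win, *tmp;

	resource_list_for_each_entry_safe(win, tmp, resources) {
		struct resource *res = win->res;

		if (resource_type(res) != IORESOURCE_IO)
			continue;

		if (pci_remap_iospace(res, iobase)) {
			/* Unmappable window: drop it instead of keeping a stale entry. */
			dev_warn(dev, "failed to map resource %pR\n", res);
			resource_list_destroy_entry(win);
		}
	}
}
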
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 9b485d873b0d..d7e7c0a827c3 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -274,4 +274,31 @@ config PCIE_ARTPEC6 Say Y here to enable PCIe controller support on Axis ARTPEC-6 SoCs. This PCIe controller uses the DesignWare core. +config PCIE_ROCKCHIP + bool "Rockchip PCIe controller" + depends on ARCH_ROCKCHIP + depends on OF + depends on PCI_MSI_IRQ_DOMAIN + select MFD_SYSCON + help + Say Y here if you want internal PCI support on Rockchip SoC. + There is 1 internal PCIe port available to support GEN2 with + 4 slots. + +config VMD + depends on PCI_MSI && X86_64 + tristate "Intel Volume Management Device Driver" + default N + ---help--- + Adds support for the Intel Volume Management Device (VMD). VMD is a + secondary PCI host bridge that allows PCI Express root ports, + and devices attached to them, to be removed from the default + PCI domain and placed within the VMD domain. This provides + more bus resources than are otherwise possible with a + single domain. If you know your system provides one of these and + has devices attached to it, say Y; if you are not sure, say N. + + To compile this driver as a module, choose M here: the + module will be called vmd. + endmenu diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index 88434101e4c4..084cb4983645 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -31,3 +31,5 @@ obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o +obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o +obj-$(CONFIG_VMD) += vmd.o diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index ef9893fa3176..e4a5b7ee90cf 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c @@ -848,7 +848,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie) int err, res_valid = 0; struct device *dev = &pcie->pdev->dev; struct device_node *np = dev->of_node; - struct resource_entry *win; + struct resource_entry *win, *tmp; resource_size_t iobase; INIT_LIST_HEAD(&pcie->resources); @@ -862,7 +862,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie) if (err) goto out_release_res; - resource_list_for_each_entry(win, &pcie->resources) { + resource_list_for_each_entry_safe(win, tmp, &pcie->resources) { struct resource *res = win->res; switch (resource_type(res)) { @@ -874,9 +874,11 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie) lower_32_bits(res->start), OB_PCIE_IO); err = pci_remap_iospace(res, iobase); - if (err) + if (err) { dev_warn(dev, "error %d: failed to map resource %pR\n", err, res); + resource_list_destroy_entry(win); + } break; case IORESOURCE_MEM: advk_pcie_set_ob_win(pcie, 0, @@ -925,10 +927,8 @@ static int advk_pcie_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pcie->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(pcie->base)) { - dev_err(&pdev->dev, "Failed to map registers\n"); + if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); - } irq = platform_get_irq(pdev, 0); ret = devm_request_irq(&pdev->dev, irq, advk_pcie_irq_handler, diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c index 81b3949a26db..19223ed2e619 100644 --- a/drivers/pci/host/pci-dra7xx.c +++ b/drivers/pci/host/pci-dra7xx.c @@ -15,7 
+15,7 @@ #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of_gpio.h> #include <linux/pci.h> #include <linux/phy/phy.h> @@ -443,25 +443,6 @@ err_phy: return ret; } -static int __exit dra7xx_pcie_remove(struct platform_device *pdev) -{ - struct dra7xx_pcie *dra7xx = platform_get_drvdata(pdev); - struct pcie_port *pp = &dra7xx->pp; - struct device *dev = &pdev->dev; - int count = dra7xx->phy_count; - - if (pp->irq_domain) - irq_domain_remove(pp->irq_domain); - pm_runtime_put(dev); - pm_runtime_disable(dev); - while (count--) { - phy_power_off(dra7xx->phy[count]); - phy_exit(dra7xx->phy[count]); - } - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int dra7xx_pcie_suspend(struct device *dev) { @@ -545,19 +526,13 @@ static const struct of_device_id of_dra7xx_pcie_match[] = { { .compatible = "ti,dra7-pcie", }, {}, }; -MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match); static struct platform_driver dra7xx_pcie_driver = { - .remove = __exit_p(dra7xx_pcie_remove), .driver = { .name = "dra7-pcie", .of_match_table = of_dra7xx_pcie_match, + .suppress_bind_attrs = true, .pm = &dra7xx_pcie_pm_ops, }, }; - -module_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe); - -MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); -MODULE_DESCRIPTION("TI PCIe controller driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe); diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c index 219976103efc..2e2d7f00b9e8 100644 --- a/drivers/pci/host/pci-exynos.c +++ b/drivers/pci/host/pci-exynos.c @@ -16,7 +16,7 @@ #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of_gpio.h> #include <linux/pci.h> #include <linux/platform_device.h> @@ -425,12 +425,15 @@ static void exynos_pcie_enable_interrupts(struct pcie_port *pp) exynos_pcie_msi_init(pp); } -static inline void exynos_pcie_readl_rc(struct pcie_port *pp, - void __iomem *dbi_base, u32 *val) +static inline u32 exynos_pcie_readl_rc(struct pcie_port *pp, + void __iomem *dbi_base) { + u32 val; + exynos_pcie_sideband_dbi_r_mode(pp, true); - *val = readl(dbi_base); + val = readl(dbi_base); exynos_pcie_sideband_dbi_r_mode(pp, false); + return val; } static inline void exynos_pcie_writel_rc(struct pcie_port *pp, @@ -624,7 +627,6 @@ static const struct of_device_id exynos_pcie_of_match[] = { { .compatible = "samsung,exynos5440-pcie", }, {}, }; -MODULE_DEVICE_TABLE(of, exynos_pcie_of_match); static struct platform_driver exynos_pcie_driver = { .remove = __exit_p(exynos_pcie_remove), @@ -641,7 +643,3 @@ static int __init exynos_pcie_init(void) return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe); } subsys_initcall(exynos_pcie_init); - -MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); -MODULE_DESCRIPTION("Samsung PCIe host controller driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c index 9d9d34e959b6..e3c48b5deb93 100644 --- a/drivers/pci/host/pci-host-common.c +++ b/drivers/pci/host/pci-host-common.c @@ -1,4 +1,6 @@ /* + * Generic PCI host driver common code + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
@@ -17,7 +19,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/pci-ecam.h> @@ -29,7 +30,7 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev, int err, res_valid = 0; struct device_node *np = dev->of_node; resource_size_t iobase; - struct resource_entry *win; + struct resource_entry *win, *tmp; err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase); if (err) @@ -39,15 +40,17 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev, if (err) return err; - resource_list_for_each_entry(win, resources) { + resource_list_for_each_entry_safe(win, tmp, resources) { struct resource *res = win->res; switch (resource_type(res)) { case IORESOURCE_IO: err = pci_remap_iospace(res, iobase); - if (err) + if (err) { dev_warn(dev, "error %d: failed to map resource %pR\n", err, res); + resource_list_destroy_entry(win); + } break; case IORESOURCE_MEM: res_valid |= !(res->flags & IORESOURCE_PREFETCH); @@ -162,7 +165,3 @@ int pci_host_common_probe(struct platform_device *pdev, pci_bus_add_devices(bus); return 0; } - -MODULE_DESCRIPTION("Generic PCI host driver common code"); -MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 6955ffdb89f3..763ff8745828 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -200,11 +200,11 @@ struct tran_int_desc { */ struct pci_message { - u32 message_type; + u32 type; } __packed; struct pci_child_message { - u32 message_type; + struct pci_message message_type; union win_slot_encoding wslot; } __packed; @@ -222,7 +222,8 @@ struct pci_packet { void (*completion_func)(void *context, struct pci_response *resp, int resp_packet_size); void *compl_ctxt; - struct pci_message message; + + struct pci_message message[0]; }; /* @@ -258,7 +259,7 @@ struct pci_bus_d0_entry { struct pci_bus_relations { struct pci_incoming_message incoming; u32 device_count; - struct pci_function_description func[1]; + struct pci_function_description func[0]; } __packed; struct pci_q_res_req_response { @@ -314,7 +315,7 @@ struct pci_dev_incoming { } __packed; struct pci_eject_response { - u32 message_type; + struct pci_message message_type; union win_slot_encoding wslot; u32 status; } __packed; @@ -373,7 +374,6 @@ struct hv_pcibus_device { struct list_head children; struct list_head dr_list; - struct work_struct wrk; struct msi_domain_info msi_info; struct msi_controller msi_chip; @@ -393,7 +393,7 @@ struct hv_dr_work { struct hv_dr_state { struct list_head list_entry; u32 device_count; - struct pci_function_description func[1]; + struct pci_function_description func[0]; }; enum hv_pcichild_state { @@ -447,15 +447,16 @@ struct hv_pci_compl { * for any message for which the completion packet contains a * status and nothing else. 
*/ -static -void -hv_pci_generic_compl(void *context, struct pci_response *resp, - int resp_packet_size) +static void hv_pci_generic_compl(void *context, struct pci_response *resp, + int resp_packet_size) { struct hv_pci_compl *comp_pkt = context; if (resp_packet_size >= offsetofend(struct pci_response, status)) comp_pkt->completion_status = resp->status; + else + comp_pkt->completion_status = -1; + complete(&comp_pkt->host_event); } @@ -694,13 +695,12 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev, struct pci_delete_interrupt *int_pkt; struct { struct pci_packet pkt; - u8 buffer[sizeof(struct pci_delete_interrupt) - - sizeof(struct pci_message)]; + u8 buffer[sizeof(struct pci_delete_interrupt)]; } ctxt; memset(&ctxt, 0, sizeof(ctxt)); int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; - int_pkt->message_type.message_type = + int_pkt->message_type.type = PCI_DELETE_INTERRUPT_MESSAGE; int_pkt->wslot.slot = hpdev->desc.win_slot.slot; int_pkt->int_desc = *int_desc; @@ -847,8 +847,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct cpumask *affinity; struct { struct pci_packet pkt; - u8 buffer[sizeof(struct pci_create_interrupt) - - sizeof(struct pci_message)]; + u8 buffer[sizeof(struct pci_create_interrupt)]; } ctxt; int cpu; int ret; @@ -876,7 +875,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) ctxt.pkt.completion_func = hv_pci_compose_compl; ctxt.pkt.compl_ctxt = &comp; int_pkt = (struct pci_create_interrupt *)&ctxt.pkt.message; - int_pkt->message_type.message_type = PCI_CREATE_INTERRUPT_MESSAGE; + int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; int_pkt->wslot.slot = hpdev->desc.win_slot.slot; int_pkt->int_desc.vector = cfg->vector; int_pkt->int_desc.vector_count = 1; @@ -897,8 +896,10 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) sizeof(*int_pkt), (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - if (!ret) - wait_for_completion(&comp.comp_pkt.host_event); + if (ret) + goto free_int_desc; + + wait_for_completion(&comp.comp_pkt.host_event); if (comp.comp_pkt.completion_status < 0) { dev_err(&hbus->hdev->device, @@ -1289,7 +1290,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus, pkt.init_packet.compl_ctxt = &comp_pkt; pkt.init_packet.completion_func = q_resource_requirements; res_req = (struct pci_child_message *)&pkt.init_packet.message; - res_req->message_type = PCI_QUERY_RESOURCE_REQUIREMENTS; + res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS; res_req->wslot.slot = desc->win_slot.slot; ret = vmbus_sendpacket(hbus->hdev->channel, res_req, @@ -1466,8 +1467,7 @@ static void pci_devices_present_work(struct work_struct *work) if (hpdev->reported_missing) { found = true; put_pcichild(hpdev, hv_pcidev_ref_childlist); - list_del(&hpdev->list_entry); - list_add_tail(&hpdev->list_entry, &removed); + list_move_tail(&hpdev->list_entry, &removed); break; } } @@ -1558,8 +1558,7 @@ static void hv_eject_device_work(struct work_struct *work) int wslot; struct { struct pci_packet pkt; - u8 buffer[sizeof(struct pci_eject_response) - - sizeof(struct pci_message)]; + u8 buffer[sizeof(struct pci_eject_response)]; } ctxt; hpdev = container_of(work, struct hv_pci_dev, wrk); @@ -1585,7 +1584,7 @@ static void hv_eject_device_work(struct work_struct *work) memset(&ctxt, 0, sizeof(ctxt)); ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; - ejct_pkt->message_type = PCI_EJECTION_COMPLETE; +
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot; vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt, sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt, @@ -1688,7 +1687,7 @@ static void hv_pci_onchannelcallback(void *context) case VM_PKT_DATA_INBAND: new_message = (struct pci_incoming_message *)buffer; - switch (new_message->message_type.message_type) { + switch (new_message->message_type.type) { case PCI_BUS_RELATIONS: bus_rel = (struct pci_bus_relations *)buffer; @@ -1719,7 +1718,7 @@ static void hv_pci_onchannelcallback(void *context) default: dev_warn(&hbus->hdev->device, "Unimplemented protocol message %x\n", - new_message->message_type.message_type); + new_message->message_type.type); break; } break; @@ -1772,7 +1771,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev) pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; version_req = (struct pci_version_request *)&pkt->message; - version_req->message_type.message_type = PCI_QUERY_PROTOCOL_VERSION; + version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; version_req->protocol_version = PCI_PROTOCOL_VERSION_CURRENT; ret = vmbus_sendpacket(hdev->channel, version_req, @@ -1973,7 +1972,7 @@ static int hv_pci_enter_d0(struct hv_device *hdev) pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; d0_entry = (struct pci_bus_d0_entry *)&pkt->message; - d0_entry->message_type.message_type = PCI_BUS_D0ENTRY; + d0_entry->message_type.type = PCI_BUS_D0ENTRY; d0_entry->mmio_base = hbus->mem_config->start; ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry), @@ -2019,7 +2018,7 @@ static int hv_pci_query_relations(struct hv_device *hdev) return -ENOTEMPTY; memset(&message, 0, sizeof(message)); - message.message_type = PCI_QUERY_BUS_RELATIONS; + message.type = PCI_QUERY_BUS_RELATIONS; ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message), 0, VM_PKT_DATA_INBAND, 0); @@ -2072,8 +2071,8 @@ static int hv_send_resources_allocated(struct hv_device *hdev) init_completion(&comp_pkt.host_event); pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; - pkt->message.message_type = PCI_RESOURCES_ASSIGNED; res_assigned = (struct pci_resources_assigned *)&pkt->message; + res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED; res_assigned->wslot.slot = hpdev->desc.win_slot.slot; put_pcichild(hpdev, hv_pcidev_ref_by_slot); @@ -2123,7 +2122,7 @@ static int hv_send_resources_released(struct hv_device *hdev) continue; memset(&pkt, 0, sizeof(pkt)); - pkt.message_type = PCI_RESOURCES_RELEASED; + pkt.message_type.type = PCI_RESOURCES_RELEASED; pkt.wslot.slot = hpdev->desc.win_slot.slot; put_pcichild(hpdev, hv_pcidev_ref_by_slot); @@ -2290,7 +2289,7 @@ static int hv_pci_remove(struct hv_device *hdev) init_completion(&comp_pkt.host_event); pkt.teardown_packet.completion_func = hv_pci_generic_compl; pkt.teardown_packet.compl_ctxt = &comp_pkt; - pkt.teardown_packet.message.message_type = PCI_BUS_D0EXIT; + pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT; ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message, sizeof(struct pci_message), diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c index b741a36a67f3..ead4a5c3480b 100644 --- a/drivers/pci/host/pci-imx6.c +++ b/drivers/pci/host/pci-imx6.c @@ -739,7 +739,6 @@ static const struct of_device_id imx6_pcie_of_match[] = { { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, }, {}, }; -MODULE_DEVICE_TABLE(of, imx6_pcie_of_match); 
static struct platform_driver imx6_pcie_driver = { .driver = { @@ -749,14 +748,8 @@ static struct platform_driver imx6_pcie_driver = { .shutdown = imx6_pcie_shutdown, }; -/* Freescale PCIe driver does not allow module unload */ - static int __init imx6_pcie_init(void) { return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe); } -module_init(imx6_pcie_init); - -MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>"); -MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver"); -MODULE_LICENSE("GPL v2"); +device_initcall(imx6_pcie_init); diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c index 8ba28834d470..82b461b5b08a 100644 --- a/drivers/pci/host/pci-keystone.c +++ b/drivers/pci/host/pci-keystone.c @@ -334,8 +334,9 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, if (ks_pcie->error_irq <= 0) dev_info(&pdev->dev, "no error IRQ defined\n"); else { - if (request_irq(ks_pcie->error_irq, pcie_err_irq_handler, - IRQF_SHARED, "pcie-error-irq", ks_pcie) < 0) { + ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler, + IRQF_SHARED, "pcie-error-irq", ks_pcie); + if (ret < 0) { dev_err(&pdev->dev, "failed to request error IRQ %d\n", ks_pcie->error_irq); return ret; diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 6de0757b11e4..e2a8e4cab22e 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c @@ -240,7 +240,7 @@ struct tegra_msi { }; /* used to differentiate between Tegra SoC generations */ -struct tegra_pcie_soc_data { +struct tegra_pcie_soc { unsigned int num_ports; unsigned int msi_base_shift; u32 pads_pll_ctl; @@ -300,7 +300,7 @@ struct tegra_pcie { struct regulator_bulk_data *supplies; unsigned int num_supplies; - const struct tegra_pcie_soc_data *soc_data; + const struct tegra_pcie_soc *soc; struct dentry *debugfs; }; @@ -542,8 +542,8 @@ static void tegra_pcie_port_reset(struct tegra_pcie_port *port) static void tegra_pcie_port_enable(struct tegra_pcie_port *port) { - const struct tegra_pcie_soc_data *soc = port->pcie->soc_data; unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); + const struct tegra_pcie_soc *soc = port->pcie->soc; unsigned long value; /* enable reference clock */ @@ -562,8 +562,8 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port) static void tegra_pcie_port_disable(struct tegra_pcie_port *port) { - const struct tegra_pcie_soc_data *soc = port->pcie->soc_data; unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); + const struct tegra_pcie_soc *soc = port->pcie->soc; unsigned long value; /* assert port reset */ @@ -621,7 +621,11 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys) if (err < 0) return err; - pci_add_resource_offset(&sys->resources, &pcie->pio, sys->io_offset); + err = pci_remap_iospace(&pcie->pio, pcie->io.start); + if (!err) + pci_add_resource_offset(&sys->resources, &pcie->pio, + sys->io_offset); + pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); pci_add_resource_offset(&sys->resources, &pcie->prefetch, sys->mem_offset); @@ -631,7 +635,6 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys) if (err < 0) return err; - pci_remap_iospace(&pcie->pio, pcie->io.start); return 1; } @@ -774,7 +777,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie) static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout) { - const struct tegra_pcie_soc_data *soc = pcie->soc_data; + const struct tegra_pcie_soc *soc = pcie->soc; u32 value; timeout = jiffies + 
msecs_to_jiffies(timeout); @@ -790,7 +793,7 @@ static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout) static int tegra_pcie_phy_enable(struct tegra_pcie *pcie) { - const struct tegra_pcie_soc_data *soc = pcie->soc_data; + const struct tegra_pcie_soc *soc = pcie->soc; u32 value; int err; @@ -845,7 +848,7 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie) static int tegra_pcie_phy_disable(struct tegra_pcie *pcie) { - const struct tegra_pcie_soc_data *soc = pcie->soc_data; + const struct tegra_pcie_soc *soc = pcie->soc; u32 value; /* disable TX/RX data */ @@ -906,7 +909,7 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port) static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie) { - const struct tegra_pcie_soc_data *soc = pcie->soc_data; + const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_pcie_port *port; int err; @@ -974,7 +977,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie) static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) { - const struct tegra_pcie_soc_data *soc = pcie->soc_data; + const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_pcie_port *port; unsigned long value; int err; @@ -1067,7 +1070,7 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie) static int tegra_pcie_power_on(struct tegra_pcie *pcie) { - const struct tegra_pcie_soc_data *soc = pcie->soc_data; + const struct tegra_pcie_soc *soc = pcie->soc; int err; reset_control_assert(pcie->pcie_xrst); @@ -1117,7 +1120,7 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie) static int tegra_pcie_clocks_get(struct tegra_pcie *pcie) { - const struct tegra_pcie_soc_data *soc = pcie->soc_data; + const struct tegra_pcie_soc *soc = pcie->soc; pcie->pex_clk = devm_clk_get(pcie->dev, "pex"); if (IS_ERR(pcie->pex_clk)) @@ -1234,7 +1237,7 @@ static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port) static int tegra_pcie_phys_get(struct tegra_pcie *pcie) { - const struct tegra_pcie_soc_data *soc = pcie->soc_data; + const struct tegra_pcie_soc *soc = pcie->soc; struct device_node *np = pcie->dev->of_node; struct tegra_pcie_port *port; int err; @@ -1486,7 +1489,7 @@ static const struct irq_domain_ops msi_domain_ops = { static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) { struct platform_device *pdev = to_platform_device(pcie->dev); - const struct tegra_pcie_soc_data *soc = pcie->soc_data; + const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_msi *msi = &pcie->msi; unsigned long base; int err; @@ -1799,8 +1802,8 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) { - const struct tegra_pcie_soc_data *soc = pcie->soc_data; struct device_node *np = pcie->dev->of_node, *port; + const struct tegra_pcie_soc *soc = pcie->soc; struct of_pci_range_parser parser; struct of_pci_range range; u32 lanes = 0, mask = 0; @@ -2043,7 +2046,7 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie) return 0; } -static const struct tegra_pcie_soc_data tegra20_pcie_data = { +static const struct tegra_pcie_soc tegra20_pcie = { .num_ports = 2, .msi_base_shift = 0, .pads_pll_ctl = PADS_PLL_CTL_TEGRA20, @@ -2056,7 +2059,7 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = { .has_gen2 = false, }; -static const struct tegra_pcie_soc_data tegra30_pcie_data = { +static const struct tegra_pcie_soc tegra30_pcie = { .num_ports = 3, .msi_base_shift = 8, .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, @@ -2070,7 +2073,7 @@ static const struct 
tegra_pcie_soc_data tegra30_pcie_data = { .has_gen2 = false, }; -static const struct tegra_pcie_soc_data tegra124_pcie_data = { +static const struct tegra_pcie_soc tegra124_pcie = { .num_ports = 2, .msi_base_shift = 8, .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, @@ -2084,9 +2087,9 @@ static const struct tegra_pcie_soc_data tegra124_pcie_data = { }; static const struct of_device_id tegra_pcie_of_match[] = { - { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data }, - { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data }, - { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data }, + { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie }, + { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie }, + { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie }, { }, }; @@ -2201,21 +2204,16 @@ remove: static int tegra_pcie_probe(struct platform_device *pdev) { - const struct of_device_id *match; struct tegra_pcie *pcie; int err; - match = of_match_device(tegra_pcie_of_match, &pdev->dev); - if (!match) - return -ENODEV; - pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; + pcie->soc = of_device_get_match_data(&pdev->dev); INIT_LIST_HEAD(&pcie->buses); INIT_LIST_HEAD(&pcie->ports); - pcie->soc_data = match->data; pcie->dev = &pdev->dev; err = tegra_pcie_parse_dt(pcie); diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c index f234405770ab..b7dc07002f13 100644 --- a/drivers/pci/host/pci-versatile.c +++ b/drivers/pci/host/pci-versatile.c @@ -74,7 +74,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, int err, mem = 1, res_valid = 0; struct device_node *np = dev->of_node; resource_size_t iobase; - struct resource_entry *win; + struct resource_entry *win, *tmp; err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase); if (err) @@ -84,15 +84,17 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, if (err) goto out_release_res; - resource_list_for_each_entry(win, res) { + resource_list_for_each_entry_safe(win, tmp, res) { struct resource *res = win->res; switch (resource_type(res)) { case IORESOURCE_IO: err = pci_remap_iospace(res, iobase); - if (err) + if (err) { dev_warn(dev, "error %d: failed to map resource %pR\n", err, res); + resource_list_destroy_entry(win); + } break; case IORESOURCE_MEM: res_valid |= !(res->flags & IORESOURCE_PREFETCH); diff --git a/drivers/pci/host/pcie-altera-msi.c b/drivers/pci/host/pcie-altera-msi.c index 99177f4ccde2..4e5d628e8cd4 100644 --- a/drivers/pci/host/pcie-altera-msi.c +++ b/drivers/pci/host/pcie-altera-msi.c @@ -1,4 +1,8 @@ /* + * Altera PCIe MSI support + * + * Author: Ley Foon Tan <lftan@altera.com> + * * Copyright Altera Corporation (C) 2013-2015. 
All rights reserved * * This program is free software; you can redistribute it and/or modify it @@ -16,7 +20,7 @@ #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_irq.h> @@ -237,11 +241,6 @@ static int altera_msi_probe(struct platform_device *pdev) msi->pdev = pdev; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); - if (!res) { - dev_err(&pdev->dev, "no csr memory resource defined\n"); - return -ENODEV; - } - msi->csr_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(msi->csr_base)) { dev_err(&pdev->dev, "failed to map csr memory\n"); @@ -250,11 +249,6 @@ static int altera_msi_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vector_slave"); - if (!res) { - dev_err(&pdev->dev, "no vector_slave memory resource defined\n"); - return -ENODEV; - } - msi->vector_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(msi->vector_base)) { dev_err(&pdev->dev, "failed to map vector_slave memory\n"); @@ -308,7 +302,3 @@ static int __init altera_msi_init(void) return platform_driver_register(&altera_msi_driver); } subsys_initcall(altera_msi_init); - -MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>"); -MODULE_DESCRIPTION("Altera PCIe MSI support"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c index 2b7837650db8..c24e96559cbb 100644 --- a/drivers/pci/host/pcie-altera.c +++ b/drivers/pci/host/pcie-altera.c @@ -1,6 +1,9 @@ /* * Copyright Altera Corporation (C) 2013-2015. All rights reserved * + * Author: Ley Foon Tan <lftan@altera.com> + * Description: Altera PCIe host controller driver + * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. @@ -17,7 +20,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_pci.h> @@ -43,6 +46,7 @@ #define RP_LTSSM_MASK 0x1f #define LTSSM_L0 0xf +#define PCIE_CAP_OFFSET 0x80 /* TLP configuration type 0 and 1 */ #define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */ #define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */ @@ -61,7 +65,8 @@ #define TLP_LOOP 500 #define RP_DEVFN 0 -#define LINK_UP_TIMEOUT 5000 +#define LINK_UP_TIMEOUT HZ +#define LINK_RETRAIN_TIMEOUT HZ #define INTX_NUM 4 @@ -99,38 +104,6 @@ static bool altera_pcie_link_is_up(struct altera_pcie *pcie) return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); } -static void altera_pcie_retrain(struct pci_dev *dev) -{ - u16 linkcap, linkstat; - struct altera_pcie *pcie = dev->bus->sysdata; - int timeout = 0; - - if (!altera_pcie_link_is_up(pcie)) - return; - - /* - * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but - * current speed is 2.5 GB/s. 
- */ - pcie_capability_read_word(dev, PCI_EXP_LNKCAP, &linkcap); - - if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB) - return; - - pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat); - if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) { - pcie_capability_set_word(dev, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_RL); - while (!altera_pcie_link_is_up(pcie)) { - timeout++; - if (timeout > LINK_UP_TIMEOUT) - break; - udelay(5); - } - } -} -DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain); - /* * Altera PCIe port uses BAR0 of RC's configuration space as the translation * from PCI bus to native BUS. Entire DDR region is mapped into PCIe space @@ -171,13 +144,6 @@ static bool altera_pcie_valid_config(struct altera_pcie *pcie, if (bus->number == pcie->root_bus_nr && dev > 0) return false; - /* - * Do not read more than one device on the bus directly attached - * to root port, root port can only attach to one downstream port. - */ - if (bus->primary == pcie->root_bus_nr && dev > 0) - return false; - return true; } @@ -301,22 +267,14 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, return PCIBIOS_SUCCESSFUL; } -static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *value) +static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int where, int size, + u32 *value) { - struct altera_pcie *pcie = bus->sysdata; int ret; u32 data; u8 byte_en; - if (altera_pcie_hide_rc_bar(bus, devfn, where)) - return PCIBIOS_BAD_REGISTER_NUMBER; - - if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) { - *value = 0xffffffff; - return PCIBIOS_DEVICE_NOT_FOUND; - } - switch (size) { case 1: byte_en = 1 << (where & 3); @@ -329,7 +287,7 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn, break; } - ret = tlp_cfg_dword_read(pcie, bus->number, devfn, + ret = tlp_cfg_dword_read(pcie, busno, devfn, (where & ~DWORD_MASK), byte_en, &data); if (ret != PCIBIOS_SUCCESSFUL) return ret; @@ -349,20 +307,14 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn, return PCIBIOS_SUCCESSFUL; } -static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 value) +static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int where, int size, + u32 value) { - struct altera_pcie *pcie = bus->sysdata; u32 data32; u32 shift = 8 * (where & 3); u8 byte_en; - if (altera_pcie_hide_rc_bar(bus, devfn, where)) - return PCIBIOS_BAD_REGISTER_NUMBER; - - if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) - return PCIBIOS_DEVICE_NOT_FOUND; - switch (size) { case 1: data32 = (value & 0xff) << shift; @@ -378,8 +330,40 @@ static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn, break; } - return tlp_cfg_dword_write(pcie, bus->number, devfn, - (where & ~DWORD_MASK), byte_en, data32); + return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK), + byte_en, data32); +} + +static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *value) +{ + struct altera_pcie *pcie = bus->sysdata; + + if (altera_pcie_hide_rc_bar(bus, devfn, where)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) { + *value = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size, + value); +} + +static int 
altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 value) +{ + struct altera_pcie *pcie = bus->sysdata; + + if (altera_pcie_hide_rc_bar(bus, devfn, where)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) + return PCIBIOS_DEVICE_NOT_FOUND; + + return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size, + value); } static struct pci_ops altera_pcie_ops = { @@ -387,6 +371,90 @@ static struct pci_ops altera_pcie_ops = { .write = altera_pcie_cfg_write, }; +static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int offset, u16 *value) +{ + u32 data; + int ret; + + ret = _altera_pcie_cfg_read(pcie, busno, devfn, + PCIE_CAP_OFFSET + offset, sizeof(*value), + &data); + *value = data; + return ret; +} + +static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int offset, u16 value) +{ + return _altera_pcie_cfg_write(pcie, busno, devfn, + PCIE_CAP_OFFSET + offset, sizeof(value), + value); +} + +static void altera_wait_link_retrain(struct altera_pcie *pcie) +{ + u16 reg16; + unsigned long start_jiffies; + + /* Wait for link training end. */ + start_jiffies = jiffies; + for (;;) { + altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, + PCI_EXP_LNKSTA, &reg16); + if (!(reg16 & PCI_EXP_LNKSTA_LT)) + break; + + if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) { + dev_err(&pcie->pdev->dev, "link retrain timeout\n"); + break; + } + udelay(100); + } + + /* Wait for link is up */ + start_jiffies = jiffies; + for (;;) { + if (altera_pcie_link_is_up(pcie)) + break; + + if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) { + dev_err(&pcie->pdev->dev, "link up timeout\n"); + break; + } + udelay(100); + } +} + +static void altera_pcie_retrain(struct altera_pcie *pcie) +{ + u16 linkcap, linkstat, linkctl; + + if (!altera_pcie_link_is_up(pcie)) + return; + + /* + * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but + * current speed is 2.5 GB/s.
+ */ + altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP, + &linkcap); + if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB) + return; + + altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA, + &linkstat); + if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) { + altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, + PCI_EXP_LNKCTL, &linkctl); + linkctl |= PCI_EXP_LNKCTL_RL; + altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, + PCI_EXP_LNKCTL, linkctl); + + altera_wait_link_retrain(pcie); + } +} + static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { @@ -508,6 +576,11 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie) return 0; } +static void altera_pcie_host_init(struct altera_pcie *pcie) +{ + altera_pcie_retrain(pcie); +} + static int altera_pcie_probe(struct platform_device *pdev) { struct altera_pcie *pcie; @@ -545,6 +618,7 @@ static int altera_pcie_probe(struct platform_device *pdev) cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS); /* enable all interrupts */ cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); + altera_pcie_host_init(pcie); bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops, pcie, &pcie->resources); @@ -568,7 +642,6 @@ static const struct of_device_id altera_pcie_of_match[] = { { .compatible = "altr,pcie-root-port-1.0", }, {}, }; -MODULE_DEVICE_TABLE(of, altera_pcie_of_match); static struct platform_driver altera_pcie_driver = { .probe = altera_pcie_probe, @@ -583,8 +656,4 @@ static int altera_pcie_init(void) { return platform_driver_register(&altera_pcie_driver); } -module_init(altera_pcie_init); - -MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>"); -MODULE_DESCRIPTION("Altera PCIe host controller driver"); -MODULE_LICENSE("GPL v2"); +device_initcall(altera_pcie_init); diff --git a/drivers/pci/host/pcie-artpec6.c b/drivers/pci/host/pcie-artpec6.c index 16ba70b7ec65..39bf1a6df463 100644 --- a/drivers/pci/host/pcie-artpec6.c +++ b/drivers/pci/host/pcie-artpec6.c @@ -191,8 +191,8 @@ static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg) return dw_handle_msi_irq(pp); } -static int __init artpec6_add_pcie_port(struct pcie_port *pp, - struct platform_device *pdev) +static int artpec6_add_pcie_port(struct pcie_port *pp, + struct platform_device *pdev) { int ret; diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c index c8079dc81c10..17da005497a5 100644 --- a/drivers/pci/host/pcie-designware-plat.c +++ b/drivers/pci/host/pcie-designware-plat.c @@ -100,9 +100,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev) pp->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; - dw_plat_pcie->mem_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dw_plat_pcie->mem_base)) return PTR_ERR(dw_plat_pcie->mem_base); diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 12afce19890b..74da71ea544a 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -14,7 +14,6 @@ #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> -#include <linux/module.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_pci.h> @@ -26,7 +25,17 @@ #include "pcie-designware.h" -/* Synopsis specific PCIE configuration registers */ +/* Parameters for the waiting for link up routine */ +#define LINK_WAIT_MAX_RETRIES 10 +#define 
LINK_WAIT_USLEEP_MIN 90000 +#define LINK_WAIT_USLEEP_MAX 100000 + +/* Parameters for the waiting for iATU enabled routine */ +#define LINK_WAIT_MAX_IATU_RETRIES 5 +#define LINK_WAIT_IATU_MIN 9000 +#define LINK_WAIT_IATU_MAX 10000 + +/* Synopsys-specific PCIe configuration registers */ #define PCIE_PORT_LINK_CONTROL 0x710 #define PORT_LINK_MODE_MASK (0x3f << 16) #define PORT_LINK_MODE_1_LANES (0x1 << 16) @@ -51,6 +60,7 @@ #define PCIE_ATU_VIEWPORT 0x900 #define PCIE_ATU_REGION_INBOUND (0x1 << 31) #define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) +#define PCIE_ATU_REGION_INDEX2 (0x2 << 0) #define PCIE_ATU_REGION_INDEX1 (0x1 << 0) #define PCIE_ATU_REGION_INDEX0 (0x0 << 0) #define PCIE_ATU_CR1 0x904 @@ -70,10 +80,26 @@ #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) #define PCIE_ATU_UPPER_TARGET 0x91C +/* + * iATU Unroll-specific register definitions + * From 4.80 core version the address translation will be made by unroll + */ +#define PCIE_ATU_UNR_REGION_CTRL1 0x00 +#define PCIE_ATU_UNR_REGION_CTRL2 0x04 +#define PCIE_ATU_UNR_LOWER_BASE 0x08 +#define PCIE_ATU_UNR_UPPER_BASE 0x0C +#define PCIE_ATU_UNR_LIMIT 0x10 +#define PCIE_ATU_UNR_LOWER_TARGET 0x14 +#define PCIE_ATU_UNR_UPPER_TARGET 0x18 + +/* Register address builder */ +#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) ((0x3 << 20) | (region << 9)) + /* PCIe Port Logic registers */ #define PLR_OFFSET 0x700 #define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c) -#define PCIE_PHY_DEBUG_R1_LINK_UP 0x00000010 +#define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4) +#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29) static struct pci_ops dw_pcie_ops; @@ -115,12 +141,12 @@ int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val) return PCIBIOS_SUCCESSFUL; } -static inline void dw_pcie_readl_rc(struct pcie_port *pp, u32 reg, u32 *val) +static inline u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg) { if (pp->ops->readl_rc) - pp->ops->readl_rc(pp, pp->dbi_base + reg, val); - else - *val = readl(pp->dbi_base + reg); + return pp->ops->readl_rc(pp, pp->dbi_base + reg); + + return readl(pp->dbi_base + reg); } static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg) @@ -131,6 +157,27 @@ static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg) writel(val, pp->dbi_base + reg); } +static inline u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg) +{ + u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); + + if (pp->ops->readl_rc) + return pp->ops->readl_rc(pp, pp->dbi_base + offset + reg); + + return readl(pp->dbi_base + offset + reg); +} + +static inline void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index, + u32 val, u32 reg) +{ + u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); + + if (pp->ops->writel_rc) + pp->ops->writel_rc(pp, val, pp->dbi_base + offset + reg); + else + writel(val, pp->dbi_base + offset + reg); +} + static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val) { @@ -152,24 +199,57 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index, int type, u64 cpu_addr, u64 pci_addr, u32 size) { - u32 val; - - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index, - PCIE_ATU_VIEWPORT); - dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1), - PCIE_ATU_LIMIT); - dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET); - 
dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET); - dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); + u32 retries, val; + + if (pp->iatu_unroll_enabled) { + dw_pcie_writel_unroll(pp, index, + lower_32_bits(cpu_addr), PCIE_ATU_UNR_LOWER_BASE); + dw_pcie_writel_unroll(pp, index, + upper_32_bits(cpu_addr), PCIE_ATU_UNR_UPPER_BASE); + dw_pcie_writel_unroll(pp, index, + lower_32_bits(cpu_addr + size - 1), PCIE_ATU_UNR_LIMIT); + dw_pcie_writel_unroll(pp, index, + lower_32_bits(pci_addr), PCIE_ATU_UNR_LOWER_TARGET); + dw_pcie_writel_unroll(pp, index, + upper_32_bits(pci_addr), PCIE_ATU_UNR_UPPER_TARGET); + dw_pcie_writel_unroll(pp, index, + type, PCIE_ATU_UNR_REGION_CTRL1); + dw_pcie_writel_unroll(pp, index, + PCIE_ATU_ENABLE, PCIE_ATU_UNR_REGION_CTRL2); + } else { + dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index, + PCIE_ATU_VIEWPORT); + dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), + PCIE_ATU_LOWER_BASE); + dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), + PCIE_ATU_UPPER_BASE); + dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1), + PCIE_ATU_LIMIT); + dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), + PCIE_ATU_LOWER_TARGET); + dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), + PCIE_ATU_UPPER_TARGET); + dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1); + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); + } /* * Make sure ATU enable takes effect before any subsequent config * and I/O accesses. */ - dw_pcie_readl_rc(pp, PCIE_ATU_CR2, &val); + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + if (pp->iatu_unroll_enabled) + val = dw_pcie_readl_unroll(pp, index, + PCIE_ATU_UNR_REGION_CTRL2); + else + val = dw_pcie_readl_rc(pp, PCIE_ATU_CR2); + + if (val == PCIE_ATU_ENABLE) + return; + + usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); + } + dev_err(pp->dev, "iATU is not being enabled\n"); } static struct irq_chip dw_msi_irq_chip = { @@ -412,7 +492,8 @@ int dw_pcie_link_up(struct pcie_port *pp) return pp->ops->link_up(pp); val = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1); - return val & PCIE_PHY_DEBUG_R1_LINK_UP; + return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) && + (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING))); } static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq, @@ -428,6 +509,17 @@ static const struct irq_domain_ops msi_domain_ops = { .map = dw_pcie_msi_map, }; +static u8 dw_pcie_iatu_unroll_enabled(struct pcie_port *pp) +{ + u32 val; + + val = dw_pcie_readl_rc(pp, PCIE_ATU_VIEWPORT); + if (val == 0xffffffff) + return 1; + + return 0; +} + int dw_pcie_host_init(struct pcie_port *pp) { struct device_node *np = pp->dev->of_node; @@ -436,7 +528,7 @@ int dw_pcie_host_init(struct pcie_port *pp) struct resource *cfg_res; int i, ret; LIST_HEAD(res); - struct resource_entry *win; + struct resource_entry *win, *tmp; cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); if (cfg_res) { @@ -457,17 +549,20 @@ int dw_pcie_host_init(struct pcie_port *pp) goto error; /* Get the I/O and memory ranges from DT */ - resource_list_for_each_entry(win, &res) { + resource_list_for_each_entry_safe(win, tmp, &res) { switch (resource_type(win->res)) { case IORESOURCE_IO: - pp->io = win->res; - pp->io->name = "I/O"; - pp->io_size = resource_size(pp->io); - pp->io_bus_addr = pp->io->start - win->offset; - ret = pci_remap_iospace(pp->io, pp->io_base); - if (ret) + ret = pci_remap_iospace(win->res, pp->io_base); + if (ret) { dev_warn(pp->dev, "error %d: failed to map resource %pR\n", - ret, 
pp->io); + ret, win->res); + resource_list_destroy_entry(win); + } else { + pp->io = win->res; + pp->io->name = "I/O"; + pp->io_size = resource_size(pp->io); + pp->io_bus_addr = pp->io->start - win->offset; + } break; case IORESOURCE_MEM: pp->mem = win->res; @@ -524,6 +619,10 @@ int dw_pcie_host_init(struct pcie_port *pp) if (ret) pp->lanes = 0; + ret = of_property_read_u32(np, "num-viewport", &pp->num_viewport); + if (ret) + pp->num_viewport = 2; + if (IS_ENABLED(CONFIG_PCI_MSI)) { if (!pp->ops->msi_host_init) { pp->irq_domain = irq_domain_add_linear(pp->dev->of_node, @@ -544,6 +643,8 @@ int dw_pcie_host_init(struct pcie_port *pp) } } + pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp); + if (pp->ops->host_init) pp->ops->host_init(pp); @@ -609,13 +710,14 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, va_cfg_base = pp->va_cfg1_base; } - dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, type, cpu_addr, busdev, cfg_size); ret = dw_pcie_cfg_read(va_cfg_base + where, size, val); - dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, - PCIE_ATU_TYPE_IO, pp->io_base, - pp->io_bus_addr, pp->io_size); + if (pp->num_viewport <= 2) + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, + PCIE_ATU_TYPE_IO, pp->io_base, + pp->io_bus_addr, pp->io_size); return ret; } @@ -646,13 +748,14 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, va_cfg_base = pp->va_cfg1_base; } - dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, type, cpu_addr, busdev, cfg_size); ret = dw_pcie_cfg_write(va_cfg_base + where, size, val); - dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, - PCIE_ATU_TYPE_IO, pp->io_base, - pp->io_bus_addr, pp->io_size); + if (pp->num_viewport <= 2) + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, + PCIE_ATU_TYPE_IO, pp->io_base, + pp->io_bus_addr, pp->io_size); return ret; } @@ -670,13 +773,6 @@ static int dw_pcie_valid_config(struct pcie_port *pp, if (bus->number == pp->root_bus_nr && dev > 0) return 0; - /* - * do not read more than one device on the bus directly attached - * to RC's (Virtual Bridge's) DS side. 
- */ - if (bus->primary == pp->root_bus_nr && dev > 0) - return 0; - return 1; } @@ -720,7 +816,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp) u32 val; /* set the number of lanes */ - dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val); + val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL); val &= ~PORT_LINK_MODE_MASK; switch (pp->lanes) { case 1: @@ -742,7 +838,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp) dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL); /* set link width speed control register */ - dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, &val); + val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL); val &= ~PORT_LOGIC_LINK_WIDTH_MASK; switch (pp->lanes) { case 1: @@ -765,19 +861,19 @@ void dw_pcie_setup_rc(struct pcie_port *pp) dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1); /* setup interrupt pins */ - dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val); + val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE); val &= 0xffff00ff; val |= 0x00000100; dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE); /* setup bus numbers */ - dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS, &val); + val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS); val &= 0xff000000; val |= 0x00010100; dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS); /* setup command register */ - dw_pcie_readl_rc(pp, PCI_COMMAND, &val); + val = dw_pcie_readl_rc(pp, PCI_COMMAND); val &= 0xffff0000; val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_SERR; @@ -788,10 +884,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp) * uses its own address translation component rather than ATU, so * we should not program the ATU here. */ - if (!pp->ops->rd_other_conf) - dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, + if (!pp->ops->rd_other_conf) { + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, PCIE_ATU_TYPE_MEM, pp->mem_base, pp->mem_bus_addr, pp->mem_size); + if (pp->num_viewport > 2) + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX2, + PCIE_ATU_TYPE_IO, pp->io_base, + pp->io_bus_addr, pp->io_size); + } dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); @@ -802,7 +903,3 @@ void dw_pcie_setup_rc(struct pcie_port *pp) val |= PORT_LOGIC_SPEED_CHANGE; dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); } - -MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); -MODULE_DESCRIPTION("Designware PCIe host controller driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h index f437f9b5be04..c8e5bc647f49 100644 --- a/drivers/pci/host/pcie-designware.h +++ b/drivers/pci/host/pcie-designware.h @@ -22,11 +22,6 @@ #define MAX_MSI_IRQS 32 #define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32) -/* Parameters for the waiting for link up routine */ -#define LINK_WAIT_MAX_RETRIES 10 -#define LINK_WAIT_USLEEP_MIN 90000 -#define LINK_WAIT_USLEEP_MAX 100000 - struct pcie_port { struct device *dev; u8 root_bus_nr; @@ -49,16 +44,17 @@ struct pcie_port { struct resource *busn; int irq; u32 lanes; + u32 num_viewport; struct pcie_host_ops *ops; int msi_irq; struct irq_domain *irq_domain; unsigned long msi_data; + u8 iatu_unroll_enabled; DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); }; struct pcie_host_ops { - void (*readl_rc)(struct pcie_port *pp, - void __iomem *dbi_base, u32 *val); + u32 (*readl_rc)(struct pcie_port *pp, void __iomem *dbi_base); void (*writel_rc)(struct pcie_port *pp, u32 val, void __iomem *dbi_base); int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val); diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c index 
f2f90c50f75d..5ec2d440a6b7 100644 --- a/drivers/pci/host/pcie-qcom.c +++ b/drivers/pci/host/pcie-qcom.c @@ -1,7 +1,11 @@ /* + * Qualcomm PCIe root complex driver + * * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. * Copyright 2015 Linaro Limited. * + * Author: Stanimir Varbanov <svarbanov@mm-sol.com> + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. @@ -19,7 +23,7 @@ #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/pci.h> @@ -570,37 +574,19 @@ static int qcom_pcie_probe(struct platform_device *pdev) return 0; } -static int qcom_pcie_remove(struct platform_device *pdev) -{ - struct qcom_pcie *pcie = platform_get_drvdata(pdev); - - qcom_ep_reset_assert(pcie); - phy_power_off(pcie->phy); - phy_exit(pcie->phy); - pcie->ops->deinit(pcie); - - return 0; -} - static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 }, { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 }, { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 }, { } }; -MODULE_DEVICE_TABLE(of, qcom_pcie_match); static struct platform_driver qcom_pcie_driver = { .probe = qcom_pcie_probe, - .remove = qcom_pcie_remove, .driver = { .name = "qcom-pcie", + .suppress_bind_attrs = true, .of_match_table = qcom_pcie_match, }, }; - -module_platform_driver(qcom_pcie_driver); - -MODULE_AUTHOR("Stanimir Varbanov <svarbanov@mm-sol.com>"); -MODULE_DESCRIPTION("Qualcomm PCIe root complex driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(qcom_pcie_driver); diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c index 65db7a221509..e06b1d3b4dea 100644 --- a/drivers/pci/host/pcie-rcar.c +++ b/drivers/pci/host/pcie-rcar.c @@ -84,8 +84,18 @@ #define IDSETR1 0x011004 #define TLCTLR 0x011048 #define MACSR 0x011054 +#define SPCHGFIN (1 << 4) +#define SPCHGFAIL (1 << 6) +#define SPCHGSUC (1 << 7) +#define LINK_SPEED (0xf << 16) +#define LINK_SPEED_2_5GTS (1 << 16) +#define LINK_SPEED_5_0GTS (2 << 16) #define MACCTLR 0x011058 +#define SPEED_CHANGE (1 << 24) #define SCRAMBLE_DISABLE (1 << 27) +#define MACS2R 0x011078 +#define MACCGSPSETR 0x011084 +#define SPCNGRSN (1 << 31) /* R-Car H1 PHY */ #define H1_PCIEPHYADRR 0x04000c @@ -385,11 +395,67 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci) return 1; } +static void rcar_pcie_force_speedup(struct rcar_pcie *pcie) +{ + unsigned int timeout = 1000; + u32 macsr; + + if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS) + return; + + if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) { + dev_err(pcie->dev, "Speed change already in progress\n"); + return; + } + + macsr = rcar_pci_read_reg(pcie, MACSR); + if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS) + goto done; + + /* Set target link speed to 5.0 GT/s */ + rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS, + PCI_EXP_LNKSTA_CLS_5_0GB); + + /* Set speed change reason as intentional factor */ + rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0); + + /* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */ + if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL)) + rcar_pci_write_reg(pcie, macsr, MACSR); + + /* Start link speed change */ + rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE); + + while (timeout--) { + macsr = 
rcar_pci_read_reg(pcie, MACSR); + if (macsr & SPCHGFIN) { + /* Clear the interrupt bits */ + rcar_pci_write_reg(pcie, macsr, MACSR); + + if (macsr & SPCHGFAIL) + dev_err(pcie->dev, "Speed change failed\n"); + + goto done; + } + + msleep(1); + }; + + dev_err(pcie->dev, "Speed change timed out\n"); + +done: + dev_info(pcie->dev, "Current link speed is %s GT/s\n", + (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5"); +} + static int rcar_pcie_enable(struct rcar_pcie *pcie) { struct pci_bus *bus, *child; LIST_HEAD(res); + /* Try setting 5 GT/s link speed */ + rcar_pcie_force_speedup(pcie); + rcar_pcie_setup(&res, pcie); pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); @@ -608,6 +674,18 @@ static int rcar_msi_alloc(struct rcar_msi *chip) return msi; } +static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs) +{ + int msi; + + mutex_lock(&chip->lock); + msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR, + order_base_2(no_irqs)); + mutex_unlock(&chip->lock); + + return msi; +} + static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq) { mutex_lock(&chip->lock); @@ -665,7 +743,7 @@ static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, if (hwirq < 0) return hwirq; - irq = irq_create_mapping(msi->domain, hwirq); + irq = irq_find_mapping(msi->domain, hwirq); if (!irq) { rcar_msi_free(msi, hwirq); return -EINVAL; @@ -682,6 +760,58 @@ static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, return 0; } +static int rcar_msi_setup_irqs(struct msi_controller *chip, + struct pci_dev *pdev, int nvec, int type) +{ + struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip); + struct rcar_msi *msi = to_rcar_msi(chip); + struct msi_desc *desc; + struct msi_msg msg; + unsigned int irq; + int hwirq; + int i; + + /* MSI-X interrupts are not supported */ + if (type == PCI_CAP_ID_MSIX) + return -EINVAL; + + WARN_ON(!list_is_singular(&pdev->dev.msi_list)); + desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list); + + hwirq = rcar_msi_alloc_region(msi, nvec); + if (hwirq < 0) + return -ENOSPC; + + irq = irq_find_mapping(msi->domain, hwirq); + if (!irq) + return -ENOSPC; + + for (i = 0; i < nvec; i++) { + /* + * irq_create_mapping() called from rcar_pcie_probe() pre- + * allocates descs, so there is no need to allocate descs here. + * We can therefore assume that if irq_find_mapping() above + * returns non-zero, then the descs are also successfully + * allocated. 
+ */ + if (irq_set_msi_desc_off(irq, i, desc)) { + /* TODO: clear */ + return -EINVAL; + } + } + + desc->nvec_used = nvec; + desc->msi_attrib.multiple = order_base_2(nvec); + + msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; + msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); + msg.data = hwirq; + + pci_write_msi_msg(irq, &msg); + + return 0; +} + static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) { struct rcar_msi *msi = to_rcar_msi(chip); @@ -716,12 +846,13 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) struct platform_device *pdev = to_platform_device(pcie->dev); struct rcar_msi *msi = &pcie->msi; unsigned long base; - int err; + int err, i; mutex_init(&msi->lock); msi->chip.dev = pcie->dev; msi->chip.setup_irq = rcar_msi_setup_irq; + msi->chip.setup_irqs = rcar_msi_setup_irqs; msi->chip.teardown_irq = rcar_msi_teardown_irq; msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR, @@ -731,6 +862,9 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) return -ENOMEM; } + for (i = 0; i < INT_PCI_MSI_NR; i++) + irq_create_mapping(msi->domain, i); + /* Two irqs are for MSI, but they are also used for non-MSI irqs */ err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq, IRQF_SHARED | IRQF_NO_THREAD, @@ -775,6 +909,10 @@ static int rcar_pcie_get_resources(struct platform_device *pdev, if (err) return err; + pcie->base = devm_ioremap_resource(&pdev->dev, &res); + if (IS_ERR(pcie->base)) + return PTR_ERR(pcie->base); + pcie->clk = devm_clk_get(&pdev->dev, "pcie"); if (IS_ERR(pcie->clk)) { dev_err(pcie->dev, "cannot get platform clock\n"); @@ -782,7 +920,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev, } err = clk_prepare_enable(pcie->clk); if (err) - goto fail_clk; + return err; pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus"); if (IS_ERR(pcie->bus_clk)) { @@ -792,7 +930,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev, } err = clk_prepare_enable(pcie->bus_clk); if (err) - goto err_map_reg; + goto fail_clk; i = irq_of_parse_and_map(pdev->dev.of_node, 0); if (!i) { @@ -810,12 +948,6 @@ static int rcar_pcie_get_resources(struct platform_device *pdev, } pcie->msi.irq2 = i; - pcie->base = devm_ioremap_resource(&pdev->dev, &res); - if (IS_ERR(pcie->base)) { - err = PTR_ERR(pcie->base); - goto err_map_reg; - } - return 0; err_map_reg: @@ -865,12 +997,16 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie, * Set up 64-bit inbound regions as the range parser doesn't * distinguish between 32 and 64-bit types. 
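 *
 * As a hypothetical worked example, pci_addr 0x2_0000_0000 is split
 * across an even/odd pair of entries:
 *
 *   PCIEPRAR(idx)     = lower_32_bits(pci_addr) = 0x00000000
 *   PCIEPRAR(idx + 1) = upper_32_bits(pci_addr) = 0x00000002
 *
 * with the window mask kept in the even entry, so PCIELAMR(idx + 1)
 * is written as 0.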
*/ - rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx)); + rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), + PCIEPRAR(idx)); rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx)); - rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx)); + rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, + PCIELAMR(idx)); - rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1)); - rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1)); + rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), + PCIEPRAR(idx + 1)); + rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), + PCIELAR(idx + 1)); rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1)); pci_addr += size; @@ -919,6 +1055,7 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie, /* Get the dma-ranges from DT */ for_each_of_pci_range(&parser, &range) { u64 end = range.cpu_addr + range.size - 1; + dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", range.flags, range.cpu_addr, end, range.pci_addr); @@ -932,9 +1069,12 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie, static const struct of_device_id rcar_pcie_of_match[] = { { .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 }, - { .compatible = "renesas,pcie-rcar-gen2", .data = rcar_pcie_hw_init_gen2 }, - { .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init_gen2 }, - { .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init_gen2 }, + { .compatible = "renesas,pcie-rcar-gen2", + .data = rcar_pcie_hw_init_gen2 }, + { .compatible = "renesas,pcie-r8a7790", + .data = rcar_pcie_hw_init_gen2 }, + { .compatible = "renesas,pcie-r8a7791", + .data = rcar_pcie_hw_init_gen2 }, { .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init }, {}, }; @@ -945,9 +1085,10 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci) struct device *dev = pci->dev; struct device_node *np = dev->of_node; resource_size_t iobase; - struct resource_entry *win; + struct resource_entry *win, *tmp; - err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, &iobase); + err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, + &iobase); if (err) return err; @@ -955,14 +1096,17 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci) if (err) goto out_release_res; - resource_list_for_each_entry(win, &pci->resources) { + resource_list_for_each_entry_safe(win, tmp, &pci->resources) { struct resource *res = win->res; if (resource_type(res) == IORESOURCE_IO) { err = pci_remap_iospace(res, iobase); - if (err) + if (err) { dev_warn(dev, "error %d: failed to map resource %pR\n", err, res); + + resource_list_destroy_entry(win); + } } } @@ -998,8 +1142,8 @@ static int rcar_pcie_probe(struct platform_device *pdev) return err; } - err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node); - if (err) + err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node); + if (err) return err; of_id = of_match_device(rcar_pcie_of_match, pcie->dev); diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c new file mode 100644 index 000000000000..b8c82fc812dc --- /dev/null +++ b/drivers/pci/host/pcie-rockchip.c @@ -0,0 +1,1229 @@ +/* + * Rockchip AXI PCIe host controller driver + * + * Copyright (c) 2016 Rockchip, Inc. + * + * Author: Shawn Lin <shawn.lin@rock-chips.com> + * Wenrui Li <wenrui.li@rock-chips.com> + * + * Bits taken from Synopsys Designware Host controller driver and + * ARM PCI Host generic driver. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_pci.h> +#include <linux/of_platform.h> +#include <linux/of_irq.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> + +/* + * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16 + * bits. This allows atomic updates of the register without locking. + */ +#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val)) +#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val) + +#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4) + +#define PCIE_CLIENT_BASE 0x0 +#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00) +#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001) +#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002) +#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008) +#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x)) +#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040) +#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080) +#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48) +#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000 +#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000 +#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c) +#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50) +#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5) +#define PCIE_CLIENT_INTR_SHIFT 5 +#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15) +#define PCIE_CLIENT_INT_MSG BIT(14) +#define PCIE_CLIENT_INT_HOT_RST BIT(13) +#define PCIE_CLIENT_INT_DPA BIT(12) +#define PCIE_CLIENT_INT_FATAL_ERR BIT(11) +#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10) +#define PCIE_CLIENT_INT_CORR_ERR BIT(9) +#define PCIE_CLIENT_INT_INTD BIT(8) +#define PCIE_CLIENT_INT_INTC BIT(7) +#define PCIE_CLIENT_INT_INTB BIT(6) +#define PCIE_CLIENT_INT_INTA BIT(5) +#define PCIE_CLIENT_INT_LOCAL BIT(4) +#define PCIE_CLIENT_INT_UDMA BIT(3) +#define PCIE_CLIENT_INT_PHY BIT(2) +#define PCIE_CLIENT_INT_HOT_PLUG BIT(1) +#define PCIE_CLIENT_INT_PWR_STCG BIT(0) + +#define PCIE_CLIENT_INT_LEGACY \ + (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \ + PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD) + +#define PCIE_CLIENT_INT_CLI \ + (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \ + PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \ + PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \ + PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \ + PCIE_CLIENT_INT_PHY) + +#define PCIE_CORE_CTRL_MGMT_BASE 0x900000 +#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000) +#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008 +#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018 +#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006 +#define PCIE_CORE_PL_CONF_LANE_SHIFT 1 +#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004) +#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8) +#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8 +#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff +#define 
PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020) +#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000 +#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16 +#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \ + (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT) +#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c) +#define PCIE_CORE_INT_PRFPE BIT(0) +#define PCIE_CORE_INT_CRFPE BIT(1) +#define PCIE_CORE_INT_RRPE BIT(2) +#define PCIE_CORE_INT_PRFO BIT(3) +#define PCIE_CORE_INT_CRFO BIT(4) +#define PCIE_CORE_INT_RT BIT(5) +#define PCIE_CORE_INT_RTR BIT(6) +#define PCIE_CORE_INT_PE BIT(7) +#define PCIE_CORE_INT_MTR BIT(8) +#define PCIE_CORE_INT_UCR BIT(9) +#define PCIE_CORE_INT_FCE BIT(10) +#define PCIE_CORE_INT_CT BIT(11) +#define PCIE_CORE_INT_UTC BIT(18) +#define PCIE_CORE_INT_MMVC BIT(19) +#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210) +#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300) + +#define PCIE_CORE_INT \ + (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \ + PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \ + PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \ + PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \ + PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \ + PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \ + PCIE_CORE_INT_MMVC) + +#define PCIE_RC_CONFIG_BASE 0xa00000 +#define PCIE_RC_CONFIG_VENDOR (PCIE_RC_CONFIG_BASE + 0x00) +#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) +#define PCIE_RC_CONFIG_SCC_SHIFT 16 +#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) +#define PCIE_RC_CONFIG_LCS_RETRAIN_LINK BIT(5) +#define PCIE_RC_CONFIG_LCS_LBMIE BIT(10) +#define PCIE_RC_CONFIG_LCS_LABIE BIT(11) +#define PCIE_RC_CONFIG_LCS_LBMS BIT(30) +#define PCIE_RC_CONFIG_LCS_LAMS BIT(31) +#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c) + +#define PCIE_CORE_AXI_CONF_BASE 0xc00000 +#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0) +#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f +#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00 +#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4) +#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8) +#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc) + +#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800 +#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0) +#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f +#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00 +#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4) + +/* Size of one AXI Region (not Region 0) */ +#define AXI_REGION_SIZE BIT(20) +/* Size of Region 0, equal to sum of sizes of other regions */ +#define AXI_REGION_0_SIZE (32 * (0x1 << 20)) +#define OB_REG_SIZE_SHIFT 5 +#define IB_ROOT_PORT_REG_SIZE_SHIFT 3 +#define AXI_WRAPPER_IO_WRITE 0x6 +#define AXI_WRAPPER_MEM_WRITE 0x2 + +#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3 +#define MIN_AXI_ADDR_BITS_PASSED 8 +#define ROCKCHIP_VENDOR_ID 0x1d87 +#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20) +#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15) +#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12) +#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0) +#define PCIE_ECAM_ADDR(bus, dev, func, reg) \ + (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \ + PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg)) + +#define RC_REGION_0_ADDR_TRANS_H 0x00000000 +#define RC_REGION_0_ADDR_TRANS_L 0x00000000 +#define RC_REGION_0_PASS_BITS (25 - 1) +#define MAX_AXI_WRAPPER_REGION_NUM 33 + +struct rockchip_pcie { + void __iomem *reg_base; /* DT axi-base */ + void __iomem *apb_base; /* DT apb-base */ + struct phy *phy; + 
struct reset_control *core_rst; + struct reset_control *mgmt_rst; + struct reset_control *mgmt_sticky_rst; + struct reset_control *pipe_rst; + struct clk *aclk_pcie; + struct clk *aclk_perf_pcie; + struct clk *hclk_pcie; + struct clk *clk_pcie_pm; + struct regulator *vpcie3v3; /* 3.3V power supply */ + struct regulator *vpcie1v8; /* 1.8V power supply */ + struct regulator *vpcie0v9; /* 0.9V power supply */ + struct gpio_desc *ep_gpio; + u32 lanes; + u8 root_bus_nr; + struct device *dev; + struct irq_domain *irq_domain; +}; + +static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg) +{ + return readl(rockchip->apb_base + reg); +} + +static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val, + u32 reg) +{ + writel(val, rockchip->apb_base + reg); +} + +static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip) +{ + u32 status; + + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= (PCIE_RC_CONFIG_LCS_LBMIE | PCIE_RC_CONFIG_LCS_LABIE); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); +} + +static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip) +{ + u32 status; + + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= (PCIE_RC_CONFIG_LCS_LBMS | PCIE_RC_CONFIG_LCS_LAMS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); +} + +static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip) +{ + u32 val; + + /* Update Tx credit maximum update interval */ + val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1); + val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK; + val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */ + rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1); +} + +static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, + struct pci_bus *bus, int dev) +{ + /* access only one slot on each root port */ + if (bus->number == rockchip->root_bus_nr && dev > 0) + return 0; + + /* + * do not read more than one device on the bus directly attached + * to RC's downstream side. + */ + if (bus->primary == rockchip->root_bus_nr && dev > 0) + return 0; + + return 1; +} + +static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip, + int where, int size, u32 *val) +{ + void __iomem *addr = rockchip->apb_base + PCIE_RC_CONFIG_BASE + where; + + if (!IS_ALIGNED((uintptr_t)addr, size)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (size == 4) { + *val = readl(addr); + } else if (size == 2) { + *val = readw(addr); + } else if (size == 1) { + *val = readb(addr); + } else { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + return PCIBIOS_SUCCESSFUL; +} + +static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, + int where, int size, u32 val) +{ + u32 mask, tmp, offset; + + offset = where & ~0x3; + + if (size == 4) { + writel(val, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset); + return PCIBIOS_SUCCESSFUL; + } + + mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); + + /* + * N.B. This read/modify/write isn't safe in general because it can + * corrupt RW1C bits in adjacent registers. But the hardware + * doesn't support smaller writes. 
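+ *
+ * A worked example for illustration: a hypothetical 1-byte write
+ * of 0xab to where = 0x06 yields offset = 0x04 and
+ * mask = ~(0xff << 16); the byte is merged into bits [23:16] of
+ * the 32-bit word before the full-word write below.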
+ */ + tmp = readl(rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset) & mask; + tmp |= val << ((where & 0x3) * 8); + writel(tmp, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset); + + return PCIBIOS_SUCCESSFUL; +} + +static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, + struct pci_bus *bus, u32 devfn, + int where, int size, u32 *val) +{ + u32 busdev; + + busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), where); + + if (!IS_ALIGNED(busdev, size)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (size == 4) { + *val = readl(rockchip->reg_base + busdev); + } else if (size == 2) { + *val = readw(rockchip->reg_base + busdev); + } else if (size == 1) { + *val = readb(rockchip->reg_base + busdev); + } else { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + return PCIBIOS_SUCCESSFUL; +} + +static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip, + struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + u32 busdev; + + busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), where); + if (!IS_ALIGNED(busdev, size)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (size == 4) + writel(val, rockchip->reg_base + busdev); + else if (size == 2) + writew(val, rockchip->reg_base + busdev); + else if (size == 1) + writeb(val, rockchip->reg_base + busdev); + else + return PCIBIOS_BAD_REGISTER_NUMBER; + + return PCIBIOS_SUCCESSFUL; +} + +static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, + int size, u32 *val) +{ + struct rockchip_pcie *rockchip = bus->sysdata; + + if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) { + *val = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (bus->number == rockchip->root_bus_nr) + return rockchip_pcie_rd_own_conf(rockchip, where, size, val); + + return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, val); +} + +static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + struct rockchip_pcie *rockchip = bus->sysdata; + + if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (bus->number == rockchip->root_bus_nr) + return rockchip_pcie_wr_own_conf(rockchip, where, size, val); + + return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, val); +} + +static struct pci_ops rockchip_pcie_ops = { + .read = rockchip_pcie_rd_conf, + .write = rockchip_pcie_wr_conf, +}; + +/** + * rockchip_pcie_init_port - Initialize hardware + * @rockchip: PCIe port information + */ +static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + int err; + u32 status; + unsigned long timeout; + + gpiod_set_value(rockchip->ep_gpio, 0); + + err = phy_init(rockchip->phy); + if (err < 0) { + dev_err(dev, "fail to init phy, err %d\n", err); + return err; + } + + err = reset_control_assert(rockchip->core_rst); + if (err) { + dev_err(dev, "assert core_rst err %d\n", err); + return err; + } + + err = reset_control_assert(rockchip->mgmt_rst); + if (err) { + dev_err(dev, "assert mgmt_rst err %d\n", err); + return err; + } + + err = reset_control_assert(rockchip->mgmt_sticky_rst); + if (err) { + dev_err(dev, "assert mgmt_sticky_rst err %d\n", err); + return err; + } + + err = reset_control_assert(rockchip->pipe_rst); + if (err) { + dev_err(dev, "assert pipe_rst err %d\n", err); + return err; + } + + rockchip_pcie_write(rockchip, + PCIE_CLIENT_CONF_ENABLE | + PCIE_CLIENT_LINK_TRAIN_ENABLE | + 
PCIE_CLIENT_ARI_ENABLE | + PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) | + PCIE_CLIENT_MODE_RC | + PCIE_CLIENT_GEN_SEL_2, + PCIE_CLIENT_CONFIG); + + err = phy_power_on(rockchip->phy); + if (err) { + dev_err(dev, "fail to power on phy, err %d\n", err); + return err; + } + + /* + * Please don't reorder the deassert sequence of the following + * four reset pins. + */ + err = reset_control_deassert(rockchip->mgmt_sticky_rst); + if (err) { + dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err); + return err; + } + + err = reset_control_deassert(rockchip->core_rst); + if (err) { + dev_err(dev, "deassert core_rst err %d\n", err); + return err; + } + + err = reset_control_deassert(rockchip->mgmt_rst); + if (err) { + dev_err(dev, "deassert mgmt_rst err %d\n", err); + return err; + } + + err = reset_control_deassert(rockchip->pipe_rst); + if (err) { + dev_err(dev, "deassert pipe_rst err %d\n", err); + return err; + } + + /* + * We need to read/write PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 before + * enabling ASPM. Otherwise L1PwrOnSc and L1PwrOnVal aren't + * reliable and enabling ASPM doesn't work. This is a controller + * bug we need to work around. + */ + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2); + + /* Set the transmitted FTS (fast training sequence) count required to exit L0s. */ + status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1); + status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) | + (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT); + rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1); + + /* Enable Gen1 training */ + rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, + PCIE_CLIENT_CONFIG); + + gpiod_set_value(rockchip->ep_gpio, 1); + + /* 500ms timeout value should be enough for Gen1/2 training */ + timeout = jiffies + msecs_to_jiffies(500); + + for (;;) { + status = rockchip_pcie_read(rockchip, + PCIE_CLIENT_BASIC_STATUS1); + if ((status & PCIE_CLIENT_LINK_STATUS_MASK) == + PCIE_CLIENT_LINK_STATUS_UP) { + dev_dbg(dev, "PCIe link training gen1 pass!\n"); + break; + } + + if (time_after(jiffies, timeout)) { + dev_err(dev, "PCIe link training gen1 timeout!\n"); + return -ETIMEDOUT; + } + + msleep(20); + } + + /* + * Enable retrain for gen2. This must be configured only after + * gen1 training has finished.
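+ *
+ * For orientation only: PCIE_RC_CONFIG_LCS mirrors the standard
+ * Link Control/Status pair (Retrain Link is LNKCTL bit 5), so the
+ * write below is roughly what
+ *
+ *   pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
+ *
+ * would do if this register were reached through a struct pci_dev.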
+ */ + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= PCIE_RC_CONFIG_LCS_RETRAIN_LINK; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + + timeout = jiffies + msecs_to_jiffies(500); + for (;;) { + status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL); + if ((status & PCIE_CORE_PL_CONF_SPEED_MASK) == + PCIE_CORE_PL_CONF_SPEED_5G) { + dev_dbg(dev, "PCIe link training gen2 pass!\n"); + break; + } + + if (time_after(jiffies, timeout)) { + dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n"); + break; + } + + msleep(20); + } + + /* Read the negotiated lane counter from MGMT to check the final link width */ + status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL); + status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >> + PCIE_CORE_PL_CONF_LANE_SHIFT); + dev_dbg(dev, "current link width is x%d\n", status); + + rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, + PCIE_RC_CONFIG_VENDOR); + rockchip_pcie_write(rockchip, + PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT, + PCIE_RC_CONFIG_RID_CCR); + rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF); + + rockchip_pcie_write(rockchip, + (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS), + PCIE_CORE_OB_REGION_ADDR0); + rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H, + PCIE_CORE_OB_REGION_ADDR1); + rockchip_pcie_write(rockchip, 0x0080000a, PCIE_CORE_OB_REGION_DESC0); + rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1); + + return 0; +} + +static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg) +{ + struct rockchip_pcie *rockchip = arg; + struct device *dev = rockchip->dev; + u32 reg; + u32 sub_reg; + + reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); + if (reg & PCIE_CLIENT_INT_LOCAL) { + dev_dbg(dev, "local interrupt received\n"); + sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS); + if (sub_reg & PCIE_CORE_INT_PRFPE) + dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n"); + + if (sub_reg & PCIE_CORE_INT_CRFPE) + dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n"); + + if (sub_reg & PCIE_CORE_INT_RRPE) + dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n"); + + if (sub_reg & PCIE_CORE_INT_PRFO) + dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n"); + + if (sub_reg & PCIE_CORE_INT_CRFO) + dev_dbg(dev, "overflow occurred in the completion receive FIFO\n"); + + if (sub_reg & PCIE_CORE_INT_RT) + dev_dbg(dev, "replay timer timed out\n"); + + if (sub_reg & PCIE_CORE_INT_RTR) + dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n"); + + if (sub_reg & PCIE_CORE_INT_PE) + dev_dbg(dev, "phy error detected on receive side\n"); + + if (sub_reg & PCIE_CORE_INT_MTR) + dev_dbg(dev, "malformed TLP received from the link\n"); + + if (sub_reg & PCIE_CORE_INT_UCR) + dev_dbg(dev, "unexpected completion received from the link\n"); + + if (sub_reg & PCIE_CORE_INT_FCE) + dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n"); + + if (sub_reg & PCIE_CORE_INT_CT) + dev_dbg(dev, "a request timed out waiting for completion\n"); + + if (sub_reg & PCIE_CORE_INT_UTC) + dev_dbg(dev, "unmapped TC error\n"); + + if (sub_reg & PCIE_CORE_INT_MMVC) + dev_dbg(dev, "MSI mask register changes\n"); + + rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS); + } else if (reg & PCIE_CLIENT_INT_PHY) { + dev_dbg(dev, "phy link changes\n"); + rockchip_pcie_update_txcredit_mui(rockchip); + rockchip_pcie_clr_bw_int(rockchip);
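+ /*
+ * A small arithmetic note: rockchip_pcie_update_txcredit_mui()
+ * above programs a 24000 ns interval, and since _MUI_ENCODE(x)
+ * is ((x >> 3) << 16), bits [31:16] end up holding
+ * 24000 / 8 = 3000 units.
+ */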
+ } + + rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL, + PCIE_CLIENT_INT_STATUS); + + return IRQ_HANDLED; +} + +static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg) +{ + struct rockchip_pcie *rockchip = arg; + struct device *dev = rockchip->dev; + u32 reg; + + reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); + if (reg & PCIE_CLIENT_INT_LEGACY_DONE) + dev_dbg(dev, "legacy done interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_MSG) + dev_dbg(dev, "message done interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_HOT_RST) + dev_dbg(dev, "hot reset interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_DPA) + dev_dbg(dev, "dpa interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_FATAL_ERR) + dev_dbg(dev, "fatal error interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_NFATAL_ERR) + dev_dbg(dev, "non-fatal error interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_CORR_ERR) + dev_dbg(dev, "correctable error interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_PHY) + dev_dbg(dev, "phy interrupt received\n"); + + rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE | + PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST | + PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR | + PCIE_CLIENT_INT_NFATAL_ERR | + PCIE_CLIENT_INT_CORR_ERR | + PCIE_CLIENT_INT_PHY), + PCIE_CLIENT_INT_STATUS); + + return IRQ_HANDLED; +} + +static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc); + struct device *dev = rockchip->dev; + u32 reg; + u32 hwirq; + u32 virq; + + chained_irq_enter(chip, desc); + + reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); + reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT; + + while (reg) { + hwirq = ffs(reg) - 1; + reg &= ~BIT(hwirq); + + virq = irq_find_mapping(rockchip->irq_domain, hwirq); + if (virq) + generic_handle_irq(virq); + else + dev_err(dev, "unexpected IRQ, INT%d\n", hwirq); + } + + chained_irq_exit(chip, desc); +} + +/** + * rockchip_pcie_parse_dt - Parse Device Tree + * @rockchip: PCIe port information + * + * Return: '0' on success and error value on failure + */ +static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + struct platform_device *pdev = to_platform_device(dev); + struct device_node *node = dev->of_node; + struct resource *regs; + int irq; + int err; + + regs = platform_get_resource_byname(pdev, + IORESOURCE_MEM, + "axi-base"); + rockchip->reg_base = devm_ioremap_resource(dev, regs); + if (IS_ERR(rockchip->reg_base)) + return PTR_ERR(rockchip->reg_base); + + regs = platform_get_resource_byname(pdev, + IORESOURCE_MEM, + "apb-base"); + rockchip->apb_base = devm_ioremap_resource(dev, regs); + if (IS_ERR(rockchip->apb_base)) + return PTR_ERR(rockchip->apb_base); + + rockchip->phy = devm_phy_get(dev, "pcie-phy"); + if (IS_ERR(rockchip->phy)) { + if (PTR_ERR(rockchip->phy) != -EPROBE_DEFER) + dev_err(dev, "missing phy\n"); + return PTR_ERR(rockchip->phy); + } + + rockchip->lanes = 1; + err = of_property_read_u32(node, "num-lanes", &rockchip->lanes); + if (!err && (rockchip->lanes == 0 || + rockchip->lanes == 3 || + rockchip->lanes > 4)) { + dev_warn(dev, "invalid num-lanes, defaulting to one lane\n"); + rockchip->lanes = 1; + } + + rockchip->core_rst = devm_reset_control_get(dev, "core"); + if (IS_ERR(rockchip->core_rst)) { + if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER) + dev_err(dev, "missing core
reset property in node\n"); + return PTR_ERR(rockchip->core_rst); + } + + rockchip->mgmt_rst = devm_reset_control_get(dev, "mgmt"); + if (IS_ERR(rockchip->mgmt_rst)) { + if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER) + dev_err(dev, "missing mgmt reset property in node\n"); + return PTR_ERR(rockchip->mgmt_rst); + } + + rockchip->mgmt_sticky_rst = devm_reset_control_get(dev, "mgmt-sticky"); + if (IS_ERR(rockchip->mgmt_sticky_rst)) { + if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER) + dev_err(dev, "missing mgmt-sticky reset property in node\n"); + return PTR_ERR(rockchip->mgmt_sticky_rst); + } + + rockchip->pipe_rst = devm_reset_control_get(dev, "pipe"); + if (IS_ERR(rockchip->pipe_rst)) { + if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER) + dev_err(dev, "missing pipe reset property in node\n"); + return PTR_ERR(rockchip->pipe_rst); + } + + rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH); + if (IS_ERR(rockchip->ep_gpio)) { + dev_err(dev, "missing ep-gpios property in node\n"); + return PTR_ERR(rockchip->ep_gpio); + } + + rockchip->aclk_pcie = devm_clk_get(dev, "aclk"); + if (IS_ERR(rockchip->aclk_pcie)) { + dev_err(dev, "aclk clock not found\n"); + return PTR_ERR(rockchip->aclk_pcie); + } + + rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf"); + if (IS_ERR(rockchip->aclk_perf_pcie)) { + dev_err(dev, "aclk_perf clock not found\n"); + return PTR_ERR(rockchip->aclk_perf_pcie); + } + + rockchip->hclk_pcie = devm_clk_get(dev, "hclk"); + if (IS_ERR(rockchip->hclk_pcie)) { + dev_err(dev, "hclk clock not found\n"); + return PTR_ERR(rockchip->hclk_pcie); + } + + rockchip->clk_pcie_pm = devm_clk_get(dev, "pm"); + if (IS_ERR(rockchip->clk_pcie_pm)) { + dev_err(dev, "pm clock not found\n"); + return PTR_ERR(rockchip->clk_pcie_pm); + } + + irq = platform_get_irq_byname(pdev, "sys"); + if (irq < 0) { + dev_err(dev, "missing sys IRQ resource\n"); + return -EINVAL; + } + + err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler, + IRQF_SHARED, "pcie-sys", rockchip); + if (err) { + dev_err(dev, "failed to request PCIe subsystem IRQ\n"); + return err; + } + + irq = platform_get_irq_byname(pdev, "legacy"); + if (irq < 0) { + dev_err(dev, "missing legacy IRQ resource\n"); + return -EINVAL; + } + + irq_set_chained_handler_and_data(irq, + rockchip_pcie_legacy_int_handler, + rockchip); + + irq = platform_get_irq_byname(pdev, "client"); + if (irq < 0) { + dev_err(dev, "missing client IRQ resource\n"); + return -EINVAL; + } + + err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler, + IRQF_SHARED, "pcie-client", rockchip); + if (err) { + dev_err(dev, "failed to request PCIe client IRQ\n"); + return err; + } + + rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3"); + if (IS_ERR(rockchip->vpcie3v3)) { + if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "no vpcie3v3 regulator found\n"); + } + + rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8"); + if (IS_ERR(rockchip->vpcie1v8)) { + if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "no vpcie1v8 regulator found\n"); + } + + rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9"); + if (IS_ERR(rockchip->vpcie0v9)) { + if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "no vpcie0v9 regulator found\n"); + } + + return 0; +} + +static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + int err; + + if 
(!IS_ERR(rockchip->vpcie3v3)) { + err = regulator_enable(rockchip->vpcie3v3); + if (err) { + dev_err(dev, "fail to enable vpcie3v3 regulator\n"); + goto err_out; + } + } + + if (!IS_ERR(rockchip->vpcie1v8)) { + err = regulator_enable(rockchip->vpcie1v8); + if (err) { + dev_err(dev, "fail to enable vpcie1v8 regulator\n"); + goto err_disable_3v3; + } + } + + if (!IS_ERR(rockchip->vpcie0v9)) { + err = regulator_enable(rockchip->vpcie0v9); + if (err) { + dev_err(dev, "fail to enable vpcie0v9 regulator\n"); + goto err_disable_1v8; + } + } + + return 0; + +err_disable_1v8: + if (!IS_ERR(rockchip->vpcie1v8)) + regulator_disable(rockchip->vpcie1v8); +err_disable_3v3: + if (!IS_ERR(rockchip->vpcie3v3)) + regulator_disable(rockchip->vpcie3v3); +err_out: + return err; +} + +static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip) +{ + rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) & + (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK); + rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT), + PCIE_CORE_INT_MASK); + + rockchip_pcie_enable_bw_int(rockchip); +} + +static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops intx_domain_ops = { + .map = rockchip_pcie_intx_map, +}; + +static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + struct device_node *intc = of_get_next_child(dev->of_node, NULL); + + if (!intc) { + dev_err(dev, "missing child interrupt-controller node\n"); + return -EINVAL; + } + + rockchip->irq_domain = irq_domain_add_linear(intc, 4, + &intx_domain_ops, rockchip); + if (!rockchip->irq_domain) { + dev_err(dev, "failed to get a INTx IRQ domain\n"); + return -EINVAL; + } + + return 0; +} + +static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip, + int region_no, int type, u8 num_pass_bits, + u32 lower_addr, u32 upper_addr) +{ + u32 ob_addr_0; + u32 ob_addr_1; + u32 ob_desc_0; + u32 aw_offset; + + if (region_no >= MAX_AXI_WRAPPER_REGION_NUM) + return -EINVAL; + if (num_pass_bits + 1 < 8) + return -EINVAL; + if (num_pass_bits > 63) + return -EINVAL; + if (region_no == 0) { + if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits)) + return -EINVAL; + } + if (region_no != 0) { + if (AXI_REGION_SIZE < (2ULL << num_pass_bits)) + return -EINVAL; + } + + aw_offset = (region_no << OB_REG_SIZE_SHIFT); + + ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS; + ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR; + ob_addr_1 = upper_addr; + ob_desc_0 = (1 << 23 | type); + + rockchip_pcie_write(rockchip, ob_addr_0, + PCIE_CORE_OB_REGION_ADDR0 + aw_offset); + rockchip_pcie_write(rockchip, ob_addr_1, + PCIE_CORE_OB_REGION_ADDR1 + aw_offset); + rockchip_pcie_write(rockchip, ob_desc_0, + PCIE_CORE_OB_REGION_DESC0 + aw_offset); + rockchip_pcie_write(rockchip, 0, + PCIE_CORE_OB_REGION_DESC1 + aw_offset); + + return 0; +} + +static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip, + int region_no, u8 num_pass_bits, + u32 lower_addr, u32 upper_addr) +{ + u32 ib_addr_0; + u32 ib_addr_1; + u32 aw_offset; + + if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM) + return -EINVAL; + if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED) + return -EINVAL; + if (num_pass_bits > 63) + return -EINVAL; + + aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT); + + ib_addr_0 = num_pass_bits & 
PCIE_CORE_IB_REGION_ADDR0_NUM_BITS; + ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR; + ib_addr_1 = upper_addr; + + rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset); + rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset); + + return 0; +} + +static int rockchip_pcie_probe(struct platform_device *pdev) +{ + struct rockchip_pcie *rockchip; + struct device *dev = &pdev->dev; + struct pci_bus *bus, *child; + struct resource_entry *win; + resource_size_t io_base; + struct resource *mem; + struct resource *io; + phys_addr_t io_bus_addr = 0; + u32 io_size; + phys_addr_t mem_bus_addr = 0; + u32 mem_size = 0; + int reg_no; + int err; + int offset; + + LIST_HEAD(res); + + if (!dev->of_node) + return -ENODEV; + + rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL); + if (!rockchip) + return -ENOMEM; + + rockchip->dev = dev; + + err = rockchip_pcie_parse_dt(rockchip); + if (err) + return err; + + err = clk_prepare_enable(rockchip->aclk_pcie); + if (err) { + dev_err(dev, "unable to enable aclk_pcie clock\n"); + goto err_aclk_pcie; + } + + err = clk_prepare_enable(rockchip->aclk_perf_pcie); + if (err) { + dev_err(dev, "unable to enable aclk_perf_pcie clock\n"); + goto err_aclk_perf_pcie; + } + + err = clk_prepare_enable(rockchip->hclk_pcie); + if (err) { + dev_err(dev, "unable to enable hclk_pcie clock\n"); + goto err_hclk_pcie; + } + + err = clk_prepare_enable(rockchip->clk_pcie_pm); + if (err) { + dev_err(dev, "unable to enable clk_pcie_pm clock\n"); + goto err_pcie_pm; + } + + err = rockchip_pcie_set_vpcie(rockchip); + if (err) { + dev_err(dev, "failed to set vpcie regulator\n"); + goto err_set_vpcie; + } + + err = rockchip_pcie_init_port(rockchip); + if (err) + goto err_vpcie; + + platform_set_drvdata(pdev, rockchip); + + rockchip_pcie_enable_interrupts(rockchip); + + err = rockchip_pcie_init_irq_domain(rockchip); + if (err < 0) + goto err_vpcie; + + err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff, + &res, &io_base); + if (err) + goto err_vpcie; + + err = devm_request_pci_bus_resources(dev, &res); + if (err) + goto err_vpcie; + + /* Get the I/O and memory ranges from DT */ + io_size = 0; + resource_list_for_each_entry(win, &res) { + switch (resource_type(win->res)) { + case IORESOURCE_IO: + io = win->res; + io->name = "I/O"; + io_size = resource_size(io); + io_bus_addr = io->start - win->offset; + err = pci_remap_iospace(io, io_base); + if (err) { + dev_warn(dev, "error %d: failed to map resource %pR\n", + err, io); + continue; + } + break; + case IORESOURCE_MEM: + mem = win->res; + mem->name = "MEM"; + mem_size = resource_size(mem); + mem_bus_addr = mem->start - win->offset; + break; + case IORESOURCE_BUS: + rockchip->root_bus_nr = win->res->start; + break; + default: + continue; + } + } + + if (mem_size) { + for (reg_no = 0; reg_no < (mem_size >> 20); reg_no++) { + err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1, + AXI_WRAPPER_MEM_WRITE, + 20 - 1, + mem_bus_addr + + (reg_no << 20), + 0); + if (err) { + dev_err(dev, "program RC mem outbound ATU failed\n"); + goto err_vpcie; + } + } + } + + err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0); + if (err) { + dev_err(dev, "program RC mem inbound ATU failed\n"); + goto err_vpcie; + } + + offset = mem_size >> 20; + + if (io_size) { + for (reg_no = 0; reg_no < (io_size >> 20); reg_no++) { + err = rockchip_pcie_prog_ob_atu(rockchip, + reg_no + 1 + offset, + AXI_WRAPPER_IO_WRITE, + 20 - 1, + io_bus_addr + + (reg_no << 20), + 0); + if (err) { + dev_err(dev, "program 
RC io outbound ATU failed\n"); + goto err_vpcie; + } + } + } + + bus = pci_scan_root_bus(&pdev->dev, 0, &rockchip_pcie_ops, rockchip, &res); + if (!bus) { + err = -ENOMEM; + goto err_vpcie; + } + + pci_bus_size_bridges(bus); + pci_bus_assign_resources(bus); + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(bus); + + dev_warn(dev, "sub-word writes to the root port's own config space are emulated by read-modify-write and may corrupt adjacent RW1C bits\n"); + + return 0; + +err_vpcie: + if (!IS_ERR(rockchip->vpcie3v3)) + regulator_disable(rockchip->vpcie3v3); + if (!IS_ERR(rockchip->vpcie1v8)) + regulator_disable(rockchip->vpcie1v8); + if (!IS_ERR(rockchip->vpcie0v9)) + regulator_disable(rockchip->vpcie0v9); +err_set_vpcie: + clk_disable_unprepare(rockchip->clk_pcie_pm); +err_pcie_pm: + clk_disable_unprepare(rockchip->hclk_pcie); +err_hclk_pcie: + clk_disable_unprepare(rockchip->aclk_perf_pcie); +err_aclk_perf_pcie: + clk_disable_unprepare(rockchip->aclk_pcie); +err_aclk_pcie: + return err; +} + +static const struct of_device_id rockchip_pcie_of_match[] = { + { .compatible = "rockchip,rk3399-pcie", }, + {} +}; + +static struct platform_driver rockchip_pcie_driver = { + .driver = { + .name = "rockchip-pcie", + .of_match_table = rockchip_pcie_of_match, + }, + .probe = rockchip_pcie_probe, +}; +builtin_platform_driver(rockchip_pcie_driver); diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c index a4060b85ab23..09aed85f275a 100644 --- a/drivers/pci/host/pcie-spear13xx.c +++ b/drivers/pci/host/pcie-spear13xx.c @@ -15,7 +15,7 @@ #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/phy/phy.h> @@ -355,7 +355,6 @@ static const struct of_device_id spear13xx_pcie_of_match[] = { { .compatible = "st,spear1340-pcie", }, {}, }; -MODULE_DEVICE_TABLE(of, spear13xx_pcie_of_match); static struct platform_driver spear13xx_pcie_driver = { .probe = spear13xx_pcie_probe, @@ -365,14 +364,8 @@ static struct platform_driver spear13xx_pcie_driver = { }, }; -/* SPEAr13xx PCIe driver does not allow module unload */ - static int __init spear13xx_pcie_init(void) { return platform_driver_register(&spear13xx_pcie_driver); } -module_init(spear13xx_pcie_init); - -MODULE_DESCRIPTION("ST Microelectronics SPEAr13xx PCIe host controller driver"); -MODULE_AUTHOR("Pratyush Anand <pratyush.anand@gmail.com>"); -MODULE_LICENSE("GPL v2"); +device_initcall(spear13xx_pcie_init); diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c index 0b597d9190b4..67eae4179290 100644 --- a/drivers/pci/host/pcie-xilinx-nwl.c +++ b/drivers/pci/host/pcie-xilinx-nwl.c @@ -15,7 +15,7 @@ #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_pci.h> @@ -85,10 +85,15 @@ #define MSGF_MISC_SR_MASTER_ERR BIT(5) #define MSGF_MISC_SR_I_ADDR_ERR BIT(6) #define MSGF_MISC_SR_E_ADDR_ERR BIT(7) -#define MSGF_MISC_SR_UR_DETECT BIT(20) - -#define MSGF_MISC_SR_PCIE_CORE GENMASK(18, 16) -#define MSGF_MISC_SR_PCIE_CORE_ERR GENMASK(31, 22) +#define MSGF_MISC_SR_FATAL_AER BIT(16) +#define MSGF_MISC_SR_NON_FATAL_AER BIT(17) +#define MSGF_MISC_SR_CORR_AER BIT(18) +#define MSGF_MISC_SR_UR_DETECT BIT(20) +#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22) +#define MSGF_MISC_SR_FATAL_DEV BIT(23) +#define 
MSGF_MISC_SR_LINK_DOWN BIT(24) +#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25) +#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26) #define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \ MSGF_MISC_SR_RXMSG_OVER | \ @@ -96,9 +101,15 @@ MSGF_MISC_SR_MASTER_ERR | \ MSGF_MISC_SR_I_ADDR_ERR | \ MSGF_MISC_SR_E_ADDR_ERR | \ + MSGF_MISC_SR_FATAL_AER | \ + MSGF_MISC_SR_NON_FATAL_AER | \ + MSGF_MISC_SR_CORR_AER | \ MSGF_MISC_SR_UR_DETECT | \ - MSGF_MISC_SR_PCIE_CORE | \ - MSGF_MISC_SR_PCIE_CORE_ERR) + MSGF_MISC_SR_NON_FATAL_DEV | \ + MSGF_MISC_SR_FATAL_DEV | \ + MSGF_MISC_SR_LINK_DOWN | \ + MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \ + MSGF_MSIC_SR_LINK_BWIDTH) /* Legacy interrupt status mask bits */ #define MSGF_LEG_SR_INTA BIT(0) @@ -109,8 +120,8 @@ MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD) /* MSI interrupt status mask bits */ -#define MSGF_MSI_SR_LO_MASK BIT(0) -#define MSGF_MSI_SR_HI_MASK BIT(0) +#define MSGF_MSI_SR_LO_MASK GENMASK(31, 0) +#define MSGF_MSI_SR_HI_MASK GENMASK(31, 0) #define MSII_PRESENT BIT(0) #define MSII_ENABLE BIT(0) @@ -291,8 +302,29 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data) dev_err(pcie->dev, "In Misc Egress address translation error\n"); - if (misc_stat & MSGF_MISC_SR_PCIE_CORE_ERR) - dev_err(pcie->dev, "PCIe Core error\n"); + if (misc_stat & MSGF_MISC_SR_FATAL_AER) + dev_err(pcie->dev, "Fatal Error in AER Capability\n"); + + if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER) + dev_err(pcie->dev, "Non-Fatal Error in AER Capability\n"); + + if (misc_stat & MSGF_MISC_SR_CORR_AER) + dev_err(pcie->dev, "Correctable Error in AER Capability\n"); + + if (misc_stat & MSGF_MISC_SR_UR_DETECT) + dev_err(pcie->dev, "Unsupported request Detected\n"); + + if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV) + dev_err(pcie->dev, "Non-Fatal Error Detected\n"); + + if (misc_stat & MSGF_MISC_SR_FATAL_DEV) + dev_err(pcie->dev, "Fatal Error Detected\n"); + + if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH) + dev_info(pcie->dev, "Link Autonomous Bandwidth Management Status bit set\n"); + + if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH) + dev_info(pcie->dev, "Link Bandwidth Management Status bit set\n"); /* Clear misc interrupt status */ nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS); @@ -459,40 +491,6 @@ static const struct irq_domain_ops dev_msi_domain_ops = { .free = nwl_irq_domain_free, }; -static void nwl_msi_free_irq_domain(struct nwl_pcie *pcie) -{ - struct nwl_msi *msi = &pcie->msi; - - if (msi->irq_msi0) - irq_set_chained_handler_and_data(msi->irq_msi0, NULL, NULL); - if (msi->irq_msi1) - irq_set_chained_handler_and_data(msi->irq_msi1, NULL, NULL); - - if (msi->msi_domain) - irq_domain_remove(msi->msi_domain); - if (msi->dev_domain) - irq_domain_remove(msi->dev_domain); - - kfree(msi->bitmap); - msi->bitmap = NULL; -} - -static void nwl_pcie_free_irq_domain(struct nwl_pcie *pcie) -{ - int i; - u32 irq; - - for (i = 0; i < INTX_NUM; i++) { - irq = irq_find_mapping(pcie->legacy_irq_domain, i + 1); - if (irq > 0) - irq_dispose_mapping(irq); - } - if (pcie->legacy_irq_domain) - irq_domain_remove(pcie->legacy_irq_domain); - - nwl_msi_free_irq_domain(pcie); -} - static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie) { #ifdef CONFIG_PCI_MSI @@ -867,25 +865,12 @@ error: return err; } -static int nwl_pcie_remove(struct platform_device *pdev) -{ - struct nwl_pcie *pcie = platform_get_drvdata(pdev); - - nwl_pcie_free_irq_domain(pcie); - platform_set_drvdata(pdev, NULL); - return 0; -} - static struct platform_driver nwl_pcie_driver = { .driver = { .name = "nwl-pcie", + .suppress_bind_attrs = true, 
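+ /* built-in only now that .remove() is gone; keep sysfs unbind disabled */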
.of_match_table = nwl_pcie_of_match, }, .probe = nwl_pcie_probe, - .remove = nwl_pcie_remove, }; -module_platform_driver(nwl_pcie_driver); - -MODULE_AUTHOR("Xilinx, Inc"); -MODULE_DESCRIPTION("NWL PCIe driver"); -MODULE_LICENSE("GPL"); +builtin_platform_driver(nwl_pcie_driver); diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c index a30e01639557..be568039d9d0 100644 --- a/drivers/pci/host/pcie-xilinx.c +++ b/drivers/pci/host/pcie-xilinx.c @@ -18,7 +18,7 @@ #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_pci.h> @@ -101,7 +101,8 @@ * @msi_pages: MSI pages * @root_busno: Root Bus number * @dev: Device pointer - * @irq_domain: IRQ domain pointer + * @msi_domain: MSI IRQ domain pointer + * @leg_domain: Legacy IRQ domain pointer * @resources: Bus Resources */ struct xilinx_pcie_port { @@ -110,7 +111,8 @@ struct xilinx_pcie_port { unsigned long msi_pages; u8 root_busno; struct device *dev; - struct irq_domain *irq_domain; + struct irq_domain *msi_domain; + struct irq_domain *leg_domain; struct list_head resources; }; @@ -168,13 +170,6 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) if (bus->number == port->root_busno && devfn > 0) return false; - /* - * Do not read more than one device on the bus directly attached - * to RC. - */ - if (bus->primary == port->root_busno && devfn > 0) - return false; - return true; } @@ -219,13 +214,15 @@ static void xilinx_pcie_destroy_msi(unsigned int irq) { struct msi_desc *msi; struct xilinx_pcie_port *port; + struct irq_data *d = irq_get_irq_data(irq); + irq_hw_number_t hwirq = irqd_to_hwirq(d); - if (!test_bit(irq, msi_irq_in_use)) { + if (!test_bit(hwirq, msi_irq_in_use)) { msi = irq_get_msi_desc(irq); port = msi_desc_to_pci_sysdata(msi); dev_err(port->dev, "Trying to free unused MSI#%d\n", irq); } else { - clear_bit(irq, msi_irq_in_use); + clear_bit(hwirq, msi_irq_in_use); } } @@ -257,6 +254,7 @@ static void xilinx_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) { xilinx_pcie_destroy_msi(irq); + irq_dispose_mapping(irq); } /** @@ -281,7 +279,7 @@ static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip, if (hwirq < 0) return hwirq; - irq = irq_create_mapping(port->irq_domain, hwirq); + irq = irq_create_mapping(port->msi_domain, hwirq); if (!irq) return -EINVAL; @@ -432,7 +430,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) /* Check whether interrupt valid */ if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) { dev_warn(port->dev, "RP Intr FIFO1 read error\n"); - return IRQ_HANDLED; + goto error; } if (!(val & XILINX_PCIE_RPIFR1_MSI_INTR)) { @@ -443,7 +441,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) /* Handle INTx Interrupt */ val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >> XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1; - generic_handle_irq(irq_find_mapping(port->irq_domain, + generic_handle_irq(irq_find_mapping(port->leg_domain, val)); } } @@ -454,7 +452,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) { dev_warn(port->dev, "RP Intr FIFO1 read error\n"); - return IRQ_HANDLED; + goto error; } if (val & XILINX_PCIE_RPIFR1_MSI_INTR) { @@ -499,6 +497,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) if (status & XILINX_PCIE_INTR_MST_ERRP) dev_warn(port->dev, "Master error poison\n"); +error: /* Clear the Interrupt 
Decode register */ pcie_write(port, status, XILINX_PCIE_REG_IDR); @@ -506,35 +505,6 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) } /** - * xilinx_pcie_free_irq_domain - Free IRQ domain - * @port: PCIe port information - */ -static void xilinx_pcie_free_irq_domain(struct xilinx_pcie_port *port) -{ - int i; - u32 irq, num_irqs; - - /* Free IRQ Domain */ - if (IS_ENABLED(CONFIG_PCI_MSI)) { - - free_pages(port->msi_pages, 0); - - num_irqs = XILINX_NUM_MSI_IRQS; - } else { - /* INTx */ - num_irqs = 4; - } - - for (i = 0; i < num_irqs; i++) { - irq = irq_find_mapping(port->irq_domain, i); - if (irq > 0) - irq_dispose_mapping(irq); - } - - irq_domain_remove(port->irq_domain); -} - -/** * xilinx_pcie_init_irq_domain - Initialize IRQ domain * @port: PCIe port information * @@ -553,21 +523,21 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) return -ENODEV; } - port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4, + port->leg_domain = irq_domain_add_linear(pcie_intc_node, 4, &intx_domain_ops, port); - if (!port->irq_domain) { + if (!port->leg_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENODEV; } /* Setup MSI */ if (IS_ENABLED(CONFIG_PCI_MSI)) { - port->irq_domain = irq_domain_add_linear(node, + port->msi_domain = irq_domain_add_linear(node, XILINX_NUM_MSI_IRQS, &msi_domain_ops, &xilinx_pcie_msi_chip); - if (!port->irq_domain) { + if (!port->msi_domain) { dev_err(dev, "Failed to get a MSI IRQ domain\n"); return -ENODEV; } @@ -724,21 +694,6 @@ error: return err; } -/** - * xilinx_pcie_remove - Remove function - * @pdev: Platform device pointer - * - * Return: '0' always - */ -static int xilinx_pcie_remove(struct platform_device *pdev) -{ - struct xilinx_pcie_port *port = platform_get_drvdata(pdev); - - xilinx_pcie_free_irq_domain(port); - - return 0; -} - static struct of_device_id xilinx_pcie_of_match[] = { { .compatible = "xlnx,axi-pcie-host-1.00.a", }, {} @@ -751,10 +706,5 @@ static struct platform_driver xilinx_pcie_driver = { .suppress_bind_attrs = true, }, .probe = xilinx_pcie_probe, - .remove = xilinx_pcie_remove, }; -module_platform_driver(xilinx_pcie_driver); - -MODULE_AUTHOR("Xilinx Inc"); -MODULE_DESCRIPTION("Xilinx AXI PCIe driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(xilinx_pcie_driver); diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c new file mode 100644 index 000000000000..37e29b580be3 --- /dev/null +++ b/drivers/pci/host/vmd.c @@ -0,0 +1,768 @@ +/* + * Volume Management Device driver + * Copyright (c) 2015, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/pci.h> +#include <linux/rculist.h> +#include <linux/rcupdate.h> + +#include <asm/irqdomain.h> +#include <asm/device.h> +#include <asm/msi.h> +#include <asm/msidef.h> + +#define VMD_CFGBAR 0 +#define VMD_MEMBAR1 2 +#define VMD_MEMBAR2 4 + +/* + * Lock for manipulating VMD IRQ lists. 
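+ *
+ * Writers (vmd_irq_enable()/vmd_irq_disable() and the MSI init/free
+ * paths below) take this raw spinlock, while the demux side is
+ * expected to walk irq_list under RCU; hence entries are added with
+ * list_add_tail_rcu() and freed via kfree_rcu() after a
+ * synchronize_rcu() grace period.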
+ */ +static DEFINE_RAW_SPINLOCK(list_lock); + +/** + * struct vmd_irq - private data to map driver IRQ to the VMD shared vector + * @node: list item for parent traversal. + * @rcu: RCU callback item for freeing. + * @irq: back pointer to parent. + * @enabled: true if driver enabled IRQ + * @virq: the virtual IRQ value provided to the requesting driver. + * + * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to + * a VMD IRQ using this structure. + */ +struct vmd_irq { + struct list_head node; + struct rcu_head rcu; + struct vmd_irq_list *irq; + bool enabled; + unsigned int virq; +}; + +/** + * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector + * @irq_list: the list of irq's the VMD one demuxes to. + * @count: number of child IRQs assigned to this vector; used to track + * sharing. + */ +struct vmd_irq_list { + struct list_head irq_list; + unsigned int count; +}; + +struct vmd_dev { + struct pci_dev *dev; + + spinlock_t cfg_lock; + char __iomem *cfgbar; + + int msix_count; + struct vmd_irq_list *irqs; + + struct pci_sysdata sysdata; + struct resource resources[3]; + struct irq_domain *irq_domain; + struct pci_bus *bus; + +#ifdef CONFIG_X86_DEV_DMA_OPS + struct dma_map_ops dma_ops; + struct dma_domain dma_domain; +#endif +}; + +static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus) +{ + return container_of(bus->sysdata, struct vmd_dev, sysdata); +} + +static inline unsigned int index_from_irqs(struct vmd_dev *vmd, + struct vmd_irq_list *irqs) +{ + return irqs - vmd->irqs; +} + +/* + * Drivers managing a device in a VMD domain allocate their own IRQs as before, + * but the MSI entry for the hardware it's driving will be programmed with a + * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its + * domain into one of its own, and the VMD driver de-muxes these for the + * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations + * and irq_chip to set this up. + */ +static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct vmd_irq *vmdirq = data->chip_data; + struct vmd_irq_list *irq = vmdirq->irq; + struct vmd_dev *vmd = irq_data_get_irq_handler_data(data); + + msg->address_hi = MSI_ADDR_BASE_HI; + msg->address_lo = MSI_ADDR_BASE_LO | + MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq)); + msg->data = 0; +} + +/* + * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops. + */ +static void vmd_irq_enable(struct irq_data *data) +{ + struct vmd_irq *vmdirq = data->chip_data; + unsigned long flags; + + raw_spin_lock_irqsave(&list_lock, flags); + WARN_ON(vmdirq->enabled); + list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list); + vmdirq->enabled = true; + raw_spin_unlock_irqrestore(&list_lock, flags); + + data->chip->irq_unmask(data); +} + +static void vmd_irq_disable(struct irq_data *data) +{ + struct vmd_irq *vmdirq = data->chip_data; + unsigned long flags; + + data->chip->irq_mask(data); + + raw_spin_lock_irqsave(&list_lock, flags); + if (vmdirq->enabled) { + list_del_rcu(&vmdirq->node); + vmdirq->enabled = false; + } + raw_spin_unlock_irqrestore(&list_lock, flags); +} + +/* + * XXX: Stubbed until we develop acceptable way to not create conflicts with + * other devices sharing the same vector. 
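+ *
+ * Until then, returning -EINVAL simply makes irq_set_affinity()
+ * fail for the child IRQ; delivery continues to follow the
+ * affinity of the parent VMD MSI-X vector it is muxed onto.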
+ */ +static int vmd_irq_set_affinity(struct irq_data *data, + const struct cpumask *dest, bool force) +{ + return -EINVAL; +} + +static struct irq_chip vmd_msi_controller = { + .name = "VMD-MSI", + .irq_enable = vmd_irq_enable, + .irq_disable = vmd_irq_disable, + .irq_compose_msi_msg = vmd_compose_msi_msg, + .irq_set_affinity = vmd_irq_set_affinity, +}; + +static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info, + msi_alloc_info_t *arg) +{ + return 0; +} + +/* + * XXX: We can be even smarter selecting the best IRQ once we solve the + * affinity problem. + */ +static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc) +{ + int i, best = 1; + unsigned long flags; + + if (!desc->msi_attrib.is_msix || vmd->msix_count == 1) + return &vmd->irqs[0]; + + raw_spin_lock_irqsave(&list_lock, flags); + for (i = 1; i < vmd->msix_count; i++) + if (vmd->irqs[i].count < vmd->irqs[best].count) + best = i; + vmd->irqs[best].count++; + raw_spin_unlock_irqrestore(&list_lock, flags); + + return &vmd->irqs[best]; +} + +static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, + unsigned int virq, irq_hw_number_t hwirq, + msi_alloc_info_t *arg) +{ + struct msi_desc *desc = arg->desc; + struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus); + struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); + unsigned int index, vector; + + if (!vmdirq) + return -ENOMEM; + + INIT_LIST_HEAD(&vmdirq->node); + vmdirq->irq = vmd_next_irq(vmd, desc); + vmdirq->virq = virq; + index = index_from_irqs(vmd, vmdirq->irq); + vector = pci_irq_vector(vmd->dev, index); + + irq_domain_set_info(domain, virq, vector, info->chip, vmdirq, + handle_untracked_irq, vmd, NULL); + return 0; +} + +static void vmd_msi_free(struct irq_domain *domain, + struct msi_domain_info *info, unsigned int virq) +{ + struct vmd_irq *vmdirq = irq_get_chip_data(virq); + unsigned long flags; + + synchronize_rcu(); + + /* XXX: Potential optimization to rebalance */ + raw_spin_lock_irqsave(&list_lock, flags); + vmdirq->irq->count--; + raw_spin_unlock_irqrestore(&list_lock, flags); + + kfree_rcu(vmdirq, rcu); +} + +static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *arg) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct vmd_dev *vmd = vmd_from_bus(pdev->bus); + + if (nvec > vmd->msix_count) + return vmd->msix_count; + + memset(arg, 0, sizeof(*arg)); + return 0; +} + +static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) +{ + arg->desc = desc; +} + +static struct msi_domain_ops vmd_msi_domain_ops = { + .get_hwirq = vmd_get_hwirq, + .msi_init = vmd_msi_init, + .msi_free = vmd_msi_free, + .msi_prepare = vmd_msi_prepare, + .set_desc = vmd_set_desc, +}; + +static struct msi_domain_info vmd_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX, + .ops = &vmd_msi_domain_ops, + .chip = &vmd_msi_controller, +}; + +#ifdef CONFIG_X86_DEV_DMA_OPS +/* + * VMD replaces the requester ID with its own. DMA mappings for devices in a + * VMD domain need to be mapped for the VMD, not the device requiring + * the mapping. 
+ */ +static struct device *to_vmd_dev(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct vmd_dev *vmd = vmd_from_bus(pdev->bus); + + return &vmd->dev->dev; +} + +static struct dma_map_ops *vmd_dma_ops(struct device *dev) +{ + return get_dma_ops(to_vmd_dev(dev)); +} + +static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, + gfp_t flag, unsigned long attrs) +{ + return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag, + attrs); +} + +static void vmd_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t addr, unsigned long attrs) +{ + return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr, + attrs); +} + +static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t addr, size_t size, + unsigned long attrs) +{ + return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr, + size, attrs); +} + +static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t addr, size_t size, + unsigned long attrs) +{ + return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr, + addr, size, attrs); +} + +static dma_addr_t vmd_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size, + dir, attrs); +} + +static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs); +} + +static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, unsigned long attrs) +{ + return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs); +} + +static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, unsigned long attrs) +{ + vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs); +} + +static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir); +} + +static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size, + dir); +} + +static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir); +} + +static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir); +} + +static int vmd_mapping_error(struct device *dev, dma_addr_t addr) +{ + return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr); +} + +static int vmd_dma_supported(struct device *dev, u64 mask) +{ + return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask); +} + +#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK +static u64 vmd_get_required_mask(struct device *dev) +{ + return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev)); +} +#endif + +static void vmd_teardown_dma_ops(struct vmd_dev *vmd) +{ + struct dma_domain *domain = &vmd->dma_domain; + + if (get_dma_ops(&vmd->dev->dev)) + del_dma_domain(domain); +} + +#define ASSIGN_VMD_DMA_OPS(source, dest, fn) \ + do { \ + if (source->fn) 
\ + dest->fn = vmd_##fn; \ + } while (0) + +static void vmd_setup_dma_ops(struct vmd_dev *vmd) +{ + const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev); + struct dma_map_ops *dest = &vmd->dma_ops; + struct dma_domain *domain = &vmd->dma_domain; + + domain->domain_nr = vmd->sysdata.domain; + domain->dma_ops = dest; + + if (!source) + return; + ASSIGN_VMD_DMA_OPS(source, dest, alloc); + ASSIGN_VMD_DMA_OPS(source, dest, free); + ASSIGN_VMD_DMA_OPS(source, dest, mmap); + ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable); + ASSIGN_VMD_DMA_OPS(source, dest, map_page); + ASSIGN_VMD_DMA_OPS(source, dest, unmap_page); + ASSIGN_VMD_DMA_OPS(source, dest, map_sg); + ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg); + ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu); + ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device); + ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu); + ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device); + ASSIGN_VMD_DMA_OPS(source, dest, mapping_error); + ASSIGN_VMD_DMA_OPS(source, dest, dma_supported); +#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK + ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask); +#endif + add_dma_domain(domain); +} +#undef ASSIGN_VMD_DMA_OPS +#else +static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {} +static void vmd_setup_dma_ops(struct vmd_dev *vmd) {} +#endif + +static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus, + unsigned int devfn, int reg, int len) +{ + char __iomem *addr = vmd->cfgbar + + (bus->number << 20) + (devfn << 12) + reg; + + if ((addr - vmd->cfgbar) + len >= + resource_size(&vmd->dev->resource[VMD_CFGBAR])) + return NULL; + + return addr; +} + +/* + * CPU may deadlock if config space is not serialized on some versions of this + * hardware, so all config space access is done under a spinlock. + */ +static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, + int len, u32 *value) +{ + struct vmd_dev *vmd = vmd_from_bus(bus); + char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); + unsigned long flags; + int ret = 0; + + if (!addr) + return -EFAULT; + + spin_lock_irqsave(&vmd->cfg_lock, flags); + switch (len) { + case 1: + *value = readb(addr); + break; + case 2: + *value = readw(addr); + break; + case 4: + *value = readl(addr); + break; + default: + ret = -EINVAL; + break; + } + spin_unlock_irqrestore(&vmd->cfg_lock, flags); + return ret; +} + +/* + * VMD h/w converts non-posted config writes to posted memory writes. The + * read-back in this function forces the completion so it returns only after + * the config space was written, as expected. 
+ */ +static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, + int len, u32 value) +{ + struct vmd_dev *vmd = vmd_from_bus(bus); + char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); + unsigned long flags; + int ret = 0; + + if (!addr) + return -EFAULT; + + spin_lock_irqsave(&vmd->cfg_lock, flags); + switch (len) { + case 1: + writeb(value, addr); + readb(addr); + break; + case 2: + writew(value, addr); + readw(addr); + break; + case 4: + writel(value, addr); + readl(addr); + break; + default: + ret = -EINVAL; + break; + } + spin_unlock_irqrestore(&vmd->cfg_lock, flags); + return ret; +} + +static struct pci_ops vmd_ops = { + .read = vmd_pci_read, + .write = vmd_pci_write, +}; + +static void vmd_attach_resources(struct vmd_dev *vmd) +{ + vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1]; + vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2]; +} + +static void vmd_detach_resources(struct vmd_dev *vmd) +{ + vmd->dev->resource[VMD_MEMBAR1].child = NULL; + vmd->dev->resource[VMD_MEMBAR2].child = NULL; +} + +/* + * VMD domains start at 0x10000 to not clash with ACPI _SEG domains. + */ +static int vmd_find_free_domain(void) +{ + int domain = 0xffff; + struct pci_bus *bus = NULL; + + while ((bus = pci_find_next_bus(bus)) != NULL) + domain = max_t(int, domain, pci_domain_nr(bus)); + return domain + 1; +} + +static int vmd_enable_domain(struct vmd_dev *vmd) +{ + struct pci_sysdata *sd = &vmd->sysdata; + struct resource *res; + u32 upper_bits; + unsigned long flags; + LIST_HEAD(resources); + + res = &vmd->dev->resource[VMD_CFGBAR]; + vmd->resources[0] = (struct resource) { + .name = "VMD CFGBAR", + .start = 0, + .end = (resource_size(res) >> 20) - 1, + .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED, + }; + + /* + * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can + * put 32-bit resources in the window. + * + * There's no hardware reason why a 64-bit window *couldn't* + * contain a 32-bit resource, but pbus_size_mem() computes the + * bridge window size assuming a 64-bit window will contain no + * 32-bit resources. __pci_assign_resource() enforces that + * artificial restriction to make sure everything will fit. + * + * The only way we could use a 64-bit non-prefetchable MEMBAR is + * if its address is <4GB so that we can convert it to a 32-bit + * resource. To be visible to the host OS, all VMD endpoints must + * be initially configured by platform BIOS, which includes setting + * up these resources. We can assume the device is configured + * according to the platform needs. 
+ */ + res = &vmd->dev->resource[VMD_MEMBAR1]; + upper_bits = upper_32_bits(res->end); + flags = res->flags & ~IORESOURCE_SIZEALIGN; + if (!upper_bits) + flags &= ~IORESOURCE_MEM_64; + vmd->resources[1] = (struct resource) { + .name = "VMD MEMBAR1", + .start = res->start, + .end = res->end, + .flags = flags, + .parent = res, + }; + + res = &vmd->dev->resource[VMD_MEMBAR2]; + upper_bits = upper_32_bits(res->end); + flags = res->flags & ~IORESOURCE_SIZEALIGN; + if (!upper_bits) + flags &= ~IORESOURCE_MEM_64; + vmd->resources[2] = (struct resource) { + .name = "VMD MEMBAR2", + .start = res->start + 0x2000, + .end = res->end, + .flags = flags, + .parent = res, + }; + + sd->vmd_domain = true; + sd->domain = vmd_find_free_domain(); + if (sd->domain < 0) + return sd->domain; + + sd->node = pcibus_to_node(vmd->dev->bus); + + vmd->irq_domain = pci_msi_create_irq_domain(NULL, &vmd_msi_domain_info, + x86_vector_domain); + if (!vmd->irq_domain) + return -ENODEV; + + pci_add_resource(&resources, &vmd->resources[0]); + pci_add_resource(&resources, &vmd->resources[1]); + pci_add_resource(&resources, &vmd->resources[2]); + vmd->bus = pci_create_root_bus(&vmd->dev->dev, 0, &vmd_ops, sd, + &resources); + if (!vmd->bus) { + pci_free_resource_list(&resources); + irq_domain_remove(vmd->irq_domain); + return -ENODEV; + } + + vmd_attach_resources(vmd); + vmd_setup_dma_ops(vmd); + dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain); + pci_rescan_bus(vmd->bus); + + WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj, + "domain"), "Can't create symlink to domain\n"); + return 0; +} + +static irqreturn_t vmd_irq(int irq, void *data) +{ + struct vmd_irq_list *irqs = data; + struct vmd_irq *vmdirq; + + rcu_read_lock(); + list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node) + generic_handle_irq(vmdirq->virq); + rcu_read_unlock(); + + return IRQ_HANDLED; +} + +static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct vmd_dev *vmd; + int i, err; + + if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20)) + return -ENOMEM; + + vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL); + if (!vmd) + return -ENOMEM; + + vmd->dev = dev; + err = pcim_enable_device(dev); + if (err < 0) + return err; + + vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0); + if (!vmd->cfgbar) + return -ENOMEM; + + pci_set_master(dev); + if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) && + dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) + return -ENODEV; + + vmd->msix_count = pci_msix_vec_count(dev); + if (vmd->msix_count < 0) + return -ENODEV; + + vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); + if (vmd->msix_count < 0) + return vmd->msix_count; + + vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs), + GFP_KERNEL); + if (!vmd->irqs) + return -ENOMEM; + + for (i = 0; i < vmd->msix_count; i++) { + INIT_LIST_HEAD(&vmd->irqs[i].irq_list); + err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), + vmd_irq, 0, "vmd", &vmd->irqs[i]); + if (err) + return err; + } + + spin_lock_init(&vmd->cfg_lock); + pci_set_drvdata(dev, vmd); + err = vmd_enable_domain(vmd); + if (err) + return err; + + dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n", + vmd->sysdata.domain); + return 0; +} + +static void vmd_remove(struct pci_dev *dev) +{ + struct vmd_dev *vmd = pci_get_drvdata(dev); + + vmd_detach_resources(vmd); + pci_set_drvdata(dev, NULL); + sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); + pci_stop_root_bus(vmd->bus); + 
pci_remove_root_bus(vmd->bus); + vmd_teardown_dma_ops(vmd); + irq_domain_remove(vmd->irq_domain); +} + +#ifdef CONFIG_PM +static int vmd_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + pci_save_state(pdev); + return 0; +} + +static int vmd_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + pci_restore_state(pdev); + return 0; +} +#endif +static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume); + +static const struct pci_device_id vmd_ids[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x201d),}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, vmd_ids); + +static struct pci_driver vmd_drv = { + .name = "vmd", + .id_table = vmd_ids, + .probe = vmd_probe, + .remove = vmd_remove, + .driver = { + .pm = &vmd_dev_pm_ops, + }, +}; +module_pci_driver(vmd_drv); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION("0.6");
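
The mux/demux scheme that the vmd_irq and vmd_irq_list comments describe can be pictured with a short standalone sketch. This is plain C with hypothetical names (demux, child_irq), not kernel code: each VMD MSI-X vector carries a list of child interrupts, and the top-level handler simply walks that list, as vmd_irq() does under RCU.

#include <stdio.h>

#define NVEC 4			/* pretend VMD endpoint with 4 MSI-X vectors */
#define MAX_CHILD 8

/* Stand-in for struct vmd_irq: one child IRQ sharing a VMD vector. */
struct child_irq {
	void (*handler)(const char *name);
	const char *name;
};

/* Stand-in for struct vmd_irq_list: the children behind one vector. */
struct vector {
	struct child_irq child[MAX_CHILD];
	int count;
};

static struct vector vectors[NVEC];

/* Stand-in for vmd_irq(): fan the shared vector out to every child. */
static void demux(int vec)
{
	int i;

	for (i = 0; i < vectors[vec].count; i++)
		vectors[vec].child[i].handler(vectors[vec].child[i].name);
}

static void child_isr(const char *name)
{
	printf("child handler for %s ran\n", name);
}

int main(void)
{
	struct vector *v = &vectors[1];

	v->child[v->count++] = (struct child_irq){ child_isr, "nvme0" };
	v->child[v->count++] = (struct child_irq){ child_isr, "nvme1" };
	demux(1);		/* hardware raised VMD MSI-X vector 1 */
	return 0;
}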
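vmd_next_irq() reserves vector 0 for plain MSI (and the single-vector case) and spreads MSI-X children across the remaining vectors by picking the least-subscribed one. The selection policy in isolation, as a standalone sketch with an invented next_vector() helper:

#include <stdio.h>

/* Pick the least-subscribed vector, skipping vector 0 (reserved for MSI). */
static int next_vector(const unsigned int *count, int nvec)
{
	int i, best = 1;

	for (i = 1; i < nvec; i++)
		if (count[i] < count[best])
			best = i;
	return best;
}

int main(void)
{
	unsigned int count[4] = { 9, 2, 0, 5 };	/* children per vector */

	printf("next child goes to vector %d\n", next_vector(count, 4));
	return 0;
}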
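The CONFIG_X86_DEV_DMA_OPS block exists because VMD substitutes its own requester ID, so DMA mappings must be created against the VMD endpoint rather than the child device; every dma_map_ops callback is therefore a thin wrapper that swaps the device before forwarding, which is what to_vmd_dev() and ASSIGN_VMD_DMA_OPS arrange. The shape of that redirection, reduced to a standalone sketch with invented names:

#include <stdio.h>

struct dev { const char *name; struct dev *parent; };

/* Stand-in for to_vmd_dev(): children in a VMD domain map via the VMD. */
static struct dev *to_requester(struct dev *dev)
{
	return dev->parent ? dev->parent : dev;
}

/* A wrapped "map" callback: forward with the device swapped. */
static void map_page(struct dev *dev)
{
	printf("mapping created for %s\n", to_requester(dev)->name);
}

int main(void)
{
	struct dev vmd = { "vmd", NULL };
	struct dev nvme = { "nvme0", &vmd };

	map_page(&nvme);	/* prints "vmd": the IOMMU sees VMD's requester ID */
	return 0;
}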
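vmd_cfg_addr() indexes CFGBAR using the usual ECAM layout, 1 MiB per bus and 4 KiB per function, and refuses accesses that would run past the BAR. The arithmetic and bounds check, modeled standalone with a hypothetical cfg_offset() helper:

#include <stddef.h>
#include <stdio.h>

/*
 * ECAM-style offset into CFGBAR: 1 MiB per bus, 4 KiB per function,
 * plus the register offset. Mirrors vmd_cfg_addr()'s bounds check:
 * accesses that would run past the BAR fail (-1 here, NULL there).
 */
static long cfg_offset(unsigned int bus, unsigned int devfn,
		       unsigned int reg, unsigned int len, size_t bar_size)
{
	size_t off = ((size_t)bus << 20) + ((size_t)devfn << 12) + reg;

	if (off + len >= bar_size)
		return -1;
	return (long)off;
}

int main(void)
{
	/* bus 1, devfn 0, reg 0x04 (PCI_COMMAND), 2-byte access, 32 MiB BAR */
	printf("offset = %#lx\n", cfg_offset(1, 0, 0x04, 2, 1u << 25));
	return 0;
}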
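The read-back in vmd_pci_write() is the standard idiom for flushing a posted write: a read from the same device cannot complete until earlier posted writes have landed. A minimal kernel-style sketch of the idiom, assuming a hypothetical ioremap()ed register (this is the general pattern, not the driver's code):

#include <linux/io.h>
#include <linux/types.h>

/*
 * Write-then-read makes a posted memory write behave like a non-posted
 * one: the read cannot complete until the preceding write has.
 */
static void cfg_write8_flushed(void __iomem *addr, u8 value)
{
	writeb(value, addr);	/* VMD turns this into a posted write */
	readb(addr);		/* read-back forces completion */
}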
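vmd_find_free_domain() seeds its scan at 0xffff so the first VMD segment comes out as 0x10000, one past the highest domain already registered and clear of ACPI _SEG numbers. Modeled standalone over a hypothetical list of existing domains:

#include <stdio.h>

/* One past the highest existing domain, never below 0x10000. */
static int find_free_domain(const int *existing, int n)
{
	int i, domain = 0xffff;

	for (i = 0; i < n; i++)
		if (existing[i] > domain)
			domain = existing[i];
	return domain + 1;
}

int main(void)
{
	int domains[] = { 0x0000, 0x0001 };	/* typical ACPI _SEG domains */

	printf("first VMD domain: 0x%x\n", find_free_domain(domains, 2));
	return 0;
}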
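The MEMBAR handling in vmd_enable_domain() applies one rule twice: clear IORESOURCE_MEM_64 whenever upper_32_bits(res->end) is zero, so that 32-bit resources may be placed in a window that happens to sit below 4GB. The rule in isolation, as a standalone sketch (MEM_64 here is a stand-in bit, not the kernel's definition):

#include <stdint.h>
#include <stdio.h>

#define MEM_64 0x00100000u	/* stand-in for IORESOURCE_MEM_64 */

/* upper 32 bits of 'end' all zero means the window sits below 4GB. */
static unsigned int window_flags(uint64_t end, unsigned int flags)
{
	if (!(uint32_t)(end >> 32))
		flags &= ~MEM_64;	/* allow 32-bit resources inside */
	return flags;
}

int main(void)
{
	printf("%#x\n", window_flags(0xdfffffffULL, MEM_64));	 /* cleared */
	printf("%#x\n", window_flags(0x10dfffffffULL, MEM_64)); /* kept */
	return 0;
}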