-rw-r--r--  Documentation/ABI/testing/sysfs-bus-pci | 10
-rw-r--r--  Documentation/devicetree/bindings/pci/designware-pcie.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt | 25
-rw-r--r--  Documentation/devicetree/bindings/pci/pci-keystone.txt | 63
-rw-r--r--  Documentation/devicetree/bindings/pci/xgene-pci.txt | 57
-rw-r--r--  Documentation/devicetree/bindings/pci/xilinx-pcie.txt | 62
-rw-r--r--  Documentation/driver-model/devres.txt | 2
-rw-r--r--  MAINTAINERS | 15
-rw-r--r--  arch/arm/boot/dts/spear1310.dtsi | 18
-rw-r--r--  arch/arm/boot/dts/spear1340.dtsi | 6
-rw-r--r--  arch/arm/include/asm/io.h | 1
-rw-r--r--  arch/arm/mach-integrator/pci_v3.c | 23
-rw-r--r--  arch/arm64/Kconfig | 22
-rw-r--r--  arch/arm64/boot/dts/apm-mustang.dts | 8
-rw-r--r--  arch/arm64/boot/dts/apm-storm.dtsi | 165
-rw-r--r--  arch/arm64/include/asm/Kbuild | 1
-rw-r--r--  arch/arm64/include/asm/io.h | 3
-rw-r--r--  arch/arm64/include/asm/pci.h | 37
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 2
-rw-r--r--  arch/arm64/kernel/Makefile | 1
-rw-r--r--  arch/arm64/kernel/pci.c | 70
-rw-r--r--  arch/ia64/kernel/msi_ia64.c | 2
-rw-r--r--  arch/ia64/sn/kernel/msi_sn.c | 4
-rw-r--r--  arch/mips/pci/msi-octeon.c | 6
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 2
-rw-r--r--  arch/powerpc/kernel/msi.c | 12
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c | 9
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 19
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c | 44
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 12
-rw-r--r--  arch/powerpc/sysdev/mpic_pasemi_msi.c | 11
-rw-r--r--  arch/powerpc/sysdev/mpic_u3msi.c | 28
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_hsta_msi.c | 18
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_msi.c | 19
-rw-r--r--  arch/x86/pci/common.c | 20
-rw-r--r--  arch/x86/pci/mmconfig-shared.c | 40
-rw-r--r--  arch/x86/pci/pcbios.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_reg.h | 1
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c | 14
-rw-r--r--  drivers/misc/vmw_vmci/vmci_guest.c | 1
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 1
-rw-r--r--  drivers/of/address.c | 154
-rw-r--r--  drivers/of/of_pci.c | 142
-rw-r--r--  drivers/pci/host/Kconfig | 28
-rw-r--r--  drivers/pci/host/Makefile | 3
-rw-r--r--  drivers/pci/host/pci-imx6.c | 13
-rw-r--r--  drivers/pci/host/pci-keystone-dw.c | 516
-rw-r--r--  drivers/pci/host/pci-keystone.c | 415
-rw-r--r--  drivers/pci/host/pci-keystone.h | 58
-rw-r--r--  drivers/pci/host/pci-mvebu.c | 6
-rw-r--r--  drivers/pci/host/pci-tegra.c | 277
-rw-r--r--  drivers/pci/host/pci-xgene.c | 659
-rw-r--r--  drivers/pci/host/pcie-designware.c | 268
-rw-r--r--  drivers/pci/host/pcie-designware.h | 22
-rw-r--r--  drivers/pci/host/pcie-rcar.c | 21
-rw-r--r--  drivers/pci/host/pcie-spear13xx.c | 2
-rw-r--r--  drivers/pci/host/pcie-xilinx.c | 970
-rw-r--r--  drivers/pci/hotplug/Makefile | 2
-rw-r--r--  drivers/pci/hotplug/acpi_pcihp.c | 254
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 11
-rw-r--r--  drivers/pci/hotplug/acpiphp_ibm.c | 2
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_core.c | 13
-rw-r--r--  drivers/pci/hotplug/cpcihp_generic.c | 28
-rw-r--r--  drivers/pci/hotplug/cpcihp_zt5550.c | 44
-rw-r--r--  drivers/pci/hotplug/cpqphp.h | 2
-rw-r--r--  drivers/pci/hotplug/cpqphp_core.c | 3
-rw-r--r--  drivers/pci/hotplug/cpqphp_ctrl.c | 19
-rw-r--r--  drivers/pci/hotplug/cpqphp_nvram.c | 13
-rw-r--r--  drivers/pci/hotplug/ibmphp_core.c | 19
-rw-r--r--  drivers/pci/hotplug/ibmphp_ebda.c | 3
-rw-r--r--  drivers/pci/hotplug/ibmphp_hpc.c | 3
-rw-r--r--  drivers/pci/hotplug/ibmphp_pci.c | 6
-rw-r--r--  drivers/pci/hotplug/ibmphp_res.c | 45
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 2
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 7
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 17
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c | 9
-rw-r--r--  drivers/pci/hotplug/pcihp_slot.c | 176
-rw-r--r--  drivers/pci/hotplug/shpchp_ctrl.c | 14
-rw-r--r--  drivers/pci/hotplug/shpchp_hpc.c | 5
-rw-r--r--  drivers/pci/hotplug/shpchp_pci.c | 8
-rw-r--r--  drivers/pci/iov.c | 2
-rw-r--r--  drivers/pci/msi.c | 75
-rw-r--r--  drivers/pci/pci-acpi.c | 276
-rw-r--r--  drivers/pci/pci-driver.c | 5
-rw-r--r--  drivers/pci/pci-sysfs.c | 41
-rw-r--r--  drivers/pci/pci.c | 57
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_errprint.c | 11
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 74
-rw-r--r--  drivers/pci/probe.c | 167
-rw-r--r--  drivers/pci/quirks.c | 119
-rw-r--r--  drivers/pci/search.c | 34
-rw-r--r--  drivers/pci/setup-bus.c | 2
-rw-r--r--  drivers/scsi/vmw_pvscsi.h | 1
-rw-r--r--  drivers/vfio/pci/vfio_pci_config.c | 2
-rw-r--r--  drivers/xen/xen-pciback/pci_stub.c | 4
-rw-r--r--  include/asm-generic/io.h | 2
-rw-r--r--  include/asm-generic/pgtable.h | 4
-rw-r--r--  include/linux/aer.h | 2
-rw-r--r--  include/linux/ioport.h | 5
-rw-r--r--  include/linux/msi.h | 6
-rw-r--r--  include/linux/of_address.h | 27
-rw-r--r--  include/linux/of_pci.h | 13
-rw-r--r--  include/linux/pci.h | 60
-rw-r--r--  include/linux/pci_hotplug.h | 2
-rw-r--r--  include/linux/pci_ids.h | 2
-rw-r--r--  include/ras/ras_event.h | 48
-rw-r--r--  include/uapi/linux/pci_regs.h | 3
-rw-r--r--  kernel/resource.c | 70
-rw-r--r--  virt/kvm/assigned-dev.c | 2
-rw-r--r--  virt/kvm/iommu.c | 4
111 files changed, 4911 insertions, 1333 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 6615fda0abfb..ee6c04036492 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -65,6 +65,16 @@ Description:
force a rescan of all PCI buses in the system, and
re-discover previously removed devices.
+What: /sys/bus/pci/devices/.../msi_bus
+Date: September 2014
+Contact: Linux PCI developers <linux-pci@vger.kernel.org>
+Description:
+ Writing a zero value to this attribute disallows MSI and
+ MSI-X for any future drivers of the device. If the device
+ is a bridge, MSI and MSI-X will be disallowed for future
+ drivers of all child devices under the bridge. Drivers
+ must be reloaded for the new setting to take effect.
+
What: /sys/bus/pci/devices/.../msi_irqs/
Date: September, 2011
Contact: Neil Horman <nhorman@tuxdriver.com>
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index ed0d9b9fff2b..9f4faa8e8d00 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -23,3 +23,6 @@ Required properties:
Optional properties:
- reset-gpio: gpio pin number of power good signal
+- bus-range: PCI bus numbers covered (it is recommended for new devicetrees to
+ specify this property; to keep backwards compatibility, a range of 0x00-0xff
+ is assumed if not present)
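As a sketch of how the new bus-range property reads in practice, a DesignWare-based root complex node could carry it as below; the node name, unit address and the chosen range are illustrative assumptions, not taken from this series:

	pcie@40000000 {
		compatible = "snps,dw-pcie";
		device_type = "pci";
		#address-cells = <3>;
		#size-cells = <2>;
		/* cover buses 0x00-0x0f explicitly; without this property
		   a range of 0x00-0xff is assumed */
		bus-range = <0x00 0x0f>;
	};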
diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt b/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
index 0823362548dc..d763e047c6ae 100644
--- a/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
@@ -1,7 +1,10 @@
NVIDIA Tegra PCIe controller
Required properties:
-- compatible: "nvidia,tegra20-pcie" or "nvidia,tegra30-pcie"
+- compatible: Must be one of:
+ - "nvidia,tegra20-pcie"
+ - "nvidia,tegra30-pcie"
+ - "nvidia,tegra124-pcie"
- device_type: Must be "pci"
- reg: A list of physical base address and length for each set of controller
registers. Must contain an entry for each entry in the reg-names property.
@@ -57,6 +60,11 @@ Required properties:
- afi
- pcie_x
+Required properties on Tegra124 and later:
+- phys: Must contain an entry for each entry in phy-names.
+- phy-names: Must include the following entries:
+ - pcie
+
Power supplies for Tegra20:
- avdd-pex-supply: Power supply for analog PCIe logic. Must supply 1.05 V.
- vdd-pex-supply: Power supply for digital PCIe I/O. Must supply 1.05 V.
@@ -84,6 +92,21 @@ Power supplies for Tegra30:
- avdd-pexb-supply: Power supply for analog PCIe logic. Must supply 1.05 V.
- vdd-pexb-supply: Power supply for digital PCIe I/O. Must supply 1.05 V.
+Power supplies for Tegra124:
+- Required:
+ - avddio-pex-supply: Power supply for analog PCIe logic. Must supply 1.05 V.
+ - dvddio-pex-supply: Power supply for digital PCIe I/O. Must supply 1.05 V.
+ - avdd-pex-pll-supply: Power supply for dedicated (internal) PCIe PLL. Must
+ supply 1.05 V.
+ - hvdd-pex-supply: High-voltage supply for PCIe I/O and PCIe output clocks.
+ Must supply 3.3 V.
+ - hvdd-pex-pll-e-supply: High-voltage supply for PLLE (shared with USB3).
+ Must supply 3.3 V.
+ - vddio-pex-ctl-supply: Power supply for PCIe control I/O partition. Must
+ supply 2.8-3.3 V.
+ - avdd-pll-erefe-supply: Power supply for PLLE (shared with USB3). Must
+ supply 1.05 V.
+
Root ports are defined as subnodes of the PCIe controller node.
Required properties:
diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt
new file mode 100644
index 000000000000..54eae2938174
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt
@@ -0,0 +1,63 @@
+TI Keystone PCIe interface
+
+The Keystone PCI host controller is based on DesignWare PCI h/w version 3.65.
+It shares common functions with the DesignWare PCIe core driver and inherits
+common properties defined in
+Documentation/devicetree/bindings/pci/designware-pcie.txt
+
+Please refer to Documentation/devicetree/bindings/pci/designware-pcie.txt
+for the details of the DesignWare DT bindings. Additional properties are
+described here, as well as properties that are not applicable.
+
+Required Properties:-
+
+compatible: "ti,keystone-pcie"
+reg: index 1 is the base address and length of DW application registers.
+ index 2 is the base address and length of PCI device ID register.
+
+pcie_msi_intc : Interrupt controller device node for MSI IRQ chip
+ interrupt-cells: should be set to 1
+ interrupt-parent: Parent interrupt controller phandle
+ interrupts: GIC interrupt lines connected to PCI MSI interrupt lines
+
+ Example:
+ pcie_msi_intc: msi-interrupt-controller {
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 31 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 32 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 33 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 37 IRQ_TYPE_EDGE_RISING>;
+ };
+
+pcie_intc: Interrupt controller device node for Legacy IRQ chip
+ interrupt-cells: should be set to 1
+ interrupt-parent: Parent interrupt controller phandle
+ interrupts: GIC interrupt lines connected to PCI Legacy interrupt lines
+
+ Example:
+ pcie_intc: legacy-interrupt-controller {
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 28 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 29 IRQ_TYPE_EDGE_RISING>;
+ };
+
+Optional properties:-
+ phys: phandle to Generic Keystone SerDes phy for PCI
+ phy-names: name of the Generic Keystone SerDes phy for PCI
+ - If the boot loader has already established the PCI link, then phys and
+ phy-names shouldn't be present.
+
+Designware DT Properties not applicable for Keystone PCI
+
+1. pcie_bus clock-names not used. Instead, a phandle to phys is used.
+
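For the optional SerDes PHY hookup described above, a minimal board-level sketch is shown below; the unit address, phandle label and PHY name are assumptions for illustration, not part of this patch:

	pcie@21800000 {
		compatible = "ti,keystone-pcie";
		/* omit phys/phy-names when the boot loader has already
		   established the PCI link */
		phys = <&serdes_pcie>;
		phy-names = "pcie-phy";
	};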
diff --git a/Documentation/devicetree/bindings/pci/xgene-pci.txt b/Documentation/devicetree/bindings/pci/xgene-pci.txt
new file mode 100644
index 000000000000..1070b068c7c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/xgene-pci.txt
@@ -0,0 +1,57 @@
+* AppliedMicro X-Gene PCIe interface
+
+Required properties:
+- device_type: set to "pci"
+- compatible: should contain "apm,xgene-pcie" to identify the core.
+- reg: A list of physical base address and length for each set of controller
+ registers. Must contain an entry for each entry in the reg-names
+ property.
+- reg-names: Must include the following entries:
+ "csr": controller configuration registers.
+ "cfg": pcie configuration space registers.
+- #address-cells: set to <3>
+- #size-cells: set to <2>
+- ranges: ranges for the outbound memory, I/O regions.
+- dma-ranges: ranges for the inbound memory regions.
+- #interrupt-cells: set to <1>
+- interrupt-map-mask and interrupt-map: standard PCI properties
+ to define the mapping of the PCIe interface to interrupt
+ numbers.
+- clocks: from common clock binding: handle to pci clock.
+
+Optional properties:
+- status: Either "ok" or "disabled".
+- dma-coherent: Present if dma operations are coherent
+
+Example:
+
+SoC specific DT Entry:
+
+ pcie0: pcie@1f2b0000 {
+ status = "disabled";
+ device_type = "pci";
+ compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = < 0x00 0x1f2b0000 0x0 0x00010000 /* Controller registers */
+ 0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */
+ reg-names = "csr", "cfg";
+ ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000 /* io */
+ 0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */
+ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
+ 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>;
+ dma-coherent;
+ clocks = <&pcie0clk 0>;
+ };
+
+
+Board specific DT Entry:
+ &pcie0 {
+ status = "ok";
+ };
diff --git a/Documentation/devicetree/bindings/pci/xilinx-pcie.txt b/Documentation/devicetree/bindings/pci/xilinx-pcie.txt
new file mode 100644
index 000000000000..3e2c88d97ad4
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/xilinx-pcie.txt
@@ -0,0 +1,62 @@
+* Xilinx AXI PCIe Root Port Bridge DT description
+
+Required properties:
+- #address-cells: Address representation for root ports, set to <3>
+- #size-cells: Size representation for root ports, set to <2>
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+- compatible: Should contain "xlnx,axi-pcie-host-1.00.a"
+- reg: Should contain AXI PCIe registers location and length
+- device_type: must be "pci"
+- interrupts: Should contain AXI PCIe interrupt
+- interrupt-map-mask,
+ interrupt-map: standard PCI properties to define the mapping of the
+ PCI interface to interrupt numbers.
+- ranges: ranges for the PCI memory regions (I/O space region is not
+ supported by hardware)
+ Please refer to the standard PCI bus binding document for a more
+ detailed explanation
+
+Optional properties:
+- bus-range: PCI bus numbers covered
+
+Interrupt controller child node
++++++++++++++++++++++++++++++++
+Required properties:
+- interrupt-controller: identifies the node as an interrupt controller
+- #address-cells: specifies the number of cells needed to encode an
+ address. The value must be 0.
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+
+NOTE:
+The core provides a single interrupt for both INTx/MSI messages. So, an
+interrupt controller node is created to support the 'interrupt-map' DT
+functionality. The driver will create an IRQ domain for this map, decode
+the four INTx interrupts in the ISR and route them to this domain.
+
+
+Example:
+++++++++
+
+ pci_express: axi-pcie@50000000 {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ compatible = "xlnx,axi-pcie-host-1.00.a";
+ reg = < 0x50000000 0x10000000 >;
+ device_type = "pci";
+ interrupts = < 0 52 4 >;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc 1>,
+ <0 0 0 2 &pcie_intc 2>,
+ <0 0 0 3 &pcie_intc 3>,
+ <0 0 0 4 &pcie_intc 4>;
+ ranges = < 0x02000000 0 0x60000000 0x60000000 0 0x10000000 >;
+
+ pcie_intc: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ }
+ };
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 40677443c0c5..b5ab416cd53a 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -264,8 +264,10 @@ IIO
IO region
devm_release_mem_region()
devm_release_region()
+ devm_release_resource()
devm_request_mem_region()
devm_request_region()
+ devm_request_resource()
IOMAP
devm_ioport_map()
diff --git a/MAINTAINERS b/MAINTAINERS
index 0b23084070c2..75b98b4958c8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6939,6 +6939,14 @@ F: include/linux/pci*
F: arch/x86/pci/
F: arch/x86/kernel/quirks.c
+PCI DRIVER FOR APPLIEDMICRO XGENE
+M: Tanmay Inamdar <tinamdar@apm.com>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/xgene-pci.txt
+F: drivers/pci/host/pci-xgene.c
+
PCI DRIVER FOR IMX6
M: Richard Zhu <r65037@freescale.com>
M: Lucas Stach <l.stach@pengutronix.de>
@@ -6947,6 +6955,13 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/pci/host/*imx6*
+PCI DRIVER FOR TI KEYSTONE
+M: Murali Karicheri <m-karicheri2@ti.com>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/pci/host/*keystone*
+
PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
M: Jason Cooper <jason@lakedaemon.net>
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
index fa5f2bb5f106..9d342920695a 100644
--- a/arch/arm/boot/dts/spear1310.dtsi
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -85,7 +85,8 @@
pcie0: pcie@b1000000 {
compatible = "st,spear1340-pcie", "snps,dw-pcie";
- reg = <0xb1000000 0x4000>;
+ reg = <0xb1000000 0x4000>, <0x80000000 0x20000>;
+ reg-names = "dbi", "config";
interrupts = <0 68 0x4>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0x0 0 &gic 0 68 0x4>;
@@ -95,15 +96,15 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
- ranges = <0x00000800 0 0x80000000 0x80000000 0 0x00020000 /* configuration space */
- 0x81000000 0 0 0x80020000 0 0x00010000 /* downstream I/O */
+ ranges = <0x81000000 0 0 0x80020000 0 0x00010000 /* downstream I/O */
0x82000000 0 0x80030000 0xc0030000 0 0x0ffd0000>; /* non-prefetchable memory */
status = "disabled";
};
pcie1: pcie@b1800000 {
compatible = "st,spear1340-pcie", "snps,dw-pcie";
- reg = <0xb1800000 0x4000>;
+ reg = <0xb1800000 0x4000>, <0x90000000 0x20000>;
+ reg-names = "dbi", "config";
interrupts = <0 69 0x4>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0x0 0 &gic 0 69 0x4>;
@@ -113,15 +114,15 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
- ranges = <0x00000800 0 0x90000000 0x90000000 0 0x00020000 /* configuration space */
- 0x81000000 0 0 0x90020000 0 0x00010000 /* downstream I/O */
+ ranges = <0x81000000 0 0 0x90020000 0 0x00010000 /* downstream I/O */
0x82000000 0 0x90030000 0x90030000 0 0x0ffd0000>; /* non-prefetchable memory */
status = "disabled";
};
pcie2: pcie@b4000000 {
compatible = "st,spear1340-pcie", "snps,dw-pcie";
- reg = <0xb4000000 0x4000>;
+ reg = <0xb4000000 0x4000>, <0xc0000000 0x20000>;
+ reg-names = "dbi", "config";
interrupts = <0 70 0x4>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0x0 0 &gic 0 70 0x4>;
@@ -131,8 +132,7 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
- ranges = <0x00000800 0 0xc0000000 0xc0000000 0 0x00020000 /* configuration space */
- 0x81000000 0 0 0xc0020000 0 0x00010000 /* downstream I/O */
+ ranges = <0x81000000 0 0 0xc0020000 0 0x00010000 /* downstream I/O */
0x82000000 0 0xc0030000 0xc0030000 0 0x0ffd0000>; /* non-prefetchable memory */
status = "disabled";
};
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
index e71df0f2cb52..13e1aa33daa2 100644
--- a/arch/arm/boot/dts/spear1340.dtsi
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -50,7 +50,8 @@
pcie0: pcie@b1000000 {
compatible = "st,spear1340-pcie", "snps,dw-pcie";
- reg = <0xb1000000 0x4000>;
+ reg = <0xb1000000 0x4000>, <0x80000000 0x20000>;
+ reg-names = "dbi", "config";
interrupts = <0 68 0x4>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0x0 0 &gic 0 68 0x4>;
@@ -60,8 +61,7 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
- ranges = <0x00000800 0 0x80000000 0x80000000 0 0x00020000 /* configuration space */
- 0x81000000 0 0 0x80020000 0 0x00010000 /* downstream I/O */
+ ranges = <0x81000000 0 0 0x80020000 0 0x00010000 /* downstream I/O */
0x82000000 0 0x80030000 0xc0030000 0 0x0ffd0000>; /* non-prefetchable memory */
status = "disabled";
};
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 3d23418cbddd..180567408ee8 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -178,6 +178,7 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
/* PCI fixed i/o mapping */
#define PCI_IO_VIRT_BASE 0xfee00000
+#define PCI_IOBASE ((void __iomem *)PCI_IO_VIRT_BASE)
#if defined(CONFIG_PCI)
void pci_ioremap_set_mem_type(int mem_type);
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index 05e1f73a1e8d..c186a17c2cff 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -660,6 +660,7 @@ static void __init pci_v3_preinit(void)
{
unsigned long flags;
unsigned int temp;
+ phys_addr_t io_address = pci_pio_to_address(io_mem.start);
pcibios_min_mem = 0x00100000;
@@ -701,7 +702,7 @@ static void __init pci_v3_preinit(void)
/*
* Setup window 2 - PCI IO
*/
- v3_writel(V3_LB_BASE2, v3_addr_to_lb_base2(io_mem.start) |
+ v3_writel(V3_LB_BASE2, v3_addr_to_lb_base2(io_address) |
V3_LB_BASE_ENABLE);
v3_writew(V3_LB_MAP2, v3_addr_to_lb_map2(0));
@@ -742,6 +743,7 @@ static void __init pci_v3_preinit(void)
static void __init pci_v3_postinit(void)
{
unsigned int pci_cmd;
+ phys_addr_t io_address = pci_pio_to_address(io_mem.start);
pci_cmd = PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
@@ -758,7 +760,7 @@ static void __init pci_v3_postinit(void)
"interrupt: %d\n", ret);
#endif
- register_isa_ports(non_mem.start, io_mem.start, 0);
+ register_isa_ports(non_mem.start, io_address, 0);
}
/*
@@ -867,33 +869,32 @@ static int __init pci_v3_probe(struct platform_device *pdev)
for_each_of_pci_range(&parser, &range) {
if (!range.flags) {
- of_pci_range_to_resource(&range, np, &conf_mem);
+ ret = of_pci_range_to_resource(&range, np, &conf_mem);
conf_mem.name = "PCIv3 config";
}
if (range.flags & IORESOURCE_IO) {
- of_pci_range_to_resource(&range, np, &io_mem);
+ ret = of_pci_range_to_resource(&range, np, &io_mem);
io_mem.name = "PCIv3 I/O";
}
if ((range.flags & IORESOURCE_MEM) &&
!(range.flags & IORESOURCE_PREFETCH)) {
non_mem_pci = range.pci_addr;
non_mem_pci_sz = range.size;
- of_pci_range_to_resource(&range, np, &non_mem);
+ ret = of_pci_range_to_resource(&range, np, &non_mem);
non_mem.name = "PCIv3 non-prefetched mem";
}
if ((range.flags & IORESOURCE_MEM) &&
(range.flags & IORESOURCE_PREFETCH)) {
pre_mem_pci = range.pci_addr;
pre_mem_pci_sz = range.size;
- of_pci_range_to_resource(&range, np, &pre_mem);
+ ret = of_pci_range_to_resource(&range, np, &pre_mem);
pre_mem.name = "PCIv3 prefetched mem";
}
- }
- if (!conf_mem.start || !io_mem.start ||
- !non_mem.start || !pre_mem.start) {
- dev_err(&pdev->dev, "missing ranges in device node\n");
- return -EINVAL;
+ if (ret < 0) {
+ dev_err(&pdev->dev, "missing ranges in device node\n");
+ return ret;
+ }
}
pci_v3.map_irq = of_irq_parse_and_map_pci;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9746dc24a117..3f0e854d0ff4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -83,7 +83,7 @@ config MMU
def_bool y
config NO_IOPORT_MAP
- def_bool y
+ def_bool y if !PCI
config STACKTRACE_SUPPORT
def_bool y
@@ -163,6 +163,26 @@ menu "Bus support"
config ARM_AMBA
bool
+config PCI
+ bool "PCI support"
+ help
+ This feature enables support for the PCI bus system. If you say Y
+ here, the kernel will include drivers and infrastructure code
+ to support PCI bus devices.
+
+config PCI_DOMAINS
+ def_bool PCI
+
+config PCI_DOMAINS_GENERIC
+ def_bool PCI
+
+config PCI_SYSCALL
+ def_bool PCI
+
+source "drivers/pci/Kconfig"
+source "drivers/pci/pcie/Kconfig"
+source "drivers/pci/hotplug/Kconfig"
+
endmenu
menu "Kernel Features"
diff --git a/arch/arm64/boot/dts/apm-mustang.dts b/arch/arm64/boot/dts/apm-mustang.dts
index b2f56229aa5e..f64900052f4e 100644
--- a/arch/arm64/boot/dts/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm-mustang.dts
@@ -25,6 +25,14 @@
};
};
+&pcie0clk {
+ status = "ok";
+};
+
+&pcie0 {
+ status = "ok";
+};
+
&serial0 {
status = "ok";
};
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index f391972ad135..4f6d04d52cca 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -282,6 +282,171 @@
enable-mask = <0x10>;
clock-output-names = "rngpkaclk";
};
+
+ pcie0clk: pcie0clk@1f2bc000 {
+ status = "disabled";
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f2bc000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "pcie0clk";
+ };
+
+ pcie1clk: pcie1clk@1f2cc000 {
+ status = "disabled";
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f2cc000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "pcie1clk";
+ };
+
+ pcie2clk: pcie2clk@1f2dc000 {
+ status = "disabled";
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f2dc000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "pcie2clk";
+ };
+
+ pcie3clk: pcie3clk@1f50c000 {
+ status = "disabled";
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f50c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "pcie3clk";
+ };
+
+ pcie4clk: pcie4clk@1f51c000 {
+ status = "disabled";
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f51c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "pcie4clk";
+ };
+ };
+
+ pcie0: pcie@1f2b0000 {
+ status = "disabled";
+ device_type = "pci";
+ compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = < 0x00 0x1f2b0000 0x0 0x00010000 /* Controller registers */
+ 0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */
+ reg-names = "csr", "cfg";
+ ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000 /* io */
+ 0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */
+ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
+ 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>;
+ dma-coherent;
+ clocks = <&pcie0clk 0>;
+ };
+
+ pcie1: pcie@1f2c0000 {
+ status = "disabled";
+ device_type = "pci";
+ compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = < 0x00 0x1f2c0000 0x0 0x00010000 /* Controller registers */
+ 0xd0 0xd0000000 0x0 0x00040000>; /* PCI config space */
+ reg-names = "csr", "cfg";
+ ranges = <0x01000000 0x0 0x00000000 0xd0 0x10000000 0x00 0x00010000 /* io */
+ 0x02000000 0x0 0x80000000 0xd1 0x80000000 0x00 0x80000000>; /* mem */
+ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
+ 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc8 0x1
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xc9 0x1
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xca 0x1
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xcb 0x1>;
+ dma-coherent;
+ clocks = <&pcie1clk 0>;
+ };
+
+ pcie2: pcie@1f2d0000 {
+ status = "disabled";
+ device_type = "pci";
+ compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = < 0x00 0x1f2d0000 0x0 0x00010000 /* Controller registers */
+ 0x90 0xd0000000 0x0 0x00040000>; /* PCI config space */
+ reg-names = "csr", "cfg";
+ ranges = <0x01000000 0x0 0x00000000 0x90 0x10000000 0x0 0x00010000 /* io */
+ 0x02000000 0x0 0x80000000 0x91 0x80000000 0x0 0x80000000>; /* mem */
+ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
+ 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xce 0x1
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xcf 0x1
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xd0 0x1
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xd1 0x1>;
+ dma-coherent;
+ clocks = <&pcie2clk 0>;
+ };
+
+ pcie3: pcie@1f500000 {
+ status = "disabled";
+ device_type = "pci";
+ compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = < 0x00 0x1f500000 0x0 0x00010000 /* Controller registers */
+ 0xa0 0xd0000000 0x0 0x00040000>; /* PCI config space */
+ reg-names = "csr", "cfg";
+ ranges = <0x01000000 0x0 0x00000000 0xa0 0x10000000 0x0 0x00010000 /* io */
+ 0x02000000 0x0 0x80000000 0xa1 0x80000000 0x0 0x80000000>; /* mem */
+ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
+ 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xd4 0x1
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xd5 0x1
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xd6 0x1
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xd7 0x1>;
+ dma-coherent;
+ clocks = <&pcie3clk 0>;
+ };
+
+ pcie4: pcie@1f510000 {
+ status = "disabled";
+ device_type = "pci";
+ compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = < 0x00 0x1f510000 0x0 0x00010000 /* Controller registers */
+ 0xc0 0xd0000000 0x0 0x00200000>; /* PCI config space */
+ reg-names = "csr", "cfg";
+ ranges = <0x01000000 0x0 0x00000000 0xc0 0x10000000 0x0 0x00010000 /* io */
+ 0x02000000 0x0 0x80000000 0xc1 0x80000000 0x0 0x80000000>; /* mem */
+ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
+ 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xda 0x1
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xdb 0x1
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xdc 0x1
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xdd 0x1>;
+ dma-coherent;
+ clocks = <&pcie4clk 0>;
};
serial0: serial@1c020000 {
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index c1968475cc4e..774a7c85e70f 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -29,6 +29,7 @@ generic-y += mman.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += pci.h
+generic-y += pci-bridge.h
generic-y += poll.h
generic-y += preempt.h
generic-y += resource.h
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index f771e8bcad4a..79f1d519221f 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -121,7 +121,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
/*
* I/O port access primitives.
*/
-#define IO_SPACE_LIMIT 0xffff
+#define arch_has_dev_port() (1)
+#define IO_SPACE_LIMIT (SZ_32M - 1)
#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M))
static inline u8 inb(unsigned long addr)
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
new file mode 100644
index 000000000000..872ba939fcb2
--- /dev/null
+++ b/arch/arm64/include/asm/pci.h
@@ -0,0 +1,37 @@
+#ifndef __ASM_PCI_H
+#define __ASM_PCI_H
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm-generic/pci-bridge.h>
+#include <asm-generic/pci-dma-compat.h>
+
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM 0
+
+/*
+ * Set to 1 if the kernel should re-assign all PCI bus numbers
+ */
+#define pcibios_assign_all_busses() \
+ (pci_has_flag(PCI_REASSIGN_ALL_BUS))
+
+/*
+ * PCI address space differs from physical memory address space
+ */
+#define PCI_DMA_BUS_IS_PHYS (0)
+
+extern int isa_dma_bridge_buggy;
+
+#ifdef CONFIG_PCI
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ return 1;
+}
+#endif /* CONFIG_PCI */
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_PCI_H */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index d58e40cde88e..77dbe1e6398d 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -301,6 +301,8 @@ static inline int has_transparent_hugepage(void)
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
+#define pgprot_device(prot) \
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 6e9538c2d28a..5bd029b43644 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -30,6 +30,7 @@ arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
arm64-obj-$(CONFIG_KGDB) += kgdb.o
arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
+arm64-obj-$(CONFIG_PCI) += pci.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
new file mode 100644
index 000000000000..ce5836c14ec1
--- /dev/null
+++ b/arch/arm64/kernel/pci.c
@@ -0,0 +1,70 @@
+/*
+ * Code borrowed from powerpc/kernel/pci-common.c
+ *
+ * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+
+#include <asm/pci-bridge.h>
+
+/*
+ * Called after each bus is probed, but before its children are examined
+ */
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+ /* nothing to do, expected to be removed in the future */
+}
+
+/*
+ * We don't have to worry about legacy ISA devices, so nothing to do here
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ return res->start;
+}
+
+/*
+ * Try to assign the IRQ number from DT when adding a new device
+ */
+int pcibios_add_device(struct pci_dev *dev)
+{
+ dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
+
+ return 0;
+}
+
+
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+static bool dt_domain_found = false;
+
+void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
+{
+ int domain = of_get_pci_domain_nr(parent->of_node);
+
+ if (domain >= 0) {
+ dt_domain_found = true;
+ } else if (dt_domain_found == true) {
+ dev_err(parent, "Node %s is missing \"linux,pci-domain\" property in DT\n",
+ parent->of_node->full_name);
+ return;
+ } else {
+ domain = pci_get_new_domain_nr();
+ }
+
+ bus->domain_nr = domain;
+}
+#endif
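As a hedged illustration of what pci_bus_assign_domain_nr() above consumes (the node below is hypothetical and not part of this series), a host bridge node can pin its PCI domain number explicitly; note that once any bridge provides the property, all of them must:

	pcie0: pcie@1f2b0000 {
		device_type = "pci";
		/* read via of_get_pci_domain_nr(); the kernel falls back to
		   pci_get_new_domain_nr() when no bridge defines it */
		linux,pci-domain = <0>;
	};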
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index c430f9198d1b..8c3730c3c63d 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -23,7 +23,7 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata,
if (irq_prepare_move(irq, cpu))
return -1;
- get_cached_msi_msg(irq, &msg);
+ __get_cached_msi_msg(idata->msi_desc, &msg);
addr = msg.address_lo;
addr &= MSI_ADDR_DEST_ID_MASK;
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index afc58d2799ad..446e7799928c 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -175,8 +175,8 @@ static int sn_set_msi_irq_affinity(struct irq_data *data,
* Release XIO resources for the old MSI PCI address
*/
- get_cached_msi_msg(irq, &msg);
- sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+ __get_cached_msi_msg(data->msi_desc, &msg);
+ sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
pdev = sn_pdev->pdi_linux_pcidev;
provider = SN_PCIDEV_BUSPROVIDER(pdev);
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index ab0c5d14c6f7..63bbe07a1ccd 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -73,8 +73,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
* wants. Most devices only want 1, which will give
* configured_private_bits and request_private_bits equal 0.
*/
- pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
- &control);
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
/*
* If the number of private bits has been configured then use
@@ -176,8 +175,7 @@ msi_irq_allocated:
/* Update the number of IRQs the device has available to it */
control &= ~PCI_MSI_FLAGS_QSIZE;
control |= request_private_bits << 4;
- pci_write_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
- control);
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
irq_set_msi_desc(irq, desc);
write_msi_msg(irq, &msg);
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index b125ceab149c..3af721633618 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -136,8 +136,6 @@ struct machdep_calls {
int (*pci_setup_phb)(struct pci_controller *host);
#ifdef CONFIG_PCI_MSI
- int (*msi_check_device)(struct pci_dev* dev,
- int nvec, int type);
int (*setup_msi_irqs)(struct pci_dev *dev,
int nvec, int type);
void (*teardown_msi_irqs)(struct pci_dev *dev);
diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c
index 8bbc12d20f5c..71bd161640cf 100644
--- a/arch/powerpc/kernel/msi.c
+++ b/arch/powerpc/kernel/msi.c
@@ -13,7 +13,7 @@
#include <asm/machdep.h>
-int arch_msi_check_device(struct pci_dev* dev, int nvec, int type)
+int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
if (!ppc_md.setup_msi_irqs || !ppc_md.teardown_msi_irqs) {
pr_debug("msi: Platform doesn't provide MSI callbacks.\n");
@@ -24,16 +24,6 @@ int arch_msi_check_device(struct pci_dev* dev, int nvec, int type)
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
- if (ppc_md.msi_check_device) {
- pr_debug("msi: Using platform check routine.\n");
- return ppc_md.msi_check_device(dev, nvec, type);
- }
-
- return 0;
-}
-
-int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
return ppc_md.setup_msi_irqs(dev, nvec, type);
}
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 85825b5401e5..862b32702d29 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -199,14 +199,6 @@ out_error:
return msic;
}
-static int axon_msi_check_device(struct pci_dev *dev, int nvec, int type)
-{
- if (!find_msi_translator(dev))
- return -ENODEV;
-
- return 0;
-}
-
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
struct device_node *dn;
@@ -416,7 +408,6 @@ static int axon_msi_probe(struct platform_device *device)
ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
- ppc_md.msi_check_device = axon_msi_check_device;
axon_msi_debug_setup(dn, msic);
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b854b57ed5e1..b45c49249a5d 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -46,29 +46,21 @@
//#define cfg_dbg(fmt...) printk(fmt)
#ifdef CONFIG_PCI_MSI
-static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
-{
- struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct pci_dn *pdn = pci_get_pdn(pdev);
-
- if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
- return -ENODEV;
-
- return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV;
-}
-
static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
+ struct pci_dn *pdn = pci_get_pdn(pdev);
struct msi_desc *entry;
struct msi_msg msg;
int hwirq;
unsigned int virq;
int rc;
- if (WARN_ON(!phb))
+ if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
+ return -ENODEV;
+
+ if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
return -ENODEV;
list_for_each_entry(entry, &pdev->msi_list, list) {
@@ -860,7 +852,6 @@ void __init pnv_pci_init(void)
/* Configure MSIs */
#ifdef CONFIG_PCI_MSI
- ppc_md.msi_check_device = pnv_msi_check_device;
ppc_md.setup_msi_irqs = pnv_setup_msi_irqs;
ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs;
#endif
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 18ff4626d74e..8ab5add4ac82 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -336,26 +336,6 @@ out:
return request;
}
-static int rtas_msi_check_device(struct pci_dev *pdev, int nvec, int type)
-{
- int quota, rc;
-
- if (type == PCI_CAP_ID_MSIX)
- rc = check_req_msix(pdev, nvec);
- else
- rc = check_req_msi(pdev, nvec);
-
- if (rc)
- return rc;
-
- quota = msi_quota_for_device(pdev, nvec);
-
- if (quota && quota < nvec)
- return quota;
-
- return 0;
-}
-
static int check_msix_entries(struct pci_dev *pdev)
{
struct msi_desc *entry;
@@ -397,15 +377,24 @@ static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev)
static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
{
struct pci_dn *pdn;
- int hwirq, virq, i, rc;
+ int hwirq, virq, i, quota, rc;
struct msi_desc *entry;
struct msi_msg msg;
int nvec = nvec_in;
int use_32bit_msi_hack = 0;
- pdn = pci_get_pdn(pdev);
- if (!pdn)
- return -ENODEV;
+ if (type == PCI_CAP_ID_MSIX)
+ rc = check_req_msix(pdev, nvec);
+ else
+ rc = check_req_msi(pdev, nvec);
+
+ if (rc)
+ return rc;
+
+ quota = msi_quota_for_device(pdev, nvec);
+
+ if (quota && quota < nvec)
+ return quota;
if (type == PCI_CAP_ID_MSIX && check_msix_entries(pdev))
return -EINVAL;
@@ -416,12 +405,14 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
*/
if (type == PCI_CAP_ID_MSIX) {
int m = roundup_pow_of_two(nvec);
- int quota = msi_quota_for_device(pdev, m);
+ quota = msi_quota_for_device(pdev, m);
if (quota >= m)
nvec = m;
}
+ pdn = pci_get_pdn(pdev);
+
/*
* Try the new more explicit firmware interface, if that fails fall
* back to the old interface. The old interface is known to never
@@ -485,7 +476,7 @@ again:
irq_set_msi_desc(virq, entry);
/* Read config space back so we can restore after reset */
- read_msi_msg(virq, &msg);
+ __read_msi_msg(entry, &msg);
entry->msg = msg;
}
@@ -526,7 +517,6 @@ static int rtas_msi_init(void)
WARN_ON(ppc_md.setup_msi_irqs);
ppc_md.setup_msi_irqs = rtas_setup_msi_irqs;
ppc_md.teardown_msi_irqs = rtas_teardown_msi_irqs;
- ppc_md.msi_check_device = rtas_msi_check_device;
WARN_ON(ppc_md.pci_irq_fixup);
ppc_md.pci_irq_fixup = rtas_msi_pci_irq_fixup;
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 77efbaec7b9c..b32e79dbef4f 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -109,14 +109,6 @@ static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
return 0;
}
-static int fsl_msi_check_device(struct pci_dev *pdev, int nvec, int type)
-{
- if (type == PCI_CAP_ID_MSIX)
- pr_debug("fslmsi: MSI-X untested, trying anyway.\n");
-
- return 0;
-}
-
static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
@@ -173,6 +165,9 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
struct msi_msg msg;
struct fsl_msi *msi_data;
+ if (type == PCI_CAP_ID_MSIX)
+ pr_debug("fslmsi: MSI-X untested, trying anyway.\n");
+
/*
* If the PCI node has an fsl,msi property, then we need to use it
* to find the specific MSI.
@@ -527,7 +522,6 @@ static int fsl_of_msi_probe(struct platform_device *dev)
if (!ppc_md.setup_msi_irqs) {
ppc_md.setup_msi_irqs = fsl_setup_msi_irqs;
ppc_md.teardown_msi_irqs = fsl_teardown_msi_irqs;
- ppc_md.msi_check_device = fsl_msi_check_device;
} else if (ppc_md.setup_msi_irqs != fsl_setup_msi_irqs) {
dev_err(&dev->dev, "Different MSI driver already installed!\n");
err = -ENODEV;
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index 38e62382070c..15dccd35fa11 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -63,14 +63,6 @@ static struct irq_chip mpic_pasemi_msi_chip = {
.name = "PASEMI-MSI",
};
-static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
-{
- if (type == PCI_CAP_ID_MSIX)
- pr_debug("pasemi_msi: MSI-X untested, trying anyway\n");
-
- return 0;
-}
-
static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
@@ -97,6 +89,8 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
struct msi_msg msg;
int hwirq;
+ if (type == PCI_CAP_ID_MSIX)
+ pr_debug("pasemi_msi: MSI-X untested, trying anyway\n");
pr_debug("pasemi_msi_setup_msi_irqs, pdev %p nvec %d type %d\n",
pdev, nvec, type);
@@ -169,7 +163,6 @@ int mpic_pasemi_msi_init(struct mpic *mpic)
WARN_ON(ppc_md.setup_msi_irqs);
ppc_md.setup_msi_irqs = pasemi_msi_setup_msi_irqs;
ppc_md.teardown_msi_irqs = pasemi_msi_teardown_msi_irqs;
- ppc_md.msi_check_device = pasemi_msi_check_device;
return 0;
}
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 9a7aa0ed9c1c..623d7fba15b4 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -105,22 +105,6 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
return 0;
}
-static int u3msi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
-{
- if (type == PCI_CAP_ID_MSIX)
- pr_debug("u3msi: MSI-X untested, trying anyway.\n");
-
- /* If we can't find a magic address then MSI ain't gonna work */
- if (find_ht_magic_addr(pdev, 0) == 0 &&
- find_u4_magic_addr(pdev, 0) == 0) {
- pr_debug("u3msi: no magic address found for %s\n",
- pci_name(pdev));
- return -ENXIO;
- }
-
- return 0;
-}
-
static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
@@ -146,6 +130,17 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
u64 addr;
int hwirq;
+ if (type == PCI_CAP_ID_MSIX)
+ pr_debug("u3msi: MSI-X untested, trying anyway.\n");
+
+ /* If we can't find a magic address then MSI ain't gonna work */
+ if (find_ht_magic_addr(pdev, 0) == 0 &&
+ find_u4_magic_addr(pdev, 0) == 0) {
+ pr_debug("u3msi: no magic address found for %s\n",
+ pci_name(pdev));
+ return -ENXIO;
+ }
+
list_for_each_entry(entry, &pdev->msi_list, list) {
hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1);
if (hwirq < 0) {
@@ -202,7 +197,6 @@ int mpic_u3msi_init(struct mpic *mpic)
WARN_ON(ppc_md.setup_msi_irqs);
ppc_md.setup_msi_irqs = u3msi_setup_msi_irqs;
ppc_md.teardown_msi_irqs = u3msi_teardown_msi_irqs;
- ppc_md.msi_check_device = u3msi_msi_check_device;
return 0;
}
diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
index 11c888416f0a..a6a4dbda9078 100644
--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
@@ -44,6 +44,12 @@ static int hsta_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
int irq, hwirq;
u64 addr;
+ /* We don't support MSI-X */
+ if (type == PCI_CAP_ID_MSIX) {
+ pr_debug("%s: MSI-X not supported.\n", __func__);
+ return -EINVAL;
+ }
+
list_for_each_entry(entry, &dev->msi_list, list) {
irq = msi_bitmap_alloc_hwirqs(&ppc4xx_hsta_msi.bmp, 1);
if (irq < 0) {
@@ -117,17 +123,6 @@ static void hsta_teardown_msi_irqs(struct pci_dev *dev)
}
}
-static int hsta_msi_check_device(struct pci_dev *pdev, int nvec, int type)
-{
- /* We don't support MSI-X */
- if (type == PCI_CAP_ID_MSIX) {
- pr_debug("%s: MSI-X not supported.\n", __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
static int hsta_msi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -178,7 +173,6 @@ static int hsta_msi_probe(struct platform_device *pdev)
ppc_md.setup_msi_irqs = hsta_setup_msi_irqs;
ppc_md.teardown_msi_irqs = hsta_teardown_msi_irqs;
- ppc_md.msi_check_device = hsta_msi_check_device;
return 0;
out2:
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 43948da837a7..22b5200636e7 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -85,8 +85,12 @@ static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
struct msi_desc *entry;
struct ppc4xx_msi *msi_data = &ppc4xx_msi;
- msi_data->msi_virqs = kmalloc((msi_irqs) * sizeof(int),
- GFP_KERNEL);
+ dev_dbg(&dev->dev, "PCIE-MSI:%s called. vec %x type %d\n",
+ __func__, nvec, type);
+ if (type == PCI_CAP_ID_MSIX)
+ pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n");
+
+ msi_data->msi_virqs = kmalloc((msi_irqs) * sizeof(int), GFP_KERNEL);
if (!msi_data->msi_virqs)
return -ENOMEM;
@@ -134,16 +138,6 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
}
}
-static int ppc4xx_msi_check_device(struct pci_dev *pdev, int nvec, int type)
-{
- dev_dbg(&pdev->dev, "PCIE-MSI:%s called. vec %x type %d\n",
- __func__, nvec, type);
- if (type == PCI_CAP_ID_MSIX)
- pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n");
-
- return 0;
-}
-
static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
struct resource res, struct ppc4xx_msi *msi)
{
@@ -259,7 +253,6 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
ppc_md.setup_msi_irqs = ppc4xx_setup_msi_irqs;
ppc_md.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
- ppc_md.msi_check_device = ppc4xx_msi_check_device;
return err;
error_out:
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 059a76c29739..7b20bccf3648 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -81,14 +81,14 @@ struct pci_ops pci_root_ops = {
*/
DEFINE_RAW_SPINLOCK(pci_config_lock);
-static int can_skip_ioresource_align(const struct dmi_system_id *d)
+static int __init can_skip_ioresource_align(const struct dmi_system_id *d)
{
pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
printk(KERN_INFO "PCI: %s detected, can skip ISA alignment\n", d->ident);
return 0;
}
-static const struct dmi_system_id can_skip_pciprobe_dmi_table[] = {
+static const struct dmi_system_id can_skip_pciprobe_dmi_table[] __initconst = {
/*
* Systems where PCI IO resource ISA alignment can be skipped
* when the ISA enable bit in the bridge control is not set
@@ -186,7 +186,7 @@ void pcibios_remove_bus(struct pci_bus *bus)
* on the kernel command line (which was parsed earlier).
*/
-static int set_bf_sort(const struct dmi_system_id *d)
+static int __init set_bf_sort(const struct dmi_system_id *d)
{
if (pci_bf_sort == pci_bf_sort_default) {
pci_bf_sort = pci_dmi_bf;
@@ -195,8 +195,8 @@ static int set_bf_sort(const struct dmi_system_id *d)
return 0;
}
-static void read_dmi_type_b1(const struct dmi_header *dm,
- void *private_data)
+static void __init read_dmi_type_b1(const struct dmi_header *dm,
+ void *private_data)
{
u8 *d = (u8 *)dm + 4;
@@ -217,7 +217,7 @@ static void read_dmi_type_b1(const struct dmi_header *dm,
}
}
-static int find_sort_method(const struct dmi_system_id *d)
+static int __init find_sort_method(const struct dmi_system_id *d)
{
dmi_walk(read_dmi_type_b1, NULL);
@@ -232,7 +232,7 @@ static int find_sort_method(const struct dmi_system_id *d)
* Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
*/
#ifdef __i386__
-static int assign_all_busses(const struct dmi_system_id *d)
+static int __init assign_all_busses(const struct dmi_system_id *d)
{
pci_probe |= PCI_ASSIGN_ALL_BUSSES;
printk(KERN_INFO "%s detected: enabling PCI bus# renumbering"
@@ -241,7 +241,7 @@ static int assign_all_busses(const struct dmi_system_id *d)
}
#endif
-static int set_scan_all(const struct dmi_system_id *d)
+static int __init set_scan_all(const struct dmi_system_id *d)
{
printk(KERN_INFO "PCI: %s detected, enabling pci=pcie_scan_all\n",
d->ident);
@@ -249,7 +249,7 @@ static int set_scan_all(const struct dmi_system_id *d)
return 0;
}
-static const struct dmi_system_id pciprobe_dmi_table[] = {
+static const struct dmi_system_id pciprobe_dmi_table[] __initconst = {
#ifdef __i386__
/*
* Laptops which need pci=assign-busses to see Cardbus cards
@@ -512,7 +512,7 @@ int __init pcibios_init(void)
return 0;
}
-char * __init pcibios_setup(char *str)
+char *__init pcibios_setup(char *str)
{
if (!strcmp(str, "off")) {
pci_probe = 0;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 248642f4bab7..326198a4434e 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -31,7 +31,7 @@ static DEFINE_MUTEX(pci_mmcfg_lock);
LIST_HEAD(pci_mmcfg_list);
-static __init void pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
+static void __init pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
{
if (cfg->res.parent)
release_resource(&cfg->res);
@@ -39,7 +39,7 @@ static __init void pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
kfree(cfg);
}
-static __init void free_all_mmcfg(void)
+static void __init free_all_mmcfg(void)
{
struct pci_mmcfg_region *cfg, *tmp;
@@ -93,7 +93,7 @@ static struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start,
return new;
}
-static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
+static struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start,
int end, u64 addr)
{
struct pci_mmcfg_region *new;
@@ -125,7 +125,7 @@ struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
return NULL;
}
-static const char __init *pci_mmcfg_e7520(void)
+static const char *__init pci_mmcfg_e7520(void)
{
u32 win;
raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win);
@@ -140,7 +140,7 @@ static const char __init *pci_mmcfg_e7520(void)
return "Intel Corporation E7520 Memory Controller Hub";
}
-static const char __init *pci_mmcfg_intel_945(void)
+static const char *__init pci_mmcfg_intel_945(void)
{
u32 pciexbar, mask = 0, len = 0;
@@ -184,7 +184,7 @@ static const char __init *pci_mmcfg_intel_945(void)
return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
}
-static const char __init *pci_mmcfg_amd_fam10h(void)
+static const char *__init pci_mmcfg_amd_fam10h(void)
{
u32 low, high, address;
u64 base, msr;
@@ -235,21 +235,25 @@ static const char __init *pci_mmcfg_amd_fam10h(void)
}
static bool __initdata mcp55_checked;
-static const char __init *pci_mmcfg_nvidia_mcp55(void)
+static const char *__init pci_mmcfg_nvidia_mcp55(void)
{
int bus;
int mcp55_mmconf_found = 0;
- static const u32 extcfg_regnum = 0x90;
- static const u32 extcfg_regsize = 4;
- static const u32 extcfg_enable_mask = 1<<31;
- static const u32 extcfg_start_mask = 0xff<<16;
- static const int extcfg_start_shift = 16;
- static const u32 extcfg_size_mask = 0x3<<28;
- static const int extcfg_size_shift = 28;
- static const int extcfg_sizebus[] = {0x100, 0x80, 0x40, 0x20};
- static const u32 extcfg_base_mask[] = {0x7ff8, 0x7ffc, 0x7ffe, 0x7fff};
- static const int extcfg_base_lshift = 25;
+ static const u32 extcfg_regnum __initconst = 0x90;
+ static const u32 extcfg_regsize __initconst = 4;
+ static const u32 extcfg_enable_mask __initconst = 1 << 31;
+ static const u32 extcfg_start_mask __initconst = 0xff << 16;
+ static const int extcfg_start_shift __initconst = 16;
+ static const u32 extcfg_size_mask __initconst = 0x3 << 28;
+ static const int extcfg_size_shift __initconst = 28;
+ static const int extcfg_sizebus[] __initconst = {
+ 0x100, 0x80, 0x40, 0x20
+ };
+ static const u32 extcfg_base_mask[] __initconst = {
+ 0x7ff8, 0x7ffc, 0x7ffe, 0x7fff
+ };
+ static const int extcfg_base_lshift __initconst = 25;
/*
* do check if amd fam10h already took over
@@ -302,7 +306,7 @@ struct pci_mmcfg_hostbridge_probe {
const char *(*probe)(void);
};
-static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = {
+static const struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initconst = {
{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 },
{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index c77b24a8b2da..9b83b9051ae7 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -79,13 +79,13 @@ union bios32 {
static struct {
unsigned long address;
unsigned short segment;
-} bios32_indirect = { 0, __KERNEL_CS };
+} bios32_indirect __initdata = { 0, __KERNEL_CS };
/*
* Returns the entry point for the given service, NULL on error
*/
-static unsigned long bios32_service(unsigned long service)
+static unsigned long __init bios32_service(unsigned long service)
{
unsigned char return_code; /* %al */
unsigned long address; /* %ebx */
@@ -124,7 +124,7 @@ static struct {
static int pci_bios_present;
-static int check_pcibios(void)
+static int __init check_pcibios(void)
{
u32 signature, eax, ebx, ecx;
u8 status, major_ver, minor_ver, hw_mech;
@@ -312,7 +312,7 @@ static const struct pci_raw_ops pci_bios_access = {
* Try to find PCI BIOS.
*/
-static const struct pci_raw_ops *pci_find_bios(void)
+static const struct pci_raw_ops *__init pci_find_bios(void)
{
union bios32 *check;
unsigned char sum;
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 11323dd5196f..e4259c2c1acc 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -35,7 +35,6 @@
/*
* PCI device IDs.
*/
-#define PCI_VENDOR_ID_VMWARE 0x15AD
#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
/*
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index fa75a29a0408..3e238cd049e6 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -136,6 +136,10 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
struct msi_msg msg;
int virq, hwirq;
+ /* We support MSI, but not MSI-X */
+ if (desc->msi_attrib.is_msix)
+ return -EINVAL;
+
hwirq = armada_370_xp_alloc_msi();
if (hwirq < 0)
return hwirq;
@@ -166,15 +170,6 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
armada_370_xp_free_msi(hwirq);
}
-static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev,
- int nvec, int type)
-{
- /* We support MSI, but not MSI-X */
- if (type == PCI_CAP_ID_MSI)
- return 0;
- return -EINVAL;
-}
-
static struct irq_chip armada_370_xp_msi_irq_chip = {
.name = "armada_370_xp_msi_irq",
.irq_enable = unmask_msi_irq,
@@ -213,7 +208,6 @@ static int armada_370_xp_msi_init(struct device_node *node,
msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
- msi_chip->check_device = armada_370_xp_check_msi_device;
msi_chip->of_node = node;
armada_370_xp_msi_domain =
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 248399a881af..189b32519748 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -35,7 +35,6 @@
#include "vmci_driver.h"
#include "vmci_event.h"
-#define PCI_VENDOR_ID_VMWARE 0x15AD
#define PCI_DEVICE_ID_VMWARE_VMCI 0x0740
#define VMCI_UTIL_NUM_RESOURCES 1
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3759479f959a..5f0199f6c31e 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -117,7 +117,6 @@ enum {
/*
* PCI vendor and device IDs.
*/
-#define PCI_VENDOR_ID_VMWARE 0x15AD
#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0
#define MAX_ETHERNET_CARDS 10
#define MAX_PCI_PASSTHRU_DEVICE 6
diff --git a/drivers/of/address.c b/drivers/of/address.c
index e3718250d66e..afdb78299f61 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -5,6 +5,8 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/pci_regs.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
#include <linux/string.h>
/* Max address size we deal with */
@@ -293,6 +295,51 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_one);
+/*
+ * of_pci_range_to_resource - Create a resource from an of_pci_range
+ * @range: the PCI range that describes the resource
+ * @np: device node where the range belongs to
+ * @res: pointer to a valid resource that will be updated to
+ * reflect the values contained in the range.
+ *
+ * Returns -EINVAL if the range cannot be converted to a resource.
+ *
+ * Note that if the range is an IO range, the resource will be converted
+ * using pci_address_to_pio() which can fail if it is called too early or
+ * if the range cannot be matched to any host bridge IO space (our case here).
+ * To guard against that we try to register the IO range first.
+ * If that fails we know that pci_address_to_pio() will fail too.
+ */
+int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np, struct resource *res)
+{
+ int err;
+ res->flags = range->flags;
+ res->parent = res->child = res->sibling = NULL;
+ res->name = np->full_name;
+
+ if (res->flags & IORESOURCE_IO) {
+ unsigned long port;
+ err = pci_register_io_range(range->cpu_addr, range->size);
+ if (err)
+ goto invalid_range;
+ port = pci_address_to_pio(range->cpu_addr);
+ if (port == (unsigned long)-1) {
+ err = -EINVAL;
+ goto invalid_range;
+ }
+ res->start = port;
+ } else {
+ res->start = range->cpu_addr;
+ }
+ res->end = res->start + range->size - 1;
+ return 0;
+
+invalid_range:
+ res->start = (resource_size_t)OF_BAD_ADDR;
+ res->end = (resource_size_t)OF_BAD_ADDR;
+ return err;
+}
#endif /* CONFIG_PCI */
/*
@@ -601,12 +648,119 @@ const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
}
EXPORT_SYMBOL(of_get_address);
+#ifdef PCI_IOBASE
+struct io_range {
+ struct list_head list;
+ phys_addr_t start;
+ resource_size_t size;
+};
+
+static LIST_HEAD(io_range_list);
+static DEFINE_SPINLOCK(io_range_lock);
+#endif
+
+/*
+ * Record the PCI IO range (expressed as CPU physical address + size).
+ * Return a negative value if an error has occurred, zero otherwise
+ */
+int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
+{
+ int err = 0;
+
+#ifdef PCI_IOBASE
+ struct io_range *range;
+ resource_size_t allocated_size = 0;
+
+ /* check if the range hasn't been previously recorded */
+ spin_lock(&io_range_lock);
+ list_for_each_entry(range, &io_range_list, list) {
+ if (addr >= range->start && addr + size <= range->start + range->size) {
+ /* range already registered, bail out */
+ goto end_register;
+ }
+ allocated_size += range->size;
+ }
+
+ /* range not registered yet, check for available space */
+ if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
+ /* if it's too big check if 64K space can be reserved */
+ if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
+ err = -E2BIG;
+ goto end_register;
+ }
+
+ size = SZ_64K;
+ pr_warn("Requested IO range too big, new size set to 64K\n");
+ }
+
+ /* add the range to the list */
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range) {
+ err = -ENOMEM;
+ goto end_register;
+ }
+
+ range->start = addr;
+ range->size = size;
+
+ list_add_tail(&range->list, &io_range_list);
+
+end_register:
+ spin_unlock(&io_range_lock);
+#endif
+
+ return err;
+}
+
+phys_addr_t pci_pio_to_address(unsigned long pio)
+{
+ phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
+
+#ifdef PCI_IOBASE
+ struct io_range *range;
+ resource_size_t allocated_size = 0;
+
+ if (pio > IO_SPACE_LIMIT)
+ return address;
+
+ spin_lock(&io_range_lock);
+ list_for_each_entry(range, &io_range_list, list) {
+ if (pio >= allocated_size && pio < allocated_size + range->size) {
+ address = range->start + pio - allocated_size;
+ break;
+ }
+ allocated_size += range->size;
+ }
+ spin_unlock(&io_range_lock);
+#endif
+
+ return address;
+}
+
unsigned long __weak pci_address_to_pio(phys_addr_t address)
{
+#ifdef PCI_IOBASE
+ struct io_range *res;
+ resource_size_t offset = 0;
+ unsigned long addr = -1;
+
+ spin_lock(&io_range_lock);
+ list_for_each_entry(res, &io_range_list, list) {
+ if (address >= res->start && address < res->start + res->size) {
+ addr = address - res->start + offset;
+ break;
+ }
+ offset += res->size;
+ }
+ spin_unlock(&io_range_lock);
+
+ return addr;
+#else
if (address > IO_SPACE_LIMIT)
return (unsigned long)-1;
return (unsigned long) address;
+#endif
}
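+/*
+ * Example of the intended round trip, for a hypothetical host bridge I/O
+ * window at CPU address 0xf8000000 of size 64K:
+ *
+ *	pci_register_io_range(0xf8000000, SZ_64K);
+ *	pio = pci_address_to_pio(0xf8000000);	 0 if this is the first registered range
+ *	addr = pci_pio_to_address(pio);		 yields 0xf8000000 again
+ *
+ * Both translations walk io_range_list, so they only resolve addresses
+ * that were registered with pci_register_io_range() beforehand.
+ */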
static int __of_address_to_resource(struct device_node *dev,
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 848199633798..8882b467be95 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -1,7 +1,9 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include <linux/slab.h>
static inline int __of_pci_pci_compare(struct device_node *node,
unsigned int data)
@@ -89,6 +91,146 @@ int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
}
EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
+/**
+ * This function will try to obtain the host bridge domain number by
+ * finding a property called "linux,pci-domain" of the given device node.
+ *
+ * @node: device tree node with the domain information
+ *
+ * Returns the associated domain number from DT in the range [0-0xffff], or
+ * a negative value if the required property is not found.
+ */
+int of_get_pci_domain_nr(struct device_node *node)
+{
+ const __be32 *value;
+ int len;
+ u16 domain;
+
+ value = of_get_property(node, "linux,pci-domain", &len);
+ if (!value || len < sizeof(*value))
+ return -EINVAL;
+
+ domain = (u16)be32_to_cpup(value);
+
+ return domain;
+}
+EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
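+/*
+ * Example (hypothetical device tree node and caller):
+ *
+ *	pcie@f0000000 {
+ *		compatible = "vendor,example-pcie";
+ *		linux,pci-domain = <2>;
+ *	};
+ *
+ *	domain = of_get_pci_domain_nr(np);	 returns 2 for the node above
+ *	if (domain < 0)
+ *		domain = 0;			 property absent, pick a default
+ */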
+
+#if defined(CONFIG_OF_ADDRESS)
+/**
+ * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT
+ * @dev: device node of the host bridge having the range property
+ * @busno: bus number associated with the bridge root bus
+ * @bus_max: maximum number of buses for this bridge
+ * @resources: list where the range of resources will be added after DT parsing
+ * @io_base: pointer to a variable that will contain on return the physical
+ * address for the start of the I/O range. Can be NULL if the caller doesn't
+ * expect IO ranges to be present in the device tree.
+ *
+ * It is the caller's job to free the @resources list.
+ *
+ * This function will parse the "ranges" property of a PCI host bridge device
+ * node and set up the resource mapping based on its content. It is expected
+ * that the property conforms to the Power ePAPR document.
+ *
+ * It returns zero if the range parsing has been successful or a standard error
+ * value if it failed.
+ */
+int of_pci_get_host_bridge_resources(struct device_node *dev,
+ unsigned char busno, unsigned char bus_max,
+ struct list_head *resources, resource_size_t *io_base)
+{
+ struct resource *res;
+ struct resource *bus_range;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ char range_type[4];
+ int err;
+
+ if (io_base)
+ *io_base = (resource_size_t)OF_BAD_ADDR;
+
+ bus_range = kzalloc(sizeof(*bus_range), GFP_KERNEL);
+ if (!bus_range)
+ return -ENOMEM;
+
+ pr_info("PCI host bridge %s ranges:\n", dev->full_name);
+
+ err = of_pci_parse_bus_range(dev, bus_range);
+ if (err) {
+ bus_range->start = busno;
+ bus_range->end = bus_max;
+ bus_range->flags = IORESOURCE_BUS;
+ pr_info(" No bus range found for %s, using %pR\n",
+ dev->full_name, bus_range);
+ } else {
+ if (bus_range->end > bus_range->start + bus_max)
+ bus_range->end = bus_range->start + bus_max;
+ }
+ pci_add_resource(resources, bus_range);
+
+ /* Check for ranges property */
+ err = of_pci_range_parser_init(&parser, dev);
+ if (err)
+ goto parse_failed;
+
+ pr_debug("Parsing ranges property...\n");
+ for_each_of_pci_range(&parser, &range) {
+ /* Read next ranges element */
+ if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
+ snprintf(range_type, 4, " IO");
+ else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
+ snprintf(range_type, 4, "MEM");
+ else
+ snprintf(range_type, 4, "err");
+ pr_info(" %s %#010llx..%#010llx -> %#010llx\n", range_type,
+ range.cpu_addr, range.cpu_addr + range.size - 1,
+ range.pci_addr);
+
+ /*
+ * If we failed translation or got a zero-sized region
+ * then skip this range
+ */
+ if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
+ continue;
+
+ res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+ if (!res) {
+ err = -ENOMEM;
+ goto parse_failed;
+ }
+
+ err = of_pci_range_to_resource(&range, dev, res);
+ if (err)
+ goto conversion_failed;
+
+ if (resource_type(res) == IORESOURCE_IO) {
+ if (!io_base) {
+ pr_err("I/O range found for %s. Please provide an io_base pointer to save CPU base address\n",
+ dev->full_name);
+ err = -EINVAL;
+ goto conversion_failed;
+ }
+ if (*io_base != (resource_size_t)OF_BAD_ADDR)
+ pr_warn("More than one I/O resource converted for %s. CPU base address for old range lost!\n",
+ dev->full_name);
+ *io_base = range.cpu_addr;
+ }
+
+ pci_add_resource_offset(resources, res, res->start - range.pci_addr);
+ }
+
+ return 0;
+
+conversion_failed:
+ kfree(res);
+parse_failed:
+ pci_free_resource_list(resources);
+ return err;
+}
+EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
+#endif /* CONFIG_OF_ADDRESS */
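+/*
+ * Typical usage from a host controller driver's probe path (sketch with
+ * assumed names, error handling abbreviated):
+ *
+ *	LIST_HEAD(res);
+ *	resource_size_t iobase;
+ *
+ *	err = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase);
+ *	if (err)
+ *		return err;
+ *	... create the root bus from &res ...
+ *	pci_free_resource_list(&res);	 the caller owns the list
+ */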
+
#ifdef CONFIG_PCI_MSI
static LIST_HEAD(of_pci_msi_chip_list);
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 90f5ccacce4b..3dc25fad490c 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -63,4 +63,32 @@ config PCIE_SPEAR13XX
help
Say Y here if you want PCIe support on SPEAr13XX SoCs.
+config PCI_KEYSTONE
+ bool "TI Keystone PCIe controller"
+ depends on ARCH_KEYSTONE
+ select PCIE_DW
+ select PCIEPORTBUS
+ help
+ Say Y here if you want to enable PCI controller support on Keystone
+ SoCs. The PCI controller on Keystone is based on DesignWare hardware,
+ so the driver re-uses the DesignWare core functions.
+
+config PCIE_XILINX
+ bool "Xilinx AXI PCIe host bridge support"
+ depends on ARCH_ZYNQ
+ help
+ Say 'Y' here if you want the kernel to support the Xilinx AXI PCIe
+ host bridge driver.
+
+config PCI_XGENE
+ bool "X-Gene PCIe controller"
+ depends on ARCH_XGENE
+ depends on OF
+ select PCIEPORTBUS
+ help
+ Say Y here if you want internal PCI support on the APM X-Gene SoC.
+ There are 5 internal PCIe ports available. Each port is GEN3 capable
+ and supports link widths from x1 to x8.
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index d0e88f114ff9..26b3461d68d7 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -8,3 +8,6 @@ obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
+obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
+obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
+obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index 35fc73a8d0b3..233fe8a88264 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -257,11 +257,6 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
int ret;
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
-
ret = clk_prepare_enable(imx6_pcie->pcie_phy);
if (ret) {
dev_err(pp->dev, "unable to enable pcie_phy clock\n");
@@ -283,6 +278,12 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
/* allow the clocks to stabilize */
usleep_range(200, 500);
+ /* power up core phy and enable ref clock */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+
/* Some boards don't have PCIe reset GPIO. */
if (gpio_is_valid(imx6_pcie->reset_gpio)) {
gpio_set_value(imx6_pcie->reset_gpio, 0);
@@ -647,7 +648,7 @@ static int __init imx6_pcie_init(void)
{
return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
}
-fs_initcall(imx6_pcie_init);
+module_init(imx6_pcie_init);
MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
new file mode 100644
index 000000000000..34086ce88e8e
--- /dev/null
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -0,0 +1,516 @@
+/*
+ * Designware application register space functions for Keystone PCI controller
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ * http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "pcie-designware.h"
+#include "pci-keystone.h"
+
+/* Application register defines */
+#define LTSSM_EN_VAL 1
+#define LTSSM_STATE_MASK 0x1f
+#define LTSSM_STATE_L0 0x11
+#define DBI_CS2_EN_VAL 0x20
+#define OB_XLAT_EN_VAL 2
+
+/* Application registers */
+#define CMD_STATUS 0x004
+#define CFG_SETUP 0x008
+#define OB_SIZE 0x030
+#define CFG_PCIM_WIN_SZ_IDX 3
+#define CFG_PCIM_WIN_CNT 32
+#define SPACE0_REMOTE_CFG_OFFSET 0x1000
+#define OB_OFFSET_INDEX(n) (0x200 + (8 * n))
+#define OB_OFFSET_HI(n) (0x204 + (8 * n))
+
+/* IRQ register defines */
+#define IRQ_EOI 0x050
+#define IRQ_STATUS 0x184
+#define IRQ_ENABLE_SET 0x188
+#define IRQ_ENABLE_CLR 0x18c
+
+#define MSI_IRQ 0x054
+#define MSI0_IRQ_STATUS 0x104
+#define MSI0_IRQ_ENABLE_SET 0x108
+#define MSI0_IRQ_ENABLE_CLR 0x10c
+#define IRQ_STATUS 0x184
+#define MSI_IRQ_OFFSET 4
+
+/* Config space registers */
+#define DEBUG0 0x728
+
+#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
+
+static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
+ u32 *bit_pos)
+{
+ *reg_offset = offset % 8;
+ *bit_pos = offset >> 3;
+}
+
+u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+ return ks_pcie->app.start + MSI_IRQ;
+}
+
+void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
+{
+ struct pcie_port *pp = &ks_pcie->pp;
+ u32 pending, vector;
+ int src, virq;
+
+ pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));
+
+ /*
+ * MSI0 status bits 0-3 show vectors 0, 8, 16, 24; MSI1 status bits
+ * show vectors 1, 9, 17, 25; and so forth
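+ * (e.g. a pending bit src = 2 in MSI register offset = 3 corresponds to
+ * vector 3 + (2 << 3) = 19, and update_reg_offset_bit_pos(19, ...) maps
+ * back to reg_offset 3, bit_pos 2)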
+ */
+ for (src = 0; src < 4; src++) {
+ if (BIT(src) & pending) {
+ vector = offset + (src << 3);
+ virq = irq_linear_revmap(pp->irq_domain, vector);
+ dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
+ src, vector, virq);
+ generic_handle_irq(virq);
+ }
+ }
+}
+
+static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
+{
+ u32 offset, reg_offset, bit_pos;
+ struct keystone_pcie *ks_pcie;
+ unsigned int irq = d->irq;
+ struct msi_desc *msi;
+ struct pcie_port *pp;
+
+ msi = irq_get_msi_desc(irq);
+ pp = sys_to_pcie(msi->dev->bus->sysdata);
+ ks_pcie = to_keystone_pcie(pp);
+ offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+ update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
+
+ writel(BIT(bit_pos),
+ ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
+ writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
+}
+
+void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+{
+ u32 reg_offset, bit_pos;
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+ update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+ writel(BIT(bit_pos),
+ ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
+}
+
+void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+{
+ u32 reg_offset, bit_pos;
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+ update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+ writel(BIT(bit_pos),
+ ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
+}
+
+static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
+{
+ struct keystone_pcie *ks_pcie;
+ unsigned int irq = d->irq;
+ struct msi_desc *msi;
+ struct pcie_port *pp;
+ u32 offset;
+
+ msi = irq_get_msi_desc(irq);
+ pp = sys_to_pcie(msi->dev->bus->sysdata);
+ ks_pcie = to_keystone_pcie(pp);
+ offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+
+ /* Mask the end point if PVM implemented */
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ if (msi->msi_attrib.maskbit)
+ mask_msi_irq(d);
+ }
+
+ ks_dw_pcie_msi_clear_irq(pp, offset);
+}
+
+static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
+{
+ struct keystone_pcie *ks_pcie;
+ unsigned int irq = d->irq;
+ struct msi_desc *msi;
+ struct pcie_port *pp;
+ u32 offset;
+
+ msi = irq_get_msi_desc(irq);
+ pp = sys_to_pcie(msi->dev->bus->sysdata);
+ ks_pcie = to_keystone_pcie(pp);
+ offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+
+ /* Unmask the end point if PVM implemented */
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ if (msi->msi_attrib.maskbit)
+ unmask_msi_irq(d);
+ }
+
+ ks_dw_pcie_msi_set_irq(pp, offset);
+}
+
+static struct irq_chip ks_dw_pcie_msi_irq_chip = {
+ .name = "Keystone-PCIe-MSI-IRQ",
+ .irq_ack = ks_dw_pcie_msi_irq_ack,
+ .irq_mask = ks_dw_pcie_msi_irq_mask,
+ .irq_unmask = ks_dw_pcie_msi_irq_unmask,
+};
+
+static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
+ .map = ks_dw_pcie_msi_map,
+};
+
+int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_chip *chip)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+ int i;
+
+ pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
+ MAX_MSI_IRQS,
+ &ks_dw_pcie_msi_domain_ops,
+ chip);
+ if (!pp->irq_domain) {
+ dev_err(pp->dev, "irq domain init failed\n");
+ return -ENXIO;
+ }
+
+ for (i = 0; i < MAX_MSI_IRQS; i++)
+ irq_create_mapping(pp->irq_domain, i);
+
+ return 0;
+}
+
+void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
+{
+ int i;
+
+ for (i = 0; i < MAX_LEGACY_IRQS; i++)
+ writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
+}
+
+void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
+{
+ struct pcie_port *pp = &ks_pcie->pp;
+ u32 pending;
+ int virq;
+
+ pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));
+
+ if (BIT(0) & pending) {
+ virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
+ dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset,
+ virq);
+ generic_handle_irq(virq);
+ }
+
+ /* EOI the INTx interrupt */
+ writel(offset, ks_pcie->va_app_base + IRQ_EOI);
+}
+
+static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
+ .name = "Keystone-PCI-Legacy-IRQ",
+ .irq_ack = ks_dw_pcie_ack_legacy_irq,
+ .irq_mask = ks_dw_pcie_mask_legacy_irq,
+ .irq_unmask = ks_dw_pcie_unmask_legacy_irq,
+};
+
+static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hw_irq)
+{
+ irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, d->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
+ .map = ks_dw_pcie_init_legacy_irq_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
+ * registers
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
+{
+ u32 val;
+
+ writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
+ reg_virt + CMD_STATUS);
+
+ do {
+ val = readl(reg_virt + CMD_STATUS);
+ } while (!(val & DBI_CS2_EN_VAL));
+}
+
+/**
+ * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
+{
+ u32 val;
+
+ writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
+ reg_virt + CMD_STATUS);
+
+ do {
+ val = readl(reg_virt + CMD_STATUS);
+ } while (val & DBI_CS2_EN_VAL);
+}
+
+void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+{
+ struct pcie_port *pp = &ks_pcie->pp;
+ u32 start = pp->mem.start, end = pp->mem.end;
+ int i, tr_size;
+
+ /* Disable BARs for inbound access */
+ ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
+ writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
+ writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
+ ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+
+ /* Set outbound translation size per window division */
+ writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);
+
+ tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
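+ /*
+ * e.g. CFG_PCIM_WIN_SZ_IDX = 3 gives 8MB windows, so the
+ * CFG_PCIM_WIN_CNT = 32 windows below cover up to 256MB of
+ * PCI memory space
+ */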
+
+ /* Using Direct 1:1 mapping of RC <-> PCI memory space */
+ for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
+ writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
+ writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
+ start += tr_size;
+ }
+
+ /* Enable OB translation */
+ writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
+ ks_pcie->va_app_base + CMD_STATUS);
+}
+
+/**
+ * ks_pcie_cfg_setup() - Set up configuration space address for a device
+ *
+ * @ks_pcie: ptr to keystone_pcie structure
+ * @bus: Bus number the device is residing on
+ * @devfn: device, function number info
+ *
+ * Forms and returns the address of configuration space mapped in PCIESS
+ * address space 0. Also configures CFG_SETUP for remote configuration space
+ * access.
+ *
+ * The address space has two regions for configuration access - local and
+ * remote. The local region is used for bus 0 (the RC is attached on bus 0);
+ * the remote region is used for all other buses, with TYPE 1 access when
+ * bus > 1. Devices on bus 1 get TYPE 0 access since bus 1 is our (logical)
+ * secondary bus. CFG_SETUP is needed only for remote configuration access.
+ */
+static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
+ unsigned int devfn)
+{
+ u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
+ struct pcie_port *pp = &ks_pcie->pp;
+ u32 regval;
+
+ if (bus == 0)
+ return pp->dbi_base;
+
+ regval = (bus << 16) | (device << 8) | function;
+
+ /*
+ * Bus 1 is our virtual secondary bus and gets TYPE 0 access only;
+ * buses behind it need TYPE 1 access, selected by bit 24 of CFG_SETUP.
+ */
+ if (bus != 1)
+ regval |= BIT(24);
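+ /* e.g. bus 2, device 0, function 0 (TYPE 1) gives regval = BIT(24) | (2 << 16) */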
+
+ writel(regval, ks_pcie->va_app_base + CFG_SETUP);
+ return pp->va_cfg0_base;
+}
+
+int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 *val)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+ u8 bus_num = bus->number;
+ void __iomem *addr;
+
+ addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
+
+ return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val);
+}
+
+int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 val)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+ u8 bus_num = bus->number;
+ void __iomem *addr;
+
+ addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
+
+ return dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val);
+}
+
+/**
+ * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
+ *
+ * This sets BAR0 to enable inbound access for MSI_IRQ register
+ */
+void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+ /* Configure and set up BAR0 */
+ ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
+
+ /* Enable BAR0 */
+ writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
+ writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);
+
+ ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+
+ /*
+ * For BAR0, just setting bus address for inbound writes (MSI) should
+ * be sufficient. Use physical address to avoid any conflicts.
+ */
+ writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
+}
+
+/**
+ * ks_dw_pcie_link_up() - Check if link up
+ */
+int ks_dw_pcie_link_up(struct pcie_port *pp)
+{
+ u32 val = readl(pp->dbi_base + DEBUG0);
+
+ return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
+}
+
+void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ /* Disable Link training */
+ val = readl(ks_pcie->va_app_base + CMD_STATUS);
+ val &= ~LTSSM_EN_VAL;
+ writel(val, ks_pcie->va_app_base + CMD_STATUS);
+
+ /* Initiate Link Training */
+ val = readl(ks_pcie->va_app_base + CMD_STATUS);
+ writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
+}
+
+/**
+ * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
+ *
+ * Ioremap the register resources, initialize legacy irq domain
+ * and call dw_pcie_host_init() to initialize the Keystone
+ * PCI host controller.
+ */
+int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
+ struct device_node *msi_intc_np)
+{
+ struct pcie_port *pp = &ks_pcie->pp;
+ struct platform_device *pdev = to_platform_device(pp->dev);
+ struct resource *res;
+
+ /* Index 0 is the config reg. space address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pp->dbi_base = devm_ioremap_resource(pp->dev, res);
+ if (IS_ERR(pp->dbi_base))
+ return PTR_ERR(pp->dbi_base);
+
+ /*
+ * We set va_cfg0_base and va_cfg1_base to the same address; both are
+ * used by the pcie rd/wr_other_conf functions
+ */
+ pp->va_cfg0_base = pp->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
+ pp->va_cfg1_base = pp->va_cfg0_base;
+
+ /* Index 1 is the application reg. space address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ks_pcie->app = *res;
+ ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
+ if (IS_ERR(ks_pcie->va_app_base))
+ return PTR_ERR(ks_pcie->va_app_base);
+
+ /* Create legacy IRQ domain */
+ ks_pcie->legacy_irq_domain =
+ irq_domain_add_linear(ks_pcie->legacy_intc_np,
+ MAX_LEGACY_IRQS,
+ &ks_dw_pcie_legacy_irq_domain_ops,
+ NULL);
+ if (!ks_pcie->legacy_irq_domain) {
+ dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
+ return -EINVAL;
+ }
+
+ return dw_pcie_host_init(pp);
+}
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
new file mode 100644
index 000000000000..1b893bc8b842
--- /dev/null
+++ b/drivers/pci/host/pci-keystone.c
@@ -0,0 +1,415 @@
+/*
+ * PCIe host controller driver for Texas Instruments Keystone SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ * http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ * Implementation based on pci-exynos.c and pcie-designware.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/irqchip/chained_irq.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+
+#include "pcie-designware.h"
+#include "pci-keystone.h"
+
+#define DRIVER_NAME "keystone-pcie"
+
+/* driver specific constants */
+#define MAX_MSI_HOST_IRQS 8
+#define MAX_LEGACY_HOST_IRQS 4
+
+/* DEV_STAT_CTRL */
+#define PCIE_CAP_BASE 0x70
+
+/* PCIE controller device IDs */
+#define PCIE_RC_K2HK 0xb008
+#define PCIE_RC_K2E 0xb009
+#define PCIE_RC_K2L 0xb00a
+
+#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
+
+static void quirk_limit_mrrs(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->bus;
+ struct pci_dev *bridge = bus->self;
+ static const struct pci_device_id rc_pci_devids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { 0, },
+ };
+
+ if (pci_is_root_bus(bus))
+ return;
+
+ /* look for the host bridge */
+ while (!pci_is_root_bus(bus)) {
+ bridge = bus->self;
+ bus = bus->parent;
+ }
+
+ if (bridge) {
+ /*
+ * Keystone PCI controller has a h/w limitation of
+ * 256 bytes maximum read request size. It can't handle
+ * anything higher than this. So force this limit on
+ * all downstream devices.
+ */
+ if (pci_match_id(rc_pci_devids, bridge)) {
+ if (pcie_get_readrq(dev) > 256) {
+ dev_info(&dev->dev, "limiting MRRS to 256\n");
+ pcie_set_readrq(dev, 256);
+ }
+ }
+ }
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
+
+static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
+{
+ struct pcie_port *pp = &ks_pcie->pp;
+ int count = 200;
+
+ dw_pcie_setup_rc(pp);
+
+ if (dw_pcie_link_up(pp)) {
+ dev_err(pp->dev, "Link already up\n");
+ return 0;
+ }
+
+ ks_dw_pcie_initiate_link_train(ks_pcie);
+ /* check if the link is up or not */
+ while (!dw_pcie_link_up(pp)) {
+ usleep_range(100, 1000);
+ if (--count) {
+ ks_dw_pcie_initiate_link_train(ks_pcie);
+ continue;
+ }
+ dev_err(pp->dev, "phy link never came up\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+ u32 offset = irq - ks_pcie->msi_host_irqs[0];
+ struct pcie_port *pp = &ks_pcie->pp;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ dev_dbg(pp->dev, "ks_pci_msi_irq_handler, irq %d\n", irq);
+
+ /*
+ * The chained irq handler installation would have replaced the normal
+ * interrupt driver handler, so we need to take care of the mask/unmask
+ * and ack operations.
+ */
+ chained_irq_enter(chip, desc);
+ ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
+ chained_irq_exit(chip, desc);
+}
+
+/**
+ * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
+ * @irq: IRQ line for legacy interrupts
+ * @desc: Pointer to irq descriptor
+ *
+ * Traverse the pending legacy interrupts and invoke the handler for each. Also
+ * takes care of the interrupt controller level mask/ack operation.
+ */
+static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+ struct pcie_port *pp = &ks_pcie->pp;
+ u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq);
+
+ /*
+ * The chained irq handler installation would have replaced the normal
+ * interrupt driver handler, so we need to take care of the mask/unmask
+ * and ack operations.
+ */
+ chained_irq_enter(chip, desc);
+ ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+ chained_irq_exit(chip, desc);
+}
+
+static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
+ char *controller, int *num_irqs)
+{
+ int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL;
+ struct device *dev = ks_pcie->pp.dev;
+ struct device_node *np_pcie = dev->of_node, **np_temp;
+
+ if (!strcmp(controller, "msi-interrupt-controller"))
+ legacy = 0;
+
+ if (legacy) {
+ np_temp = &ks_pcie->legacy_intc_np;
+ max_host_irqs = MAX_LEGACY_HOST_IRQS;
+ host_irqs = &ks_pcie->legacy_host_irqs[0];
+ } else {
+ np_temp = &ks_pcie->msi_intc_np;
+ max_host_irqs = MAX_MSI_HOST_IRQS;
+ host_irqs = &ks_pcie->msi_host_irqs[0];
+ }
+
+ /* interrupt controller is in a child node */
+ *np_temp = of_find_node_by_name(np_pcie, controller);
+ if (!(*np_temp)) {
+ dev_err(dev, "Node for %s is absent\n", controller);
+ goto out;
+ }
+ temp = of_irq_count(*np_temp);
+ if (!temp)
+ goto out;
+ if (temp > max_host_irqs)
+ dev_warn(dev, "Too many %s interrupts defined %u\n",
+ (legacy ? "legacy" : "MSI"), temp);
+
+ /*
+ * Support up to max_host_irqs. In the DT, the interrupts are listed at
+ * index 0 to 3 (legacy) or 0 to 7 (MSI)
+ */
+ for (temp = 0; temp < max_host_irqs; temp++) {
+ host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
+ if (!host_irqs[temp])
+ break;
+ }
+ if (temp) {
+ *num_irqs = temp;
+ ret = 0;
+ }
+out:
+ return ret;
+}
+
+static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
+{
+ int i;
+
+ /* Legacy IRQ */
+ for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
+ irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie);
+ irq_set_chained_handler(ks_pcie->legacy_host_irqs[i],
+ ks_pcie_legacy_irq_handler);
+ }
+ ks_dw_pcie_enable_legacy_irqs(ks_pcie);
+
+ /* MSI IRQ */
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
+ irq_set_chained_handler(ks_pcie->msi_host_irqs[i],
+ ks_pcie_msi_irq_handler);
+ irq_set_handler_data(ks_pcie->msi_host_irqs[i],
+ ks_pcie);
+ }
+ }
+}
+
+/*
+ * When a PCI device does not exist during config cycles, keystone host gets a
+ * bus error instead of returning 0xffffffff. This handler always returns 0
+ * for this kind of fault.
+ */
+static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ unsigned long instr = *(unsigned long *) instruction_pointer(regs);
+
+ if ((instr & 0x0e100090) == 0x00100090) {
+ int reg = (instr >> 12) & 15;
+
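+ /*
+ * fake an all-ones read in the destination register and
+ * skip the faulting instruction
+ */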
+ regs->uregs[reg] = -1;
+ regs->ARM_pc += 4;
+ }
+
+ return 0;
+}
+
+static void __init ks_pcie_host_init(struct pcie_port *pp)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+ u32 val;
+
+ ks_pcie_establish_link(ks_pcie);
+ ks_dw_pcie_setup_rc_app_regs(ks_pcie);
+ ks_pcie_setup_interrupts(ks_pcie);
+ writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
+ pp->dbi_base + PCI_IO_BASE);
+
+ /* update the Device ID */
+ writew(ks_pcie->device_id, pp->dbi_base + PCI_DEVICE_ID);
+
+ /* update the DEV_STAT_CTRL to publish right mrrs */
+ val = readl(pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ /* set the mrrs to 256 bytes */
+ val |= BIT(12);
+ writel(val, pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
+
+ /*
+ * PCIe access errors that result into OCP errors are caught by ARM as
+ * "External aborts"
+ */
+ hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
+ "Asynchronous external abort");
+}
+
+static struct pcie_host_ops keystone_pcie_host_ops = {
+ .rd_other_conf = ks_dw_pcie_rd_other_conf,
+ .wr_other_conf = ks_dw_pcie_wr_other_conf,
+ .link_up = ks_dw_pcie_link_up,
+ .host_init = ks_pcie_host_init,
+ .msi_set_irq = ks_dw_pcie_msi_set_irq,
+ .msi_clear_irq = ks_dw_pcie_msi_clear_irq,
+ .get_msi_addr = ks_dw_pcie_get_msi_addr,
+ .msi_host_init = ks_dw_pcie_msi_host_init,
+ .scan_bus = ks_dw_pcie_v3_65_scan_bus,
+};
+
+static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
+ struct platform_device *pdev)
+{
+ struct pcie_port *pp = &ks_pcie->pp;
+ int ret;
+
+ ret = ks_pcie_get_irq_controller_info(ks_pcie,
+ "legacy-interrupt-controller",
+ &ks_pcie->num_legacy_host_irqs);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ ret = ks_pcie_get_irq_controller_info(ks_pcie,
+ "msi-interrupt-controller",
+ &ks_pcie->num_msi_host_irqs);
+ if (ret)
+ return ret;
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &keystone_pcie_host_ops;
+ ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static const struct of_device_id ks_pcie_of_match[] = {
+ {
+ .type = "pci",
+ .compatible = "ti,keystone-pcie",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ks_pcie_of_match);
+
+static int __exit ks_pcie_remove(struct platform_device *pdev)
+{
+ struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(ks_pcie->clk);
+
+ return 0;
+}
+
+static int __init ks_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct keystone_pcie *ks_pcie;
+ struct pcie_port *pp;
+ struct resource *res;
+ void __iomem *reg_p;
+ struct phy *phy;
+ int ret = 0;
+
+ ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
+ GFP_KERNEL);
+ if (!ks_pcie) {
+ dev_err(dev, "no memory for keystone pcie\n");
+ return -ENOMEM;
+ }
+ pp = &ks_pcie->pp;
+
+ /* initialize SerDes Phy if present */
+ phy = devm_phy_get(dev, "pcie-phy");
+ if (!IS_ERR_OR_NULL(phy)) {
+ ret = phy_init(phy);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* index 2 is to read PCI DEVICE_ID */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ reg_p = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg_p))
+ return PTR_ERR(reg_p);
+ ks_pcie->device_id = readl(reg_p) >> 16;
+ devm_iounmap(dev, reg_p);
+ devm_release_mem_region(dev, res->start, resource_size(res));
+
+ pp->dev = dev;
+ platform_set_drvdata(pdev, ks_pcie);
+ ks_pcie->clk = devm_clk_get(dev, "pcie");
+ if (IS_ERR(ks_pcie->clk)) {
+ dev_err(dev, "Failed to get pcie rc clock\n");
+ return PTR_ERR(ks_pcie->clk);
+ }
+ ret = clk_prepare_enable(ks_pcie->clk);
+ if (ret)
+ return ret;
+
+ ret = ks_add_pcie_port(ks_pcie, pdev);
+ if (ret < 0)
+ goto fail_clk;
+
+ return 0;
+fail_clk:
+ clk_disable_unprepare(ks_pcie->clk);
+
+ return ret;
+}
+
+static struct platform_driver ks_pcie_driver __refdata = {
+ .probe = ks_pcie_probe,
+ .remove = __exit_p(ks_pcie_remove),
+ .driver = {
+ .name = "keystone-pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ks_pcie_of_match),
+ },
+};
+
+module_platform_driver(ks_pcie_driver);
+
+MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
+MODULE_DESCRIPTION("Keystone PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h
new file mode 100644
index 000000000000..1fc1fceede9e
--- /dev/null
+++ b/drivers/pci/host/pci-keystone.h
@@ -0,0 +1,58 @@
+/*
+ * Keystone PCI Controller's common includes
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ * http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define MAX_LEGACY_IRQS 4
+#define MAX_MSI_HOST_IRQS 8
+#define MAX_LEGACY_HOST_IRQS 4
+
+struct keystone_pcie {
+ struct clk *clk;
+ struct pcie_port pp;
+ /* PCI Device ID */
+ u32 device_id;
+ int num_legacy_host_irqs;
+ int legacy_host_irqs[MAX_LEGACY_HOST_IRQS];
+ struct device_node *legacy_intc_np;
+
+ int num_msi_host_irqs;
+ int msi_host_irqs[MAX_MSI_HOST_IRQS];
+ struct device_node *msi_intc_np;
+ struct irq_domain *legacy_irq_domain;
+
+ /* Application register space */
+ void __iomem *va_app_base;
+ struct resource app;
+};
+
+/* Keystone DW specific MSI controller APIs/definitions */
+void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset);
+u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
+
+/* Keystone specific PCI controller APIs */
+void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
+void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
+int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
+ struct device_node *msi_intc_np);
+int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 val);
+int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 *val);
+void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie);
+int ks_dw_pcie_link_up(struct pcie_port *pp);
+void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie);
+void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
+void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
+void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
+int ks_dw_pcie_msi_host_init(struct pcie_port *pp,
+ struct msi_chip *chip);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index a8c6f1a92e0f..b1315e197ffb 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -873,7 +873,7 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
rangesz = pna + na + ns;
nranges = rlen / sizeof(__be32) / rangesz;
- for (i = 0; i < nranges; i++) {
+ for (i = 0; i < nranges; i++, range += rangesz) {
u32 flags = of_read_number(range, 1);
u32 slot = of_read_number(range + 1, 1);
u64 cpuaddr = of_read_number(range + na, pna);
@@ -883,14 +883,14 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
rtype = IORESOURCE_IO;
else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
rtype = IORESOURCE_MEM;
+ else
+ continue;
if (slot == PCI_SLOT(devfn) && type == rtype) {
*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
return 0;
}
-
- range += rangesz;
}
return -ENOENT;
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 0fb0fdb223d5..3d43874319be 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -38,6 +38,7 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
@@ -115,13 +116,20 @@
#define AFI_INTR_CODE 0xb8
#define AFI_INTR_CODE_MASK 0xf
-#define AFI_INTR_AXI_SLAVE_ERROR 1
-#define AFI_INTR_AXI_DECODE_ERROR 2
+#define AFI_INTR_INI_SLAVE_ERROR 1
+#define AFI_INTR_INI_DECODE_ERROR 2
#define AFI_INTR_TARGET_ABORT 3
#define AFI_INTR_MASTER_ABORT 4
#define AFI_INTR_INVALID_WRITE 5
#define AFI_INTR_LEGACY 6
#define AFI_INTR_FPCI_DECODE_ERROR 7
+#define AFI_INTR_AXI_DECODE_ERROR 8
+#define AFI_INTR_FPCI_TIMEOUT 9
+#define AFI_INTR_PE_PRSNT_SENSE 10
+#define AFI_INTR_PE_CLKREQ_SENSE 11
+#define AFI_INTR_CLKCLAMP_SENSE 12
+#define AFI_INTR_RDY4PD_SENSE 13
+#define AFI_INTR_P2P_ERROR 14
#define AFI_INTR_SIGNATURE 0xbc
#define AFI_UPPER_FPCI_ADDRESS 0xc0
@@ -152,8 +160,10 @@
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
#define AFI_FUSE 0x104
@@ -165,12 +175,21 @@
#define AFI_PEX_CTRL_RST (1 << 0)
#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
+#define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
+
+#define AFI_PLLE_CONTROL 0x160
+#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
+#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
#define AFI_PEXBIAS_CTRL_0 0x168
#define RP_VEND_XP 0x00000F00
#define RP_VEND_XP_DL_UP (1 << 30)
+#define RP_PRIV_MISC 0x00000FE0
+#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
+#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
+
#define RP_LINK_CONTROL_STATUS 0x00000090
#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
@@ -197,6 +216,7 @@
#define PADS_REFCLK_CFG0 0x000000C8
#define PADS_REFCLK_CFG1 0x000000CC
+#define PADS_REFCLK_BIAS 0x000000D0
/*
* Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
@@ -236,6 +256,7 @@ struct tegra_pcie_soc_data {
bool has_pex_bias_ctrl;
bool has_intr_prsnt_sense;
bool has_cml_clk;
+ bool has_gen2;
};
static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
@@ -253,6 +274,7 @@ struct tegra_pcie {
struct list_head buses;
struct resource *cs;
+ struct resource all;
struct resource io;
struct resource mem;
struct resource prefetch;
@@ -267,6 +289,8 @@ struct tegra_pcie {
struct reset_control *afi_rst;
struct reset_control *pcie_xrst;
+ struct phy *phy;
+
struct tegra_msi msi;
struct list_head ports;
@@ -382,7 +406,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
for (i = 0; i < 16; i++) {
unsigned long virt = (unsigned long)bus->area->addr +
i * SZ_64K;
- phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K;
+ phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;
err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
if (err < 0) {
@@ -561,6 +585,8 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
if (soc->has_pex_clkreq_en)
value |= AFI_PEX_CTRL_CLKREQ_EN;
+ value |= AFI_PEX_CTRL_OVERRIDE_EN;
+
afi_writel(port->pcie, value, ctrl);
tegra_pcie_port_reset(port);
@@ -568,6 +594,7 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
+ const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
unsigned long value;
@@ -578,6 +605,10 @@ static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
/* disable reference clock */
value = afi_readl(port->pcie, ctrl);
+
+ if (soc->has_pex_clkreq_en)
+ value &= ~AFI_PEX_CTRL_CLKREQ_EN;
+
value &= ~AFI_PEX_CTRL_REFCLK_EN;
afi_writel(port->pcie, value, ctrl);
}
@@ -626,13 +657,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct tegra_pcie *pcie = sys_to_pcie(sys);
+ int err;
+ phys_addr_t io_start;
+
+ err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
+ if (err < 0)
+ return err;
+
+ err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
+ if (err)
+ return err;
+
+ io_start = pci_pio_to_address(pcie->io.start);
pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
pci_add_resource_offset(&sys->resources, &pcie->prefetch,
sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
- pci_ioremap_io(nr * SZ_64K, pcie->io.start);
+ pci_ioremap_io(nr * SZ_64K, io_start);
return 1;
}
@@ -684,9 +727,15 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
"Target abort",
"Master abort",
"Invalid write",
+ "Legacy interrupt",
"Response decoding error",
"AXI response decoding error",
"Transaction timeout",
+ "Slot present pin change",
+ "Slot clock request change",
+ "TMS clock ramp change",
+ "TMS ready for power down",
+ "Peer2Peer error",
};
struct tegra_pcie *pcie = arg;
u32 code, signature;
@@ -737,6 +786,7 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
u32 fpci_bar, size, axi_address;
+ phys_addr_t io_start = pci_pio_to_address(pcie->io.start);
/* Bar 0: type 1 extended configuration space */
fpci_bar = 0xfe100000;
@@ -749,7 +799,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
/* Bar 1: downstream IO bar */
fpci_bar = 0xfdfc0000;
size = resource_size(&pcie->io);
- axi_address = pcie->io.start;
+ axi_address = io_start;
afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
@@ -792,30 +842,27 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
-static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
+static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
const struct tegra_pcie_soc_data *soc = pcie->soc_data;
- struct tegra_pcie_port *port;
- unsigned int timeout;
- unsigned long value;
+ u32 value;
- /* power down PCIe slot clock bias pad */
- if (soc->has_pex_bias_ctrl)
- afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
+ timeout = jiffies + msecs_to_jiffies(timeout);
- /* configure mode and disable all ports */
- value = afi_readl(pcie, AFI_PCIE_CONFIG);
- value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
- value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
-
- list_for_each_entry(port, &pcie->ports, list)
- value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+ while (time_before(jiffies, timeout)) {
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ if (value & PADS_PLL_CTL_LOCKDET)
+ return 0;
+ }
- afi_writel(pcie, value, AFI_PCIE_CONFIG);
+ return -ETIMEDOUT;
+}
- value = afi_readl(pcie, AFI_FUSE);
- value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
- afi_writel(pcie, value, AFI_FUSE);
+static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ u32 value;
+ int err;
/* initialize internal PHY, enable up to 16 PCIE lanes */
pads_writel(pcie, 0x0, PADS_CTL_SEL);
@@ -834,6 +881,13 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
pads_writel(pcie, value, soc->pads_pll_ctl);
+ /* reset PLL */
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ value &= ~PADS_PLL_CTL_RST_B4SM;
+ pads_writel(pcie, value, soc->pads_pll_ctl);
+
+ usleep_range(20, 100);
+
/* take PLL out of reset */
value = pads_readl(pcie, soc->pads_pll_ctl);
value |= PADS_PLL_CTL_RST_B4SM;
@@ -846,15 +900,11 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
/* wait for the PLL to lock */
- timeout = 300;
- do {
- value = pads_readl(pcie, soc->pads_pll_ctl);
- usleep_range(1000, 2000);
- if (--timeout == 0) {
- pr_err("Tegra PCIe error: timeout waiting for PLL\n");
- return -EBUSY;
- }
- } while (!(value & PADS_PLL_CTL_LOCKDET));
+ err = tegra_pcie_pll_wait(pcie, 500);
+ if (err < 0) {
+ dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
+ return err;
+ }
/* turn off IDDQ override */
value = pads_readl(pcie, PADS_CTL);
@@ -866,6 +916,58 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
pads_writel(pcie, value, PADS_CTL);
+ return 0;
+}
+
+static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ struct tegra_pcie_port *port;
+ unsigned long value;
+ int err;
+
+ /* enable PLL power down */
+ if (pcie->phy) {
+ value = afi_readl(pcie, AFI_PLLE_CONTROL);
+ value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
+ value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
+ afi_writel(pcie, value, AFI_PLLE_CONTROL);
+ }
+
+ /* power down PCIe slot clock bias pad */
+ if (soc->has_pex_bias_ctrl)
+ afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
+
+ /* configure mode and disable all ports */
+ value = afi_readl(pcie, AFI_PCIE_CONFIG);
+ value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
+ value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+
+ afi_writel(pcie, value, AFI_PCIE_CONFIG);
+
+ if (soc->has_gen2) {
+ value = afi_readl(pcie, AFI_FUSE);
+ value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
+ afi_writel(pcie, value, AFI_FUSE);
+ } else {
+ value = afi_readl(pcie, AFI_FUSE);
+ value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
+ afi_writel(pcie, value, AFI_FUSE);
+ }
+
+ if (!pcie->phy)
+ err = tegra_pcie_phy_enable(pcie);
+ else
+ err = phy_power_on(pcie->phy);
+
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
+ return err;
+ }
+
/* take the PCIe interface module out of reset */
reset_control_deassert(pcie->pcie_xrst);
@@ -899,6 +1001,10 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
/* TODO: disable and unprepare clocks? */
+ err = phy_power_off(pcie->phy);
+ if (err < 0)
+ dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);
+
reset_control_assert(pcie->pcie_xrst);
reset_control_assert(pcie->afi_rst);
reset_control_assert(pcie->pex_rst);
@@ -1020,6 +1126,19 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
return err;
}
+ pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
+ if (IS_ERR(pcie->phy)) {
+ err = PTR_ERR(pcie->phy);
+ dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
+ return err;
+ }
+
+ err = phy_init(pcie->phy);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
+ return err;
+ }
+
err = tegra_pcie_power_on(pcie);
if (err) {
dev_err(&pdev->dev, "failed to power up: %d\n", err);
@@ -1078,10 +1197,17 @@ poweroff:
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
+ int err;
+
if (pcie->irq > 0)
free_irq(pcie->irq, pcie);
tegra_pcie_power_off(pcie);
+
+ err = phy_exit(pcie->phy);
+ if (err < 0)
+ dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
+
return 0;
}
@@ -1170,8 +1296,10 @@ static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
return hwirq;
irq = irq_create_mapping(msi->domain, hwirq);
- if (!irq)
+ if (!irq) {
+ tegra_msi_free(msi, hwirq);
return -EINVAL;
+ }
irq_set_msi_desc(irq, desc);
@@ -1189,8 +1317,10 @@ static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
{
struct tegra_msi *msi = to_tegra_msi(chip);
struct irq_data *d = irq_get_irq_data(irq);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- tegra_msi_free(msi, d->hwirq);
+ irq_dispose_mapping(irq);
+ tegra_msi_free(msi, hwirq);
}
static struct irq_chip tegra_msi_irq_chip = {
@@ -1327,7 +1457,19 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
{
struct device_node *np = pcie->dev->of_node;
- if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
+ if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+ switch (lanes) {
+ case 0x0000104:
+ dev_info(pcie->dev, "4x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
+ return 0;
+
+ case 0x0000102:
+ dev_info(pcie->dev, "2x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
+ return 0;
+ }
+ } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
switch (lanes) {
case 0x00000204:
dev_info(pcie->dev, "4x1, 2x1 configuration\n");
@@ -1435,7 +1577,23 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
struct device_node *np = pcie->dev->of_node;
unsigned int i = 0;
- if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
+ if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+ pcie->num_supplies = 7;
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[i++].supply = "avddio-pex";
+ pcie->supplies[i++].supply = "dvddio-pex";
+ pcie->supplies[i++].supply = "avdd-pex-pll";
+ pcie->supplies[i++].supply = "hvdd-pex";
+ pcie->supplies[i++].supply = "hvdd-pex-pll-e";
+ pcie->supplies[i++].supply = "vddio-pex-ctl";
+ pcie->supplies[i++].supply = "avdd-pll-erefe";
+ } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
bool need_pexa = false, need_pexb = false;
/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
@@ -1514,32 +1672,50 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
struct resource res;
int err;
+ memset(&pcie->all, 0, sizeof(pcie->all));
+ pcie->all.flags = IORESOURCE_MEM;
+ pcie->all.name = np->full_name;
+ pcie->all.start = ~0;
+ pcie->all.end = 0;
+
if (of_pci_range_parser_init(&parser, np)) {
dev_err(pcie->dev, "missing \"ranges\" property\n");
return -EINVAL;
}
for_each_of_pci_range(&parser, &range) {
- of_pci_range_to_resource(&range, np, &res);
+ err = of_pci_range_to_resource(&range, np, &res);
+ if (err < 0)
+ return err;
switch (res.flags & IORESOURCE_TYPE_BITS) {
case IORESOURCE_IO:
memcpy(&pcie->io, &res, sizeof(res));
- pcie->io.name = "I/O";
+ pcie->io.name = np->full_name;
break;
case IORESOURCE_MEM:
if (res.flags & IORESOURCE_PREFETCH) {
memcpy(&pcie->prefetch, &res, sizeof(res));
- pcie->prefetch.name = "PREFETCH";
+ pcie->prefetch.name = "prefetchable";
} else {
memcpy(&pcie->mem, &res, sizeof(res));
- pcie->mem.name = "MEM";
+ pcie->mem.name = "non-prefetchable";
}
break;
}
+
+ if (res.start <= pcie->all.start)
+ pcie->all.start = res.start;
+
+ if (res.end >= pcie->all.end)
+ pcie->all.end = res.end;
}
+ err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
+ if (err < 0)
+ return err;
+
err = of_pci_parse_bus_range(np, &pcie->busn);
if (err < 0) {
dev_err(pcie->dev, "failed to parse ranges property: %d\n",
@@ -1641,6 +1817,12 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
unsigned int retries = 3;
unsigned long value;
+ /* override presence detection */
+ value = readl(port->base + RP_PRIV_MISC);
+ value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
+ value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
+ writel(value, port->base + RP_PRIV_MISC);
+
do {
unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
@@ -1721,6 +1903,7 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = {
.has_pex_bias_ctrl = false,
.has_intr_prsnt_sense = false,
.has_cml_clk = false,
+ .has_gen2 = false,
};
static const struct tegra_pcie_soc_data tegra30_pcie_data = {
@@ -1732,9 +1915,23 @@ static const struct tegra_pcie_soc_data tegra30_pcie_data = {
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
.has_cml_clk = true,
+ .has_gen2 = false,
+};
+
+static const struct tegra_pcie_soc_data tegra124_pcie_data = {
+ .num_ports = 2,
+ .msi_base_shift = 8,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_intr_prsnt_sense = true,
+ .has_cml_clk = true,
+ .has_gen2 = true,
};
static const struct of_device_id tegra_pcie_of_match[] = {
+ { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
{ },
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
new file mode 100644
index 000000000000..9ecabfa8c634
--- /dev/null
+++ b/drivers/pci/host/pci-xgene.c
@@ -0,0 +1,659 @@
+/**
+ * APM X-Gene PCIe Driver
+ *
+ * Copyright (c) 2014 Applied Micro Circuits Corporation.
+ *
+ * Author: Tanmay Inamdar <tinamdar@apm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk-private.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define PCIECORE_CTLANDSTATUS 0x50
+#define PIM1_1L 0x80
+#define IBAR2 0x98
+#define IR2MSK 0x9c
+#define PIM2_1L 0xa0
+#define IBAR3L 0xb4
+#define IR3MSKL 0xbc
+#define PIM3_1L 0xc4
+#define OMR1BARL 0x100
+#define OMR2BARL 0x118
+#define OMR3BARL 0x130
+#define CFGBARL 0x154
+#define CFGBARH 0x158
+#define CFGCTL 0x15c
+#define RTDID 0x160
+#define BRIDGE_CFG_0 0x2000
+#define BRIDGE_CFG_4 0x2010
+#define BRIDGE_STATUS_0 0x2600
+
+#define LINK_UP_MASK 0x00000100
+#define AXI_EP_CFG_ACCESS 0x10000
+#define EN_COHERENCY 0xF0000000
+#define EN_REG 0x00000001
+#define OB_LO_IO 0x00000002
+#define XGENE_PCIE_VENDORID 0x10E8
+#define XGENE_PCIE_DEVICEID 0xE004
+#define SZ_1T (SZ_1G*1024ULL)
+#define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe)
+
+struct xgene_pcie_port {
+ struct device_node *node;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *csr_base;
+ void __iomem *cfg_base;
+ unsigned long cfg_addr;
+ bool link_up;
+};
+
+static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
+{
+ return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
+}
+
+/* PCIe Configuration Out/In */
+static inline void xgene_pcie_cfg_out32(void __iomem *addr, int offset, u32 val)
+{
+ writel(val, addr + offset);
+}
+
+static inline void xgene_pcie_cfg_out16(void __iomem *addr, int offset, u16 val)
+{
+ u32 val32 = readl(addr + (offset & ~0x3));
+
+ switch (offset & 0x3) {
+ case 2:
+ val32 &= ~0xFFFF0000;
+ val32 |= (u32)val << 16;
+ break;
+ case 0:
+ default:
+ val32 &= ~0xFFFF;
+ val32 |= val;
+ break;
+ }
+ writel(val32, addr + (offset & ~0x3));
+}
+
+static inline void xgene_pcie_cfg_out8(void __iomem *addr, int offset, u8 val)
+{
+ u32 val32 = readl(addr + (offset & ~0x3));
+
+ switch (offset & 0x3) {
+ case 0:
+ val32 &= ~0xFF;
+ val32 |= val;
+ break;
+ case 1:
+ val32 &= ~0xFF00;
+ val32 |= (u32)val << 8;
+ break;
+ case 2:
+ val32 &= ~0xFF0000;
+ val32 |= (u32)val << 16;
+ break;
+ case 3:
+ default:
+ val32 &= ~0xFF000000;
+ val32 |= (u32)val << 24;
+ break;
+ }
+ writel(val32, addr + (offset & ~0x3));
+}
+
+static inline void xgene_pcie_cfg_in32(void __iomem *addr, int offset, u32 *val)
+{
+ *val = readl(addr + offset);
+}
+
+static inline void xgene_pcie_cfg_in16(void __iomem *addr, int offset, u32 *val)
+{
+ *val = readl(addr + (offset & ~0x3));
+
+ switch (offset & 0x3) {
+ case 2:
+ *val >>= 16;
+ break;
+ }
+
+ *val &= 0xFFFF;
+}
+
+static inline void xgene_pcie_cfg_in8(void __iomem *addr, int offset, u32 *val)
+{
+ *val = readl(addr + (offset & ~0x3));
+
+ switch (offset & 0x3) {
+ case 3:
+ *val = *val >> 24;
+ break;
+ case 2:
+ *val = *val >> 16;
+ break;
+ case 1:
+ *val = *val >> 8;
+ break;
+ }
+ *val &= 0xFF;
+}
+
+/*
+ * When address bits [17:16] are 2'b01, the configuration access is treated
+ * as Type 1 and forwarded to the external PCIe device.
+ */
+static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
+{
+ struct xgene_pcie_port *port = bus->sysdata;
+
+ if (bus->number >= (bus->primary + 1))
+ return port->cfg_base + AXI_EP_CFG_ACCESS;
+
+ return port->cfg_base;
+}
+
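+/*
+ * For illustration: AXI_EP_CFG_ACCESS is 0x10000, i.e. address bit 16 set
+ * and bit 17 clear, which is exactly the [17:16] = 2'b01 encoding described
+ * above. Accesses made through cfg_base + AXI_EP_CFG_ACCESS are therefore
+ * forwarded as Type 1 configuration requests to buses below the root port.
+ */
+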
+/*
+ * For a configuration request, the RTDID register supplies the Bus, Device
+ * and Function numbers used in the header fields.
+ */
+static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
+{
+ struct xgene_pcie_port *port = bus->sysdata;
+ unsigned int b, d, f;
+ u32 rtdid_val = 0;
+
+ b = bus->number;
+ d = PCI_SLOT(devfn);
+ f = PCI_FUNC(devfn);
+
+ if (!pci_is_root_bus(bus))
+ rtdid_val = (b << 8) | (d << 3) | f;
+
+ writel(rtdid_val, port->csr_base + RTDID);
+ /* read the register back to ensure flush */
+ readl(port->csr_base + RTDID);
+}
+
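+/*
+ * For example (hypothetical target): a Type 1 access to bus 1, device 0,
+ * function 0 programs RTDID with (1 << 8) | (0 << 3) | 0 = 0x100, while an
+ * access on the root bus itself leaves RTDID at 0.
+ */
+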
+/*
+ * The X-Gene PCIe port uses BAR0-BAR1 of the RC's configuration space for
+ * inbound translation from the PCI bus to the native bus. The entire DDR
+ * region is mapped into PCIe space through these registers so that it can
+ * be reached by DMA from EP devices. BAR0/1 of the bridge must therefore be
+ * hidden during enumeration to keep the PCI core from sizing and assigning
+ * resources to them.
+ */
+static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
+{
+ if (pci_is_root_bus(bus) && ((offset == PCI_BASE_ADDRESS_0) ||
+ (offset == PCI_BASE_ADDRESS_1)))
+ return true;
+
+ return false;
+}
+
+static int xgene_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 *val)
+{
+ struct xgene_pcie_port *port = bus->sysdata;
+ void __iomem *addr;
+
+ if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (xgene_pcie_hide_rc_bars(bus, offset)) {
+ *val = 0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ xgene_pcie_set_rtdid_reg(bus, devfn);
+ addr = xgene_pcie_get_cfg_base(bus);
+ switch (len) {
+ case 1:
+ xgene_pcie_cfg_in8(addr, offset, val);
+ break;
+ case 2:
+ xgene_pcie_cfg_in16(addr, offset, val);
+ break;
+ default:
+ xgene_pcie_cfg_in32(addr, offset, val);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int xgene_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 val)
+{
+ struct xgene_pcie_port *port = bus->sysdata;
+ void __iomem *addr;
+
+ if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (xgene_pcie_hide_rc_bars(bus, offset))
+ return PCIBIOS_SUCCESSFUL;
+
+ xgene_pcie_set_rtdid_reg(bus, devfn);
+ addr = xgene_pcie_get_cfg_base(bus);
+ switch (len) {
+ case 1:
+ xgene_pcie_cfg_out8(addr, offset, (u8)val);
+ break;
+ case 2:
+ xgene_pcie_cfg_out16(addr, offset, (u16)val);
+ break;
+ default:
+ xgene_pcie_cfg_out32(addr, offset, val);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops xgene_pcie_ops = {
+ .read = xgene_pcie_read_config,
+ .write = xgene_pcie_write_config
+};
+
+static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
+ u32 flags, u64 size)
+{
+ u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
+ u32 val32 = 0;
+ u32 val;
+
+ val32 = readl(csr_base + addr);
+ val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
+ writel(val, csr_base + addr);
+
+ val32 = readl(csr_base + addr + 0x04);
+ val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
+ writel(val, csr_base + addr + 0x04);
+
+ val32 = readl(csr_base + addr + 0x04);
+ val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
+ writel(val, csr_base + addr + 0x04);
+
+ val32 = readl(csr_base + addr + 0x08);
+ val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
+ writel(val, csr_base + addr + 0x08);
+
+ return mask;
+}
+
+static void xgene_pcie_linkup(struct xgene_pcie_port *port,
+ u32 *lanes, u32 *speed)
+{
+ void __iomem *csr_base = port->csr_base;
+ u32 val32;
+
+ port->link_up = false;
+ val32 = readl(csr_base + PCIECORE_CTLANDSTATUS);
+ if (val32 & LINK_UP_MASK) {
+ port->link_up = true;
+ *speed = PIPE_PHY_RATE_RD(val32);
+ val32 = readl(csr_base + BRIDGE_STATUS_0);
+ *lanes = val32 >> 26;
+ }
+}
+
+static int xgene_pcie_init_port(struct xgene_pcie_port *port)
+{
+ int rc;
+
+ port->clk = clk_get(port->dev, NULL);
+ if (IS_ERR(port->clk)) {
+ dev_err(port->dev, "clock not available\n");
+ return -ENODEV;
+ }
+
+ rc = clk_prepare_enable(port->clk);
+ if (rc) {
+ dev_err(port->dev, "clock enable failed\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
+ port->csr_base = devm_ioremap_resource(port->dev, res);
+ if (IS_ERR(port->csr_base))
+ return PTR_ERR(port->csr_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ port->cfg_base = devm_ioremap_resource(port->dev, res);
+ if (IS_ERR(port->cfg_base))
+ return PTR_ERR(port->cfg_base);
+ port->cfg_addr = res->start;
+
+ return 0;
+}
+
+static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
+ struct resource *res, u32 offset,
+ u64 cpu_addr, u64 pci_addr)
+{
+ void __iomem *base = port->csr_base + offset;
+ resource_size_t size = resource_size(res);
+ u64 restype = resource_type(res);
+ u64 mask = 0;
+ u32 min_size;
+ u32 flag = EN_REG;
+
+ if (restype == IORESOURCE_MEM) {
+ min_size = SZ_128M;
+ } else {
+ min_size = 128;
+ flag |= OB_LO_IO;
+ }
+
+ if (size >= min_size)
+ mask = ~(size - 1) | flag;
+ else
+ dev_warn(port->dev, "res size 0x%llx less than minimum 0x%x\n",
+ (u64)size, min_size);
+
+ writel(lower_32_bits(cpu_addr), base);
+ writel(upper_32_bits(cpu_addr), base + 0x04);
+ writel(lower_32_bits(mask), base + 0x08);
+ writel(upper_32_bits(mask), base + 0x0c);
+ writel(lower_32_bits(pci_addr), base + 0x10);
+ writel(upper_32_bits(pci_addr), base + 0x14);
+}
+
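+/*
+ * Outbound mask arithmetic, with an assumed window size for illustration:
+ * for a 256 MB memory window, mask = ~(SZ_256M - 1) | EN_REG =
+ * 0xfffffffff0000001 (64-bit), so 0xf0000001 is written at offset 0x08 and
+ * 0xffffffff at offset 0x0c of the selected OMR register block.
+ */
+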
+static void xgene_pcie_setup_cfg_reg(void __iomem *csr_base, u64 addr)
+{
+ writel(lower_32_bits(addr), csr_base + CFGBARL);
+ writel(upper_32_bits(addr), csr_base + CFGBARH);
+ writel(EN_REG, csr_base + CFGCTL);
+}
+
+static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
+ struct list_head *res,
+ resource_size_t io_base)
+{
+ struct pci_host_bridge_window *window;
+ struct device *dev = port->dev;
+ int ret;
+
+ list_for_each_entry(window, res, list) {
+ struct resource *res = window->res;
+ u64 restype = resource_type(res);
+
+ dev_dbg(port->dev, "%pR\n", res);
+
+ switch (restype) {
+ case IORESOURCE_IO:
+ xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
+ res->start - window->offset);
+ ret = pci_remap_iospace(res, io_base);
+ if (ret < 0)
+ return ret;
+ break;
+ case IORESOURCE_MEM:
+ xgene_pcie_setup_ob_reg(port, res, OMR1BARL, res->start,
+ res->start - window->offset);
+ break;
+ case IORESOURCE_BUS:
+ break;
+ default:
+ dev_err(dev, "invalid resource %pR\n", res);
+ return -EINVAL;
+ }
+ }
+ xgene_pcie_setup_cfg_reg(port->csr_base, port->cfg_addr);
+
+ return 0;
+}
+
+static void xgene_pcie_setup_pims(void *addr, u64 pim, u64 size)
+{
+ writel(lower_32_bits(pim), addr);
+ writel(upper_32_bits(pim) | EN_COHERENCY, addr + 0x04);
+ writel(lower_32_bits(size), addr + 0x10);
+ writel(upper_32_bits(size), addr + 0x14);
+}
+
+/*
+ * The X-Gene PCIe controller supports a maximum of 3 inbound memory regions.
+ * This function selects a free region based on the size of the range.
+ */
+static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
+{
+ if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) {
+ *ib_reg_mask |= (1 << 1);
+ return 1;
+ }
+
+ if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) {
+ *ib_reg_mask |= (1 << 0);
+ return 0;
+ }
+
+ if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) {
+ *ib_reg_mask |= (1 << 2);
+ return 2;
+ }
+
+ return -EINVAL;
+}
+
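+/*
+ * Illustrative sizes: an 8 MB dma-range (> 4 bytes, < 16 MB) takes region 1
+ * (IBAR2/PIM2), a 1 GB range falls through to region 0 (RC BAR0-1/PIM1),
+ * and a further large range, up to 1 TB, ends up in region 2 (IBAR3/PIM3),
+ * each region being handed out only once via ib_reg_mask.
+ */
+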
+static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
+ struct of_pci_range *range, u8 *ib_reg_mask)
+{
+ void __iomem *csr_base = port->csr_base;
+ void __iomem *cfg_base = port->cfg_base;
+ void *bar_addr;
+ void *pim_addr;
+ u64 cpu_addr = range->cpu_addr;
+ u64 pci_addr = range->pci_addr;
+ u64 size = range->size;
+ u64 mask = ~(size - 1) | EN_REG;
+ u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
+ u32 bar_low;
+ int region;
+
+ region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
+ if (region < 0) {
+ dev_warn(port->dev, "invalid pcie dma-range config\n");
+ return;
+ }
+
+ if (range->flags & IORESOURCE_PREFETCH)
+ flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;
+
+ bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
+ switch (region) {
+ case 0:
+ xgene_pcie_set_ib_mask(csr_base, BRIDGE_CFG_4, flags, size);
+ bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
+ writel(bar_low, bar_addr);
+ writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
+ pim_addr = csr_base + PIM1_1L;
+ break;
+ case 1:
+ bar_addr = csr_base + IBAR2;
+ writel(bar_low, bar_addr);
+ writel(lower_32_bits(mask), csr_base + IR2MSK);
+ pim_addr = csr_base + PIM2_1L;
+ break;
+ case 2:
+ bar_addr = csr_base + IBAR3L;
+ writel(bar_low, bar_addr);
+ writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
+ writel(lower_32_bits(mask), csr_base + IR3MSKL);
+ writel(upper_32_bits(mask), csr_base + IR3MSKL + 0x4);
+ pim_addr = csr_base + PIM3_1L;
+ break;
+ }
+
+ xgene_pcie_setup_pims(pim_addr, pci_addr, ~(size - 1));
+}
+
+static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ const int na = 3, ns = 2;
+ int rlen;
+
+ parser->node = node;
+ parser->pna = of_n_addr_cells(node);
+ parser->np = parser->pna + na + ns;
+
+ parser->range = of_get_property(node, "dma-ranges", &rlen);
+ if (!parser->range)
+ return -ENOENT;
+ parser->end = parser->range + rlen / sizeof(__be32);
+
+ return 0;
+}
+
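+/*
+ * Each "dma-ranges" entry therefore spans pna + 3 + 2 cells: 3 cells of PCI
+ * address (flags plus a 64-bit address), pna cells of parent/CPU address and
+ * 2 cells of size. Assuming a parent with #address-cells = 2, one entry
+ * would look like <flags pci.hi pci.lo  cpu.hi cpu.lo  size.hi size.lo>.
+ */
+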
+static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
+{
+ struct device_node *np = port->node;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ struct device *dev = port->dev;
+ u8 ib_reg_mask = 0;
+
+ if (pci_dma_range_parser_init(&parser, np)) {
+ dev_err(dev, "missing dma-ranges property\n");
+ return -EINVAL;
+ }
+
+ /* Get the dma-ranges from DT */
+ for_each_of_pci_range(&parser, &range) {
+ u64 end = range.cpu_addr + range.size - 1;
+
+ dev_dbg(port->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
+ range.flags, range.cpu_addr, end, range.pci_addr);
+ xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
+ }
+ return 0;
+}
+
+/* clear any BAR and address-translation configuration done by firmware */
+static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
+{
+ int i;
+
+ for (i = PIM1_1L; i <= CFGCTL; i += 4)
+ writel(0x0, port->csr_base + i);
+}
+
+static int xgene_pcie_setup(struct xgene_pcie_port *port,
+ struct list_head *res,
+ resource_size_t io_base)
+{
+ u32 val, lanes = 0, speed = 0;
+ int ret;
+
+ xgene_pcie_clear_config(port);
+
+ /* setup the vendor and device IDs correctly */
+ val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
+ writel(val, port->csr_base + BRIDGE_CFG_0);
+
+ ret = xgene_pcie_map_ranges(port, res, io_base);
+ if (ret)
+ return ret;
+
+ ret = xgene_pcie_parse_map_dma_ranges(port);
+ if (ret)
+ return ret;
+
+ xgene_pcie_linkup(port, &lanes, &speed);
+ if (!port->link_up)
+ dev_info(port->dev, "(rc) link down\n");
+ else
+ dev_info(port->dev, "(rc) x%d gen-%d link up\n",
+ lanes, speed + 1);
+ return 0;
+}
+
+static int xgene_pcie_probe_bridge(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct xgene_pcie_port *port;
+ resource_size_t iobase = 0;
+ struct pci_bus *bus;
+ int ret;
+ LIST_HEAD(res);
+
+ port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+ port->node = of_node_get(pdev->dev.of_node);
+ port->dev = &pdev->dev;
+
+ ret = xgene_pcie_map_reg(port, pdev);
+ if (ret)
+ return ret;
+
+ ret = xgene_pcie_init_port(port);
+ if (ret)
+ return ret;
+
+ ret = of_pci_get_host_bridge_resources(dn, 0, 0xff, &res, &iobase);
+ if (ret)
+ return ret;
+
+ ret = xgene_pcie_setup(port, &res, iobase);
+ if (ret)
+ return ret;
+
+ bus = pci_scan_root_bus(&pdev->dev, 0, &xgene_pcie_ops, port, &res);
+ if (!bus)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, port);
+ return 0;
+}
+
+static const struct of_device_id xgene_pcie_match_table[] = {
+ {.compatible = "apm,xgene-pcie",},
+ {},
+};
+
+static struct platform_driver xgene_pcie_driver = {
+ .driver = {
+ .name = "xgene-pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(xgene_pcie_match_table),
+ },
+ .probe = xgene_pcie_probe_bridge,
+};
+module_platform_driver(xgene_pcie_driver);
+
+MODULE_AUTHOR("Tanmay Inamdar <tinamdar@apm.com>");
+MODULE_DESCRIPTION("APM X-Gene PCIe driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 52bd3a143563..dfed00aa3ac0 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -73,6 +73,8 @@ static unsigned long global_io_offset;
static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
{
+ BUG_ON(!sys->private_data);
+
return sys->private_data;
}
@@ -194,30 +196,6 @@ void dw_pcie_msi_init(struct pcie_port *pp)
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
}
-static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
-{
- int flag = 1;
-
- do {
- pos = find_next_zero_bit(pp->msi_irq_in_use,
- MAX_MSI_IRQS, pos);
- /*if you have reached to the end then get out from here.*/
- if (pos == MAX_MSI_IRQS)
- return -ENOSPC;
- /*
- * Check if this position is at correct offset.nvec is always a
- * power of two. pos0 must be nvec bit aligned.
- */
- if (pos % msgvec)
- pos += msgvec - (pos % msgvec);
- else
- flag = 0;
- } while (flag);
-
- *pos0 = pos;
- return 0;
-}
-
static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
unsigned int res, bit, val;
@@ -236,13 +214,14 @@ static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
for (i = 0; i < nvec; i++) {
irq_set_msi_desc_off(irq_base, i, NULL);
- clear_bit(pos + i, pp->msi_irq_in_use);
/* Disable corresponding interrupt on MSI controller */
if (pp->ops->msi_clear_irq)
pp->ops->msi_clear_irq(pp, pos + i);
else
dw_pcie_msi_clear_irq(pp, pos + i);
}
+
+ bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}
static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
@@ -258,31 +237,13 @@ static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
- int irq, pos0, pos1, i;
+ int irq, pos0, i;
struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
- if (!pp) {
- BUG();
- return -EINVAL;
- }
-
- pos0 = find_first_zero_bit(pp->msi_irq_in_use,
- MAX_MSI_IRQS);
- if (pos0 % no_irqs) {
- if (find_valid_pos0(pp, no_irqs, pos0, &pos0))
- goto no_valid_irq;
- }
- if (no_irqs > 1) {
- pos1 = find_next_bit(pp->msi_irq_in_use,
- MAX_MSI_IRQS, pos0);
- /* there must be nvec number of consecutive free bits */
- while ((pos1 - pos0) < no_irqs) {
- if (find_valid_pos0(pp, no_irqs, pos1, &pos0))
- goto no_valid_irq;
- pos1 = find_next_bit(pp->msi_irq_in_use,
- MAX_MSI_IRQS, pos0);
- }
- }
+ pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
+ order_base_2(no_irqs));
+ if (pos0 < 0)
+ goto no_valid_irq;
irq = irq_find_mapping(pp->irq_domain, pos0);
if (!irq)
@@ -300,7 +261,6 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
clear_irq_range(pp, irq, i, pos0);
goto no_valid_irq;
}
- set_bit(pos0 + i, pp->msi_irq_in_use);
/*Enable corresponding interrupt in MSI interrupt controller */
if (pp->ops->msi_set_irq)
pp->ops->msi_set_irq(pp, pos0 + i);
@@ -316,69 +276,28 @@ no_valid_irq:
return -ENOSPC;
}
-static void clear_irq(unsigned int irq)
-{
- unsigned int pos, nvec;
- struct msi_desc *msi;
- struct pcie_port *pp;
- struct irq_data *data = irq_get_irq_data(irq);
-
- /* get the port structure */
- msi = irq_data_get_msi(data);
- pp = sys_to_pcie(msi->dev->bus->sysdata);
- if (!pp) {
- BUG();
- return;
- }
-
- /* undo what was done in assign_irq */
- pos = data->hwirq;
- nvec = 1 << msi->msi_attrib.multiple;
-
- clear_irq_range(pp, irq, nvec, pos);
-
- /* all irqs cleared; reset attributes */
- msi->irq = 0;
- msi->msi_attrib.multiple = 0;
-}
-
static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
struct msi_desc *desc)
{
- int irq, pos, msgvec;
- u16 msg_ctr;
+ int irq, pos;
struct msi_msg msg;
struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
- if (!pp) {
- BUG();
- return -EINVAL;
- }
-
- pci_read_config_word(pdev, desc->msi_attrib.pos+PCI_MSI_FLAGS,
- &msg_ctr);
- msgvec = (msg_ctr&PCI_MSI_FLAGS_QSIZE) >> 4;
- if (msgvec == 0)
- msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
- if (msgvec > 5)
- msgvec = 0;
-
- irq = assign_irq((1 << msgvec), desc, &pos);
+ irq = assign_irq(1, desc, &pos);
if (irq < 0)
return irq;
- /*
- * write_msi_msg() will update PCI_MSI_FLAGS so there is
- * no need to explicitly call pci_write_config_word().
- */
- desc->msi_attrib.multiple = msgvec;
-
- if (pp->ops->get_msi_data)
- msg.address_lo = pp->ops->get_msi_data(pp);
+ if (pp->ops->get_msi_addr)
+ msg.address_lo = pp->ops->get_msi_addr(pp);
else
msg.address_lo = virt_to_phys((void *)pp->msi_data);
msg.address_hi = 0x0;
- msg.data = pos;
+
+ if (pp->ops->get_msi_data)
+ msg.data = pp->ops->get_msi_data(pp, pos);
+ else
+ msg.data = pos;
+
write_msi_msg(irq, &msg);
return 0;
@@ -386,7 +305,11 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
{
- clear_irq(irq);
+ struct irq_data *data = irq_get_irq_data(irq);
+ struct msi_desc *msi = irq_data_get_msi(data);
+ struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata);
+
+ clear_irq_range(pp, irq, 1, data->hwirq);
}
static struct msi_chip dw_pcie_msi_chip = {
@@ -425,7 +348,7 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
struct resource *cfg_res;
u32 val, na, ns;
const __be32 *addrp;
- int i, index;
+ int i, index, ret;
/* Find the address cell size and the number of cells in order to get
* the untranslated address.
@@ -435,16 +358,16 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
- pp->config.cfg0_size = resource_size(cfg_res)/2;
- pp->config.cfg1_size = resource_size(cfg_res)/2;
+ pp->cfg0_size = resource_size(cfg_res)/2;
+ pp->cfg1_size = resource_size(cfg_res)/2;
pp->cfg0_base = cfg_res->start;
- pp->cfg1_base = cfg_res->start + pp->config.cfg0_size;
+ pp->cfg1_base = cfg_res->start + pp->cfg0_size;
/* Find the untranslated configuration space address */
index = of_property_match_string(np, "reg-names", "config");
- addrp = of_get_address(np, index, false, false);
+ addrp = of_get_address(np, index, NULL, NULL);
pp->cfg0_mod_base = of_read_number(addrp, ns);
- pp->cfg1_mod_base = pp->cfg0_mod_base + pp->config.cfg0_size;
+ pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size;
} else {
dev_err(pp->dev, "missing *config* reg space\n");
}
@@ -466,9 +389,9 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
pp->io.end = min_t(resource_size_t,
IO_SPACE_LIMIT,
range.pci_addr + range.size
- + global_io_offset);
- pp->config.io_size = resource_size(&pp->io);
- pp->config.io_bus_addr = range.pci_addr;
+ + global_io_offset - 1);
+ pp->io_size = resource_size(&pp->io);
+ pp->io_bus_addr = range.pci_addr;
pp->io_base = range.cpu_addr;
/* Find the untranslated IO space address */
@@ -478,8 +401,8 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
if (restype == IORESOURCE_MEM) {
of_pci_range_to_resource(&range, np, &pp->mem);
pp->mem.name = "MEM";
- pp->config.mem_size = resource_size(&pp->mem);
- pp->config.mem_bus_addr = range.pci_addr;
+ pp->mem_size = resource_size(&pp->mem);
+ pp->mem_bus_addr = range.pci_addr;
/* Find the untranslated MEM space address */
pp->mem_mod_base = of_read_number(parser.range -
@@ -487,19 +410,29 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
}
if (restype == 0) {
of_pci_range_to_resource(&range, np, &pp->cfg);
- pp->config.cfg0_size = resource_size(&pp->cfg)/2;
- pp->config.cfg1_size = resource_size(&pp->cfg)/2;
+ pp->cfg0_size = resource_size(&pp->cfg)/2;
+ pp->cfg1_size = resource_size(&pp->cfg)/2;
pp->cfg0_base = pp->cfg.start;
- pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
+ pp->cfg1_base = pp->cfg.start + pp->cfg0_size;
/* Find the untranslated configuration space address */
pp->cfg0_mod_base = of_read_number(parser.range -
parser.np + na, ns);
pp->cfg1_mod_base = pp->cfg0_mod_base +
- pp->config.cfg0_size;
+ pp->cfg0_size;
}
}
+ ret = of_pci_parse_bus_range(np, &pp->busn);
+ if (ret < 0) {
+ pp->busn.name = np->name;
+ pp->busn.start = 0;
+ pp->busn.end = 0xff;
+ pp->busn.flags = IORESOURCE_BUS;
+ dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n",
+ ret, &pp->busn);
+ }
+
if (!pp->dbi_base) {
pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
resource_size(&pp->cfg));
@@ -511,17 +444,22 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
pp->mem_base = pp->mem.start;
- pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
- pp->config.cfg0_size);
if (!pp->va_cfg0_base) {
- dev_err(pp->dev, "error with ioremap in function\n");
- return -ENOMEM;
+ pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
+ pp->cfg0_size);
+ if (!pp->va_cfg0_base) {
+ dev_err(pp->dev, "error with ioremap in function\n");
+ return -ENOMEM;
+ }
}
- pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
- pp->config.cfg1_size);
+
if (!pp->va_cfg1_base) {
- dev_err(pp->dev, "error with ioremap\n");
- return -ENOMEM;
+ pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
+ pp->cfg1_size);
+ if (!pp->va_cfg1_base) {
+ dev_err(pp->dev, "error with ioremap\n");
+ return -ENOMEM;
+ }
}
if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
@@ -530,16 +468,22 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
}
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
- MAX_MSI_IRQS, &msi_domain_ops,
- &dw_pcie_msi_chip);
- if (!pp->irq_domain) {
- dev_err(pp->dev, "irq domain init failed\n");
- return -ENXIO;
- }
+ if (!pp->ops->msi_host_init) {
+ pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
+ MAX_MSI_IRQS, &msi_domain_ops,
+ &dw_pcie_msi_chip);
+ if (!pp->irq_domain) {
+ dev_err(pp->dev, "irq domain init failed\n");
+ return -ENXIO;
+ }
- for (i = 0; i < MAX_MSI_IRQS; i++)
- irq_create_mapping(pp->irq_domain, i);
+ for (i = 0; i < MAX_MSI_IRQS; i++)
+ irq_create_mapping(pp->irq_domain, i);
+ } else {
+ ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
+ if (ret < 0)
+ return ret;
+ }
}
if (pp->ops->host_init)
@@ -558,7 +502,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
dw_pci.private_data = (void **)&pp;
pci_common_init_dev(pp->dev, &dw_pci);
- pci_assign_unassigned_resources();
#ifdef CONFIG_PCI_DOMAINS
dw_pci.domain++;
#endif
@@ -573,7 +516,7 @@ static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
PCIE_ATU_VIEWPORT);
dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE);
dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE);
- dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->config.cfg0_size - 1,
+ dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1,
PCIE_ATU_LIMIT);
dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -589,7 +532,7 @@ static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE);
dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE);
- dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->config.cfg1_size - 1,
+ dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1,
PCIE_ATU_LIMIT);
dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -604,10 +547,10 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE);
dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE);
- dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->config.mem_size - 1,
+ dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1,
PCIE_ATU_LIMIT);
- dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
- dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
+ dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr),
PCIE_ATU_UPPER_TARGET);
dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}
@@ -620,10 +563,10 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE);
dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE);
- dw_pcie_writel_rc(pp, pp->io_mod_base + pp->config.io_size - 1,
+ dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1,
PCIE_ATU_LIMIT);
- dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
- dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
+ dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr),
PCIE_ATU_UPPER_TARGET);
dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}
@@ -707,11 +650,6 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
struct pcie_port *pp = sys_to_pcie(bus->sysdata);
int ret;
- if (!pp) {
- BUG();
- return -EINVAL;
- }
-
if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -736,11 +674,6 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
struct pcie_port *pp = sys_to_pcie(bus->sysdata);
int ret;
- if (!pp) {
- BUG();
- return -EINVAL;
- }
-
if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -768,19 +701,17 @@ static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
pp = sys_to_pcie(sys);
- if (!pp)
- return 0;
-
- if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
- sys->io_offset = global_io_offset - pp->config.io_bus_addr;
+ if (global_io_offset < SZ_1M && pp->io_size > 0) {
+ sys->io_offset = global_io_offset - pp->io_bus_addr;
pci_ioremap_io(global_io_offset, pp->io_base);
global_io_offset += SZ_64K;
pci_add_resource_offset(&sys->resources, &pp->io,
sys->io_offset);
}
- sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
+ sys->mem_offset = pp->mem.start - pp->mem_bus_addr;
pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
+ pci_add_resource(&sys->resources, &pp->busn);
return 1;
}
@@ -790,14 +721,16 @@ static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
struct pci_bus *bus;
struct pcie_port *pp = sys_to_pcie(sys);
- if (pp) {
- pp->root_bus_nr = sys->busnr;
- bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops,
- sys, &sys->resources);
- } else {
- bus = NULL;
- BUG();
- }
+ pp->root_bus_nr = sys->busnr;
+ bus = pci_create_root_bus(pp->dev, sys->busnr,
+ &dw_pcie_ops, sys, &sys->resources);
+ if (!bus)
+ return NULL;
+
+ pci_scan_child_bus(bus);
+
+ if (bus && pp->ops->scan_bus)
+ pp->ops->scan_bus(pp);
return bus;
}
@@ -833,7 +766,6 @@ static struct hw_pci dw_pci = {
void dw_pcie_setup_rc(struct pcie_port *pp)
{
- struct pcie_port_info *config = &pp->config;
u32 val;
u32 membase;
u32 memlimit;
@@ -888,7 +820,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
/* setup memory base, memory limit */
membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
- memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000;
+ memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
val = memlimit | membase;
dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index daf81f922cda..c6256751daff 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -14,15 +14,6 @@
#ifndef _PCIE_DESIGNWARE_H
#define _PCIE_DESIGNWARE_H
-struct pcie_port_info {
- u32 cfg0_size;
- u32 cfg1_size;
- u32 io_size;
- u32 mem_size;
- phys_addr_t io_bus_addr;
- phys_addr_t mem_bus_addr;
-};
-
/*
* Maximum number of MSI IRQs can be 256 per controller. But keep
* it 32 as of now. Probably we will never need more than 32. If needed,
@@ -38,17 +29,23 @@ struct pcie_port {
u64 cfg0_base;
u64 cfg0_mod_base;
void __iomem *va_cfg0_base;
+ u32 cfg0_size;
u64 cfg1_base;
u64 cfg1_mod_base;
void __iomem *va_cfg1_base;
+ u32 cfg1_size;
u64 io_base;
u64 io_mod_base;
+ phys_addr_t io_bus_addr;
+ u32 io_size;
u64 mem_base;
u64 mem_mod_base;
+ phys_addr_t mem_bus_addr;
+ u32 mem_size;
struct resource cfg;
struct resource io;
struct resource mem;
- struct pcie_port_info config;
+ struct resource busn;
int irq;
u32 lanes;
struct pcie_host_ops *ops;
@@ -73,7 +70,10 @@ struct pcie_host_ops {
void (*host_init)(struct pcie_port *pp);
void (*msi_set_irq)(struct pcie_port *pp, int irq);
void (*msi_clear_irq)(struct pcie_port *pp, int irq);
- u32 (*get_msi_data)(struct pcie_port *pp);
+ u32 (*get_msi_addr)(struct pcie_port *pp);
+ u32 (*get_msi_data)(struct pcie_port *pp, int pos);
+ void (*scan_bus)(struct pcie_port *pp);
+ int (*msi_host_init)(struct pcie_port *pp, struct msi_chip *chip);
};
int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 4884ee5e07d4..61158e03ab5f 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -323,6 +323,7 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie)
/* Setup PCIe address space mappings for each resource */
resource_size_t size;
+ resource_size_t res_start;
u32 mask;
rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
@@ -335,8 +336,13 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie)
mask = (roundup_pow_of_two(size) / SZ_128) - 1;
rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
- rcar_pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win));
- rcar_pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win));
+ if (res->flags & IORESOURCE_IO)
+ res_start = pci_pio_to_address(res->start);
+ else
+ res_start = res->start;
+
+ rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPARH(win));
+ rcar_pci_write_reg(pcie, lower_32_bits(res_start), PCIEPARL(win));
/* First resource is for IO */
mask = PAR_ENABLE;
@@ -363,9 +369,10 @@ static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
rcar_pcie_setup_window(i, pcie);
- if (res->flags & IORESOURCE_IO)
- pci_ioremap_io(nr * SZ_64K, res->start);
- else
+ if (res->flags & IORESOURCE_IO) {
+ phys_addr_t io_start = pci_pio_to_address(res->start);
+ pci_ioremap_io(nr * SZ_64K, io_start);
+ } else
pci_add_resource(&sys->resources, res);
}
pci_add_resource(&sys->resources, &pcie->busn);
@@ -935,8 +942,10 @@ static int rcar_pcie_probe(struct platform_device *pdev)
}
for_each_of_pci_range(&parser, &range) {
- of_pci_range_to_resource(&range, pdev->dev.of_node,
+ err = of_pci_range_to_resource(&range, pdev->dev.of_node,
&pcie->res[win++]);
+ if (err < 0)
+ return err;
if (win > RCAR_PCI_MAX_RESOURCES)
break;
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 6dea9e43a75c..85f594e1708f 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -340,7 +340,7 @@ static int __init spear13xx_pcie_probe(struct platform_device *pdev)
pp->dev = dev;
- dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
if (IS_ERR(pp->dbi_base)) {
dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
new file mode 100644
index 000000000000..ccc496b33a97
--- /dev/null
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -0,0 +1,970 @@
+/*
+ * PCIe host controller driver for Xilinx AXI PCIe Bridge
+ *
+ * Copyright (c) 2012 - 2014 Xilinx, Inc.
+ *
+ * Based on the Tegra PCIe driver
+ *
+ * Bits taken from Synopsys Designware Host controller driver and
+ * ARM PCI Host generic driver.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+/* Register definitions */
+#define XILINX_PCIE_REG_BIR 0x00000130
+#define XILINX_PCIE_REG_IDR 0x00000138
+#define XILINX_PCIE_REG_IMR 0x0000013c
+#define XILINX_PCIE_REG_PSCR 0x00000144
+#define XILINX_PCIE_REG_RPSC 0x00000148
+#define XILINX_PCIE_REG_MSIBASE1 0x0000014c
+#define XILINX_PCIE_REG_MSIBASE2 0x00000150
+#define XILINX_PCIE_REG_RPEFR 0x00000154
+#define XILINX_PCIE_REG_RPIFR1 0x00000158
+#define XILINX_PCIE_REG_RPIFR2 0x0000015c
+
+/* Interrupt registers definitions */
+#define XILINX_PCIE_INTR_LINK_DOWN BIT(0)
+#define XILINX_PCIE_INTR_ECRC_ERR BIT(1)
+#define XILINX_PCIE_INTR_STR_ERR BIT(2)
+#define XILINX_PCIE_INTR_HOT_RESET BIT(3)
+#define XILINX_PCIE_INTR_CFG_TIMEOUT BIT(8)
+#define XILINX_PCIE_INTR_CORRECTABLE BIT(9)
+#define XILINX_PCIE_INTR_NONFATAL BIT(10)
+#define XILINX_PCIE_INTR_FATAL BIT(11)
+#define XILINX_PCIE_INTR_INTX BIT(16)
+#define XILINX_PCIE_INTR_MSI BIT(17)
+#define XILINX_PCIE_INTR_SLV_UNSUPP BIT(20)
+#define XILINX_PCIE_INTR_SLV_UNEXP BIT(21)
+#define XILINX_PCIE_INTR_SLV_COMPL BIT(22)
+#define XILINX_PCIE_INTR_SLV_ERRP BIT(23)
+#define XILINX_PCIE_INTR_SLV_CMPABT BIT(24)
+#define XILINX_PCIE_INTR_SLV_ILLBUR BIT(25)
+#define XILINX_PCIE_INTR_MST_DECERR BIT(26)
+#define XILINX_PCIE_INTR_MST_SLVERR BIT(27)
+#define XILINX_PCIE_INTR_MST_ERRP BIT(28)
+#define XILINX_PCIE_IMR_ALL_MASK 0x1FF30FED
+#define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF
+
+/* Root Port Error FIFO Read Register definitions */
+#define XILINX_PCIE_RPEFR_ERR_VALID BIT(18)
+#define XILINX_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
+#define XILINX_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
+
+/* Root Port Interrupt FIFO Read Register 1 definitions */
+#define XILINX_PCIE_RPIFR1_INTR_VALID BIT(31)
+#define XILINX_PCIE_RPIFR1_MSI_INTR BIT(30)
+#define XILINX_PCIE_RPIFR1_INTR_MASK GENMASK(28, 27)
+#define XILINX_PCIE_RPIFR1_ALL_MASK 0xFFFFFFFF
+#define XILINX_PCIE_RPIFR1_INTR_SHIFT 27
+
+/* Bridge Info Register definitions */
+#define XILINX_PCIE_BIR_ECAM_SZ_MASK GENMASK(18, 16)
+#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT 16
+
+/* Root Port Interrupt FIFO Read Register 2 definitions */
+#define XILINX_PCIE_RPIFR2_MSG_DATA GENMASK(15, 0)
+
+/* Root Port Status/control Register definitions */
+#define XILINX_PCIE_REG_RPSC_BEN BIT(0)
+
+/* Phy Status/Control Register definitions */
+#define XILINX_PCIE_REG_PSCR_LNKUP BIT(11)
+
+/* ECAM definitions */
+#define ECAM_BUS_NUM_SHIFT 20
+#define ECAM_DEV_NUM_SHIFT 12
+
+/* Number of MSI IRQs */
+#define XILINX_NUM_MSI_IRQS 128
+
+/* Number of Memory Resources */
+#define XILINX_MAX_NUM_RESOURCES 3
+
+/**
+ * struct xilinx_pcie_port - PCIe port information
+ * @reg_base: IO Mapped Register Base
+ * @irq: Interrupt number
+ * @msi_pages: MSI pages
+ * @root_busno: Root Bus number
+ * @dev: Device pointer
+ * @irq_domain: IRQ domain pointer
+ * @bus_range: Bus range
+ * @resources: Bus Resources
+ */
+struct xilinx_pcie_port {
+ void __iomem *reg_base;
+ u32 irq;
+ unsigned long msi_pages;
+ u8 root_busno;
+ struct device *dev;
+ struct irq_domain *irq_domain;
+ struct resource bus_range;
+ struct list_head resources;
+};
+
+static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
+
+static inline struct xilinx_pcie_port *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
+{
+ return readl(port->reg_base + reg);
+}
+
+static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
+{
+ writel(val, port->reg_base + reg);
+}
+
+static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
+{
+ return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
+ XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
+}
+
+/**
+ * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
+{
+ u32 val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
+
+ if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
+ dev_dbg(port->dev, "Requester ID %d\n",
+ val & XILINX_PCIE_RPEFR_REQ_ID);
+ pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
+ XILINX_PCIE_REG_RPEFR);
+ }
+}
+
+/**
+ * xilinx_pcie_valid_device - Check if a valid device is present on bus
+ * @bus: PCI Bus structure
+ * @devfn: device/function
+ *
+ * Return: 'true' if a valid device is present, 'false' otherwise
+ */
+static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
+
+ /* Check if link is up when trying to access downstream ports */
+ if (bus->number != port->root_busno)
+ if (!xilinx_pcie_link_is_up(port))
+ return false;
+
+ /* Only one device is allowed directly below each root port */
+ if (bus->number == port->root_busno && devfn > 0)
+ return false;
+
+ /*
+ * Do not read more than one device on the bus directly attached
+ * to the RC.
+ */
+ if (bus->primary == port->root_busno && devfn > 0)
+ return false;
+
+ return true;
+}
+
+/**
+ * xilinx_pcie_config_base - Get configuration base
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ *
+ * Return: Base address of the configuration space needed to be
+ * accessed.
+ */
+static void __iomem *xilinx_pcie_config_base(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
+ int relbus;
+
+ relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
+ (devfn << ECAM_DEV_NUM_SHIFT);
+
+ return port->reg_base + relbus + where;
+}
+
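+/*
+ * ECAM offset example (illustrative B/D/F): bus 1, device 0, function 0,
+ * register 0x10 maps to (1 << 20) | (0 << 12) | 0x10 = 0x100010 from
+ * reg_base, i.e. the usual bus[27:20], device[19:15], function[14:12]
+ * ECAM layout, since devfn already packs device and function together.
+ */
+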
+/**
+ * xilinx_pcie_read_config - Read configuration space
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ * @size: Byte/word/dword
+ * @val: Value to be read
+ *
+ * Return: PCIBIOS_SUCCESSFUL on success
+ * PCIBIOS_DEVICE_NOT_FOUND on failure
+ */
+static int xilinx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ void __iomem *addr;
+
+ if (!xilinx_pcie_valid_device(bus, devfn)) {
+ *val = 0xFFFFFFFF;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ addr = xilinx_pcie_config_base(bus, devfn, where);
+
+ switch (size) {
+ case 1:
+ *val = readb(addr);
+ break;
+ case 2:
+ *val = readw(addr);
+ break;
+ default:
+ *val = readl(addr);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/**
+ * xilinx_pcie_write_config - Write configuration space
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ * @size: Byte/word/dword
+ * @val: Value to be written to device
+ *
+ * Return: PCIBIOS_SUCCESSFUL on success
+ * PCIBIOS_DEVICE_NOT_FOUND on failure
+ */
+static int xilinx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ void __iomem *addr;
+
+ if (!xilinx_pcie_valid_device(bus, devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ addr = xilinx_pcie_config_base(bus, devfn, where);
+
+ switch (size) {
+ case 1:
+ writeb(val, addr);
+ break;
+ case 2:
+ writew(val, addr);
+ break;
+ default:
+ writel(val, addr);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* PCIe operations */
+static struct pci_ops xilinx_pcie_ops = {
+ .read = xilinx_pcie_read_config,
+ .write = xilinx_pcie_write_config,
+};
+
+/* MSI functions */
+
+/**
+ * xilinx_pcie_destroy_msi - Free MSI number
+ * @irq: IRQ to be freed
+ */
+static void xilinx_pcie_destroy_msi(unsigned int irq)
+{
+ struct irq_desc *desc;
+ struct msi_desc *msi;
+ struct xilinx_pcie_port *port;
+
+ desc = irq_to_desc(irq);
+ msi = irq_desc_get_msi_desc(desc);
+ port = sys_to_pcie(msi->dev->bus->sysdata);
+
+ if (!test_bit(irq, msi_irq_in_use))
+ dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
+ else
+ clear_bit(irq, msi_irq_in_use);
+}
+
+/**
+ * xilinx_pcie_assign_msi - Allocate MSI number
+ * @port: PCIe port structure
+ *
+ * Return: A valid IRQ on success and error value on failure.
+ */
+static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
+{
+ int pos;
+
+ pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
+ if (pos < XILINX_NUM_MSI_IRQS)
+ set_bit(pos, msi_irq_in_use);
+ else
+ return -ENOSPC;
+
+ return pos;
+}
+
+/**
+ * xilinx_msi_teardown_irq - Destroy the MSI
+ * @chip: MSI Chip descriptor
+ * @irq: MSI IRQ to destroy
+ */
+static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
+{
+ xilinx_pcie_destroy_msi(irq);
+}
+
+/**
+ * xilinx_pcie_msi_setup_irq - Setup MSI request
+ * @chip: MSI chip pointer
+ * @pdev: PCIe device pointer
+ * @desc: MSI descriptor pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip,
+ struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ struct xilinx_pcie_port *port = sys_to_pcie(pdev->bus->sysdata);
+ unsigned int irq;
+ int hwirq;
+ struct msi_msg msg;
+ phys_addr_t msg_addr;
+
+ hwirq = xilinx_pcie_assign_msi(port);
+ if (hwirq < 0)
+ return hwirq;
+
+ irq = irq_create_mapping(port->irq_domain, hwirq);
+ if (!irq)
+ return -EINVAL;
+
+ irq_set_msi_desc(irq, desc);
+
+ msg_addr = virt_to_phys((void *)port->msi_pages);
+
+ msg.address_hi = 0;
+ msg.address_lo = msg_addr;
+ msg.data = irq;
+
+ write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+/* MSI Chip Descriptor */
+static struct msi_chip xilinx_pcie_msi_chip = {
+ .setup_irq = xilinx_pcie_msi_setup_irq,
+ .teardown_irq = xilinx_msi_teardown_irq,
+};
+
+/* HW Interrupt Chip Descriptor */
+static struct irq_chip xilinx_msi_irq_chip = {
+ .name = "Xilinx PCIe MSI",
+ .irq_enable = unmask_msi_irq,
+ .irq_disable = mask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+};
+
+/**
+ * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+/* IRQ Domain operations */
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = xilinx_pcie_msi_map,
+};
+
+/**
+ * xilinx_pcie_enable_msi - Enable MSI support
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
+{
+ phys_addr_t msg_addr;
+
+ port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
+ msg_addr = virt_to_phys((void *)port->msi_pages);
+ pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
+ pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
+}
+
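+/*
+ * A rough sketch of the resulting MSI flow: the physical address of the
+ * reserved page is programmed into the MSIBASE registers here, MSI
+ * descriptors later get that address with msg.data set to the virtual IRQ
+ * number (see xilinx_pcie_msi_setup_irq()), and the interrupt handler reads
+ * that value back from RPIFR2 and dispatches it via generic_handle_irq().
+ */
+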
+/**
+ * xilinx_pcie_add_bus - Add MSI chip info to PCIe bus
+ * @bus: PCIe bus
+ */
+static void xilinx_pcie_add_bus(struct pci_bus *bus)
+{
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
+
+ xilinx_pcie_msi_chip.dev = port->dev;
+ bus->msi = &xilinx_pcie_msi_chip;
+ }
+}
+
+/* INTx Functions */
+
+/**
+ * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = xilinx_pcie_intx_map,
+};
+
+/* PCIe HW Functions */
+
+/**
+ * xilinx_pcie_intr_handler - Interrupt Service Handler
+ * @irq: IRQ number
+ * @data: PCIe port information
+ *
+ * Return: IRQ_HANDLED on success and IRQ_NONE on failure
+ */
+static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
+{
+ struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
+ u32 val, mask, status, msi_data;
+
+ /* Read interrupt decode and mask registers */
+ val = pcie_read(port, XILINX_PCIE_REG_IDR);
+ mask = pcie_read(port, XILINX_PCIE_REG_IMR);
+
+ status = val & mask;
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & XILINX_PCIE_INTR_LINK_DOWN)
+ dev_warn(port->dev, "Link Down\n");
+
+ if (status & XILINX_PCIE_INTR_ECRC_ERR)
+ dev_warn(port->dev, "ECRC failed\n");
+
+ if (status & XILINX_PCIE_INTR_STR_ERR)
+ dev_warn(port->dev, "Streaming error\n");
+
+ if (status & XILINX_PCIE_INTR_HOT_RESET)
+ dev_info(port->dev, "Hot reset\n");
+
+ if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
+ dev_warn(port->dev, "ECAM access timeout\n");
+
+ if (status & XILINX_PCIE_INTR_CORRECTABLE) {
+ dev_warn(port->dev, "Correctable error message\n");
+ xilinx_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_PCIE_INTR_NONFATAL) {
+ dev_warn(port->dev, "Non-fatal error message\n");
+ xilinx_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_PCIE_INTR_FATAL) {
+ dev_warn(port->dev, "Fatal error message\n");
+ xilinx_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_PCIE_INTR_INTX) {
+ /* INTx interrupt received */
+ val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
+
+ /* Check whether interrupt valid */
+ if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
+ dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+ return IRQ_HANDLED;
+ }
+
+ /* Clear interrupt FIFO register 1 */
+ pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+ XILINX_PCIE_REG_RPIFR1);
+
+ /* Handle INTx Interrupt */
+ val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
+ XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
+ generic_handle_irq(irq_find_mapping(port->irq_domain, val));
+ }
+
+ if (status & XILINX_PCIE_INTR_MSI) {
+ /* MSI Interrupt */
+ val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
+
+ if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
+ dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+ return IRQ_HANDLED;
+ }
+
+ if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
+ msi_data = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
+ XILINX_PCIE_RPIFR2_MSG_DATA;
+
+ /* Clear interrupt FIFO register 1 */
+ pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+ XILINX_PCIE_REG_RPIFR1);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ /* Handle MSI Interrupt */
+ generic_handle_irq(msi_data);
+ }
+ }
+ }
+
+ if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
+ dev_warn(port->dev, "Slave unsupported request\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_UNEXP)
+ dev_warn(port->dev, "Slave unexpected completion\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_COMPL)
+ dev_warn(port->dev, "Slave completion timeout\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_ERRP)
+ dev_warn(port->dev, "Slave Error Poison\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_CMPABT)
+ dev_warn(port->dev, "Slave Completer Abort\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
+ dev_warn(port->dev, "Slave Illegal Burst\n");
+
+ if (status & XILINX_PCIE_INTR_MST_DECERR)
+ dev_warn(port->dev, "Master decode error\n");
+
+ if (status & XILINX_PCIE_INTR_MST_SLVERR)
+ dev_warn(port->dev, "Master slave error\n");
+
+ if (status & XILINX_PCIE_INTR_MST_ERRP)
+ dev_warn(port->dev, "Master error poison\n");
+
+ /* Clear the Interrupt Decode register */
+ pcie_write(port, status, XILINX_PCIE_REG_IDR);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_pcie_free_irq_domain - Free IRQ domain
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_free_irq_domain(struct xilinx_pcie_port *port)
+{
+ int i;
+ u32 irq, num_irqs;
+
+ /* Free IRQ Domain */
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+
+ free_pages(port->msi_pages, 0);
+
+ num_irqs = XILINX_NUM_MSI_IRQS;
+ } else {
+ /* INTx */
+ num_irqs = 4;
+ }
+
+ for (i = 0; i < num_irqs; i++) {
+ irq = irq_find_mapping(port->irq_domain, i);
+ if (irq > 0)
+ irq_dispose_mapping(irq);
+ }
+
+ irq_domain_remove(port->irq_domain);
+}
+
+/**
+ * xilinx_pcie_init_irq_domain - Initialize IRQ domain
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
+ &intx_domain_ops,
+ port);
+ if (!port->irq_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return PTR_ERR(port->irq_domain);
+ }
+
+ /* Setup MSI */
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ port->irq_domain = irq_domain_add_linear(node,
+ XILINX_NUM_MSI_IRQS,
+ &msi_domain_ops,
+ &xilinx_pcie_msi_chip);
+ if (!port->irq_domain) {
+ dev_err(dev, "Failed to get a MSI IRQ domain\n");
+ return PTR_ERR(port->irq_domain);
+ }
+
+ xilinx_pcie_enable_msi(port);
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_pcie_init_port - Initialize hardware
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
+{
+ if (xilinx_pcie_link_is_up(port))
+ dev_info(port->dev, "PCIe Link is UP\n");
+ else
+ dev_info(port->dev, "PCIe Link is DOWN\n");
+
+ /* Disable all interrupts */
+ pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
+ XILINX_PCIE_REG_IMR);
+
+ /* Clear pending interrupts */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) &
+ XILINX_PCIE_IMR_ALL_MASK,
+ XILINX_PCIE_REG_IDR);
+
+ /* Enable all interrupts */
+ pcie_write(port, XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IMR);
+
+ /* Set the Bridge Enable bit */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
+ XILINX_PCIE_REG_RPSC_BEN,
+ XILINX_PCIE_REG_RPSC);
+}
+
+/**
+ * xilinx_pcie_setup - Setup memory resources
+ * @nr: Bus number
+ * @sys: Per controller structure
+ *
+ * Return: '1' on success and error value on failure
+ */
+static int xilinx_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct xilinx_pcie_port *port = sys_to_pcie(sys);
+
+ list_splice_init(&port->resources, &sys->resources);
+
+ return 1;
+}
+
+/**
+ * xilinx_pcie_scan_bus - Scan PCIe bus for devices
+ * @nr: Bus number
+ * @sys: Per controller structure
+ *
+ * Return: Valid Bus pointer on success and NULL on failure
+ */
+static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+{
+ struct xilinx_pcie_port *port = sys_to_pcie(sys);
+ struct pci_bus *bus;
+
+ port->root_busno = sys->busnr;
+ bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops,
+ sys, &sys->resources);
+
+ return bus;
+}
+
+/**
+ * xilinx_pcie_parse_and_add_res - Add resources by parsing ranges
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *mem;
+ resource_size_t offset;
+ struct of_pci_range_parser parser;
+ struct of_pci_range range;
+ struct pci_host_bridge_window *win;
+ int err = 0, mem_resno = 0;
+
+ /* Get the ranges */
+ if (of_pci_range_parser_init(&parser, node)) {
+ dev_err(dev, "missing \"ranges\" property\n");
+ return -EINVAL;
+ }
+
+ /* Parse the ranges and add the resources found to the list */
+ for_each_of_pci_range(&parser, &range) {
+
+ if (mem_resno >= XILINX_MAX_NUM_RESOURCES) {
+ dev_err(dev, "Maximum memory resources exceeded\n");
+ return -EINVAL;
+ }
+
+ mem = devm_kmalloc(dev, sizeof(*mem), GFP_KERNEL);
+ if (!mem) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
+
+ of_pci_range_to_resource(&range, node, mem);
+
+ switch (mem->flags & IORESOURCE_TYPE_BITS) {
+ case IORESOURCE_MEM:
+ offset = range.cpu_addr - range.pci_addr;
+ mem_resno++;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err < 0) {
+ dev_warn(dev, "Invalid resource found %pR\n", mem);
+ continue;
+ }
+
+ err = request_resource(&iomem_resource, mem);
+ if (err)
+ goto free_resources;
+
+ pci_add_resource_offset(&port->resources, mem, offset);
+ }
+
+ /* Get the bus range */
+ if (of_pci_parse_bus_range(node, &port->bus_range)) {
+ u32 val = pcie_read(port, XILINX_PCIE_REG_BIR);
+ u8 last;
+
+ last = (val & XILINX_PCIE_BIR_ECAM_SZ_MASK) >>
+ XILINX_PCIE_BIR_ECAM_SZ_SHIFT;
+
+ port->bus_range = (struct resource) {
+ .name = node->name,
+ .start = 0,
+ .end = last,
+ .flags = IORESOURCE_BUS,
+ };
+ }
+
+ /* Register bus resource */
+ pci_add_resource(&port->resources, &port->bus_range);
+
+ return 0;
+
+free_resources:
+ release_child_resources(&iomem_resource);
+ list_for_each_entry(win, &port->resources, list)
+ devm_kfree(dev, win->res);
+ pci_free_resource_list(&port->resources);
+
+ return err;
+}
+
+/**
+ * xilinx_pcie_parse_dt - Parse Device tree
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct resource regs;
+ const char *type;
+ int err;
+
+ type = of_get_property(node, "device_type", NULL);
+ if (!type || strcmp(type, "pci")) {
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
+ return -EINVAL;
+ }
+
+ err = of_address_to_resource(node, 0, &regs);
+ if (err) {
+ dev_err(dev, "missing \"reg\" property\n");
+ return err;
+ }
+
+ port->reg_base = devm_ioremap_resource(dev, &regs);
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+
+ port->irq = irq_of_parse_and_map(node, 0);
+ err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
+ IRQF_SHARED, "xilinx-pcie", port);
+ if (err) {
+ dev_err(dev, "unable to request irq %d\n", port->irq);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_pcie_probe - Probe function
+ * @pdev: Platform device pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_probe(struct platform_device *pdev)
+{
+ struct xilinx_pcie_port *port;
+ struct hw_pci hw;
+ struct device *dev = &pdev->dev;
+ int err;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->dev = dev;
+
+ err = xilinx_pcie_parse_dt(port);
+ if (err) {
+ dev_err(dev, "Parsing DT failed\n");
+ return err;
+ }
+
+ xilinx_pcie_init_port(port);
+
+ err = xilinx_pcie_init_irq_domain(port);
+ if (err) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ return err;
+ }
+
+ /*
+ * Parse PCI ranges, configuration bus range and
+ * request their resources
+ */
+ INIT_LIST_HEAD(&port->resources);
+ err = xilinx_pcie_parse_and_add_res(port);
+ if (err) {
+ dev_err(dev, "Failed adding resources\n");
+ return err;
+ }
+
+ platform_set_drvdata(pdev, port);
+
+ /* Register the device */
+ memset(&hw, 0, sizeof(hw));
+ hw = (struct hw_pci) {
+ .nr_controllers = 1,
+ .private_data = (void **)&port,
+ .setup = xilinx_pcie_setup,
+ .map_irq = of_irq_parse_and_map_pci,
+ .add_bus = xilinx_pcie_add_bus,
+ .scan = xilinx_pcie_scan_bus,
+ .ops = &xilinx_pcie_ops,
+ };
+ pci_common_init_dev(dev, &hw);
+
+ return 0;
+}
+
+/**
+ * xilinx_pcie_remove - Remove function
+ * @pdev: Platform device pointer
+ *
+ * Return: '0' always
+ */
+static int xilinx_pcie_remove(struct platform_device *pdev)
+{
+ struct xilinx_pcie_port *port = platform_get_drvdata(pdev);
+
+ xilinx_pcie_free_irq_domain(port);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_pcie_of_match[] = {
+ { .compatible = "xlnx,axi-pcie-host-1.00.a", },
+ {}
+};
+
+static struct platform_driver xilinx_pcie_driver = {
+ .driver = {
+ .name = "xilinx-pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = xilinx_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = xilinx_pcie_probe,
+ .remove = xilinx_pcie_remove,
+};
+module_platform_driver(xilinx_pcie_driver);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Xilinx AXI PCIe driver");
+MODULE_LICENSE("GPL v2");
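
The interrupt handler above demultiplexes legacy INTx interrupts by decoding the root-port FIFO and looking the line up in the linear IRQ domain created by xilinx_pcie_init_irq_domain(). The intx_domain_ops it references is defined earlier in this file; as a minimal sketch (an assumed typical shape, not a quote of the driver), its map callback would wire each hardware line to a simple flow handler:

	static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
					irq_hw_number_t hwirq)
	{
		/* Assumed sketch: attach a no-op chip and a simple flow handler */
		irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
		irq_set_chip_data(irq, domain->host_data);

		return 0;
	}

With such a mapping in place, the irq_find_mapping() call in the handler returns the Linux virq created for the decoded INTx line, and generic_handle_irq() runs the endpoint driver's handler.
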
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 3e6532b945c1..4a9aa08b08f1 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_HOTPLUG_PCI_S390) += s390_pci_hpc.o
obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM) += acpiphp_ibm.o
-pci_hotplug-objs := pci_hotplug_core.o pcihp_slot.o
+pci_hotplug-objs := pci_hotplug_core.o
ifdef CONFIG_HOTPLUG_PCI_CPCI
pci_hotplug-objs += cpci_hotplug_core.o \
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index a94d850ae228..876ccc620440 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -46,215 +46,6 @@
static bool debug_acpi;
-static acpi_status
-decode_type0_hpx_record(union acpi_object *record, struct hotplug_params *hpx)
-{
- int i;
- union acpi_object *fields = record->package.elements;
- u32 revision = fields[1].integer.value;
-
- switch (revision) {
- case 1:
- if (record->package.count != 6)
- return AE_ERROR;
- for (i = 2; i < 6; i++)
- if (fields[i].type != ACPI_TYPE_INTEGER)
- return AE_ERROR;
- hpx->t0 = &hpx->type0_data;
- hpx->t0->revision = revision;
- hpx->t0->cache_line_size = fields[2].integer.value;
- hpx->t0->latency_timer = fields[3].integer.value;
- hpx->t0->enable_serr = fields[4].integer.value;
- hpx->t0->enable_perr = fields[5].integer.value;
- break;
- default:
- printk(KERN_WARNING
- "%s: Type 0 Revision %d record not supported\n",
- __func__, revision);
- return AE_ERROR;
- }
- return AE_OK;
-}
-
-static acpi_status
-decode_type1_hpx_record(union acpi_object *record, struct hotplug_params *hpx)
-{
- int i;
- union acpi_object *fields = record->package.elements;
- u32 revision = fields[1].integer.value;
-
- switch (revision) {
- case 1:
- if (record->package.count != 5)
- return AE_ERROR;
- for (i = 2; i < 5; i++)
- if (fields[i].type != ACPI_TYPE_INTEGER)
- return AE_ERROR;
- hpx->t1 = &hpx->type1_data;
- hpx->t1->revision = revision;
- hpx->t1->max_mem_read = fields[2].integer.value;
- hpx->t1->avg_max_split = fields[3].integer.value;
- hpx->t1->tot_max_split = fields[4].integer.value;
- break;
- default:
- printk(KERN_WARNING
- "%s: Type 1 Revision %d record not supported\n",
- __func__, revision);
- return AE_ERROR;
- }
- return AE_OK;
-}
-
-static acpi_status
-decode_type2_hpx_record(union acpi_object *record, struct hotplug_params *hpx)
-{
- int i;
- union acpi_object *fields = record->package.elements;
- u32 revision = fields[1].integer.value;
-
- switch (revision) {
- case 1:
- if (record->package.count != 18)
- return AE_ERROR;
- for (i = 2; i < 18; i++)
- if (fields[i].type != ACPI_TYPE_INTEGER)
- return AE_ERROR;
- hpx->t2 = &hpx->type2_data;
- hpx->t2->revision = revision;
- hpx->t2->unc_err_mask_and = fields[2].integer.value;
- hpx->t2->unc_err_mask_or = fields[3].integer.value;
- hpx->t2->unc_err_sever_and = fields[4].integer.value;
- hpx->t2->unc_err_sever_or = fields[5].integer.value;
- hpx->t2->cor_err_mask_and = fields[6].integer.value;
- hpx->t2->cor_err_mask_or = fields[7].integer.value;
- hpx->t2->adv_err_cap_and = fields[8].integer.value;
- hpx->t2->adv_err_cap_or = fields[9].integer.value;
- hpx->t2->pci_exp_devctl_and = fields[10].integer.value;
- hpx->t2->pci_exp_devctl_or = fields[11].integer.value;
- hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value;
- hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value;
- hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
- hpx->t2->sec_unc_err_sever_or = fields[15].integer.value;
- hpx->t2->sec_unc_err_mask_and = fields[16].integer.value;
- hpx->t2->sec_unc_err_mask_or = fields[17].integer.value;
- break;
- default:
- printk(KERN_WARNING
- "%s: Type 2 Revision %d record not supported\n",
- __func__, revision);
- return AE_ERROR;
- }
- return AE_OK;
-}
-
-static acpi_status
-acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
-{
- acpi_status status;
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
- union acpi_object *package, *record, *fields;
- u32 type;
- int i;
-
- /* Clear the return buffer with zeros */
- memset(hpx, 0, sizeof(struct hotplug_params));
-
- status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
- if (ACPI_FAILURE(status))
- return status;
-
- package = (union acpi_object *)buffer.pointer;
- if (package->type != ACPI_TYPE_PACKAGE) {
- status = AE_ERROR;
- goto exit;
- }
-
- for (i = 0; i < package->package.count; i++) {
- record = &package->package.elements[i];
- if (record->type != ACPI_TYPE_PACKAGE) {
- status = AE_ERROR;
- goto exit;
- }
-
- fields = record->package.elements;
- if (fields[0].type != ACPI_TYPE_INTEGER ||
- fields[1].type != ACPI_TYPE_INTEGER) {
- status = AE_ERROR;
- goto exit;
- }
-
- type = fields[0].integer.value;
- switch (type) {
- case 0:
- status = decode_type0_hpx_record(record, hpx);
- if (ACPI_FAILURE(status))
- goto exit;
- break;
- case 1:
- status = decode_type1_hpx_record(record, hpx);
- if (ACPI_FAILURE(status))
- goto exit;
- break;
- case 2:
- status = decode_type2_hpx_record(record, hpx);
- if (ACPI_FAILURE(status))
- goto exit;
- break;
- default:
- printk(KERN_ERR "%s: Type %d record not supported\n",
- __func__, type);
- status = AE_ERROR;
- goto exit;
- }
- }
- exit:
- kfree(buffer.pointer);
- return status;
-}
-
-static acpi_status
-acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
-{
- acpi_status status;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *package, *fields;
- int i;
-
- memset(hpp, 0, sizeof(struct hotplug_params));
-
- status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
- if (ACPI_FAILURE(status))
- return status;
-
- package = (union acpi_object *) buffer.pointer;
- if (package->type != ACPI_TYPE_PACKAGE ||
- package->package.count != 4) {
- status = AE_ERROR;
- goto exit;
- }
-
- fields = package->package.elements;
- for (i = 0; i < 4; i++) {
- if (fields[i].type != ACPI_TYPE_INTEGER) {
- status = AE_ERROR;
- goto exit;
- }
- }
-
- hpp->t0 = &hpp->type0_data;
- hpp->t0->revision = 1;
- hpp->t0->cache_line_size = fields[0].integer.value;
- hpp->t0->latency_timer = fields[1].integer.value;
- hpp->t0->enable_serr = fields[2].integer.value;
- hpp->t0->enable_perr = fields[3].integer.value;
-
-exit:
- kfree(buffer.pointer);
- return status;
-}
-
-
-
/* acpi_run_oshp - get control of hotplug from the firmware
*
* @handle - the handle of the hotplug controller.
@@ -283,48 +74,6 @@ static acpi_status acpi_run_oshp(acpi_handle handle)
return status;
}
-/* pci_get_hp_params
- *
- * @dev - the pci_dev for which we want parameters
- * @hpp - allocated by the caller
- */
-int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
-{
- acpi_status status;
- acpi_handle handle, phandle;
- struct pci_bus *pbus;
-
- handle = NULL;
- for (pbus = dev->bus; pbus; pbus = pbus->parent) {
- handle = acpi_pci_get_bridge_handle(pbus);
- if (handle)
- break;
- }
-
- /*
- * _HPP settings apply to all child buses, until another _HPP is
- * encountered. If we don't find an _HPP for the input pci dev,
- * look for it in the parent device scope since that would apply to
- * this pci dev.
- */
- while (handle) {
- status = acpi_run_hpx(handle, hpp);
- if (ACPI_SUCCESS(status))
- return 0;
- status = acpi_run_hpp(handle, hpp);
- if (ACPI_SUCCESS(status))
- return 0;
- if (acpi_is_root_bridge(handle))
- break;
- status = acpi_get_parent(handle, &phandle);
- if (ACPI_FAILURE(status))
- break;
- handle = phandle;
- }
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(pci_get_hp_params);
-
/**
* acpi_get_hp_hw_control_from_firmware
* @dev: the pci_dev of the bridge that has a hotplug controller
@@ -433,7 +182,8 @@ int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle)
{
acpi_handle bridge_handle, parent_handle;
- if (!(bridge_handle = acpi_pci_get_bridge_handle(pbus)))
+ bridge_handle = acpi_pci_get_bridge_handle(pbus);
+ if (!bridge_handle)
return 0;
if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle))))
return 0;
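
The _HPX/_HPP evaluation removed above is relocated rather than dropped: the same decode_type0/1/2_hpx_record() helpers reappear in the drivers/pci/pci-acpi.c hunk later in this diff, presumably so the core can apply the firmware settings during normal enumeration instead of only from the hotplug drivers. The exported interface keeps the signature shown in the deleted code; a minimal caller sketch (the error handling is illustrative only):

	struct hotplug_params hpp;

	memset(&hpp, 0, sizeof(hpp));
	if (pci_get_hp_params(dev, &hpp))
		return;		/* no _HPX or _HPP record for this device */
	/* hpp.t0 / hpp.t1 / hpp.t2 now point at any decoded setting records */
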
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 6cd5160fc057..bcb90e4888dd 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -61,7 +61,6 @@ static DEFINE_MUTEX(bridge_mutex);
static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type);
static void acpiphp_post_dock_fixup(struct acpi_device *adev);
static void acpiphp_sanitize_bus(struct pci_bus *bus);
-static void acpiphp_set_hpp_values(struct pci_bus *bus);
static void hotplug_event(u32 type, struct acpiphp_context *context);
static void free_bridge(struct kref *kref);
@@ -510,7 +509,7 @@ static void enable_slot(struct acpiphp_slot *slot)
__pci_bus_assign_resources(bus, &add_list, NULL);
acpiphp_sanitize_bus(bus);
- acpiphp_set_hpp_values(bus);
+ pcie_bus_configure_settings(bus);
acpiphp_set_acpi_region(slot);
list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -698,14 +697,6 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
}
}
-static void acpiphp_set_hpp_values(struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- list_for_each_entry(dev, &bus->devices, bus_list)
- pci_configure_slot(dev);
-}
-
/*
* Remove devices for which we could not assign resources, call
* arch specific code to fix-up the bus
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 8dcccffd6e21..6ca23998ee8f 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -302,7 +302,7 @@ static int ibm_get_table_from_acpi(char **bufp)
goto read_table_done;
}
- for(size = 0, i = 0; i < package->package.count; i++) {
+ for (size = 0, i = 0; i < package->package.count; i++) {
if (package->package.elements[i].type != ACPI_TYPE_BUFFER) {
pr_err("%s: Invalid APCI element %d\n", __func__, i);
goto read_table_done;
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index e09cf7827d68..a5a7fd8332ac 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -125,7 +125,8 @@ disable_slot(struct hotplug_slot *hotplug_slot)
/* Unconfigure device */
dbg("%s - unconfiguring slot %s", __func__, slot_name(slot));
- if ((retval = cpci_unconfigure_slot(slot))) {
+ retval = cpci_unconfigure_slot(slot);
+ if (retval) {
err("%s - could not unconfigure slot %s",
__func__, slot_name(slot));
goto disable_error;
@@ -141,9 +142,11 @@ disable_slot(struct hotplug_slot *hotplug_slot)
}
cpci_led_on(slot);
- if (controller->ops->set_power)
- if ((retval = controller->ops->set_power(slot, 0)))
+ if (controller->ops->set_power) {
+ retval = controller->ops->set_power(slot, 0);
+ if (retval)
goto disable_error;
+ }
if (update_adapter_status(slot->hotplug_slot, 0))
warn("failure to update adapter file");
@@ -467,9 +470,9 @@ check_slots(void)
__func__, slot_name(slot), hs_csr);
if (!slot->extracting) {
- if (update_latch_status(slot->hotplug_slot, 0)) {
+ if (update_latch_status(slot->hotplug_slot, 0))
warn("failure to update latch file");
- }
+
slot->extracting = 1;
atomic_inc(&extracting);
}
diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
index 04fcd7811400..66b7bbebe493 100644
--- a/drivers/pci/hotplug/cpcihp_generic.c
+++ b/drivers/pci/hotplug/cpcihp_generic.c
@@ -56,7 +56,7 @@
if (debug) \
printk (KERN_DEBUG "%s: " format "\n", \
MY_NAME , ## arg); \
- } while(0)
+ } while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
@@ -82,28 +82,28 @@ static int __init validate_parameters(void)
char *p;
unsigned long tmp;
- if(!bridge) {
+ if (!bridge) {
info("not configured, disabling.");
return -EINVAL;
}
str = bridge;
- if(!*str)
+ if (!*str)
return -EINVAL;
tmp = simple_strtoul(str, &p, 16);
- if(p == str || tmp > 0xff) {
+ if (p == str || tmp > 0xff) {
err("Invalid hotplug bus bridge device bus number");
return -EINVAL;
}
bridge_busnr = (u8) tmp;
dbg("bridge_busnr = 0x%02x", bridge_busnr);
- if(*p != ':') {
+ if (*p != ':') {
err("Invalid hotplug bus bridge device");
return -EINVAL;
}
str = p + 1;
tmp = simple_strtoul(str, &p, 16);
- if(p == str || tmp > 0x1f) {
+ if (p == str || tmp > 0x1f) {
err("Invalid hotplug bus bridge device slot number");
return -EINVAL;
}
@@ -112,18 +112,18 @@ static int __init validate_parameters(void)
dbg("first_slot = 0x%02x", first_slot);
dbg("last_slot = 0x%02x", last_slot);
- if(!(first_slot && last_slot)) {
+ if (!(first_slot && last_slot)) {
err("Need to specify first_slot and last_slot");
return -EINVAL;
}
- if(last_slot < first_slot) {
+ if (last_slot < first_slot) {
err("first_slot must be less than last_slot");
return -EINVAL;
}
dbg("port = 0x%04x", port);
dbg("enum_bit = 0x%02x", enum_bit);
- if(enum_bit > 7) {
+ if (enum_bit > 7) {
err("Invalid #ENUM bit");
return -EINVAL;
}
@@ -151,12 +151,12 @@ static int __init cpcihp_generic_init(void)
return status;
r = request_region(port, 1, "#ENUM hotswap signal register");
- if(!r)
+ if (!r)
return -EBUSY;
dev = pci_get_domain_bus_and_slot(0, bridge_busnr,
PCI_DEVFN(bridge_slot, 0));
- if(!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
+ if (!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
err("Invalid bridge device %s", bridge);
pci_dev_put(dev);
return -EINVAL;
@@ -169,21 +169,21 @@ static int __init cpcihp_generic_init(void)
generic_hpc.ops = &generic_hpc_ops;
status = cpci_hp_register_controller(&generic_hpc);
- if(status != 0) {
+ if (status != 0) {
err("Could not register cPCI hotplug controller");
return -ENODEV;
}
dbg("registered controller");
status = cpci_hp_register_bus(bus, first_slot, last_slot);
- if(status != 0) {
+ if (status != 0) {
err("Could not register cPCI hotplug bus");
goto init_bus_register_error;
}
dbg("registered bus");
status = cpci_hp_start();
- if(status != 0) {
+ if (status != 0) {
err("Could not started cPCI hotplug system");
goto init_start_error;
}
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
index 6757b3ef7e10..7ecf34e76a61 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.c
+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
@@ -51,7 +51,7 @@
if (debug) \
printk (KERN_DEBUG "%s: " format "\n", \
MY_NAME , ## arg); \
- } while(0)
+ } while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
@@ -82,13 +82,13 @@ static int zt5550_hc_config(struct pci_dev *pdev)
int ret;
/* Since we know that no boards exist with two HC chips, treat it as an error */
- if(hc_dev) {
+ if (hc_dev) {
err("too many host controller devices?");
return -EBUSY;
}
ret = pci_enable_device(pdev);
- if(ret) {
+ if (ret) {
err("cannot enable %s\n", pci_name(pdev));
return ret;
}
@@ -98,7 +98,7 @@ static int zt5550_hc_config(struct pci_dev *pdev)
dbg("pci resource start %llx", (unsigned long long)pci_resource_start(hc_dev, 1));
dbg("pci resource len %llx", (unsigned long long)pci_resource_len(hc_dev, 1));
- if(!request_mem_region(pci_resource_start(hc_dev, 1),
+ if (!request_mem_region(pci_resource_start(hc_dev, 1),
pci_resource_len(hc_dev, 1), MY_NAME)) {
err("cannot reserve MMIO region");
ret = -ENOMEM;
@@ -107,7 +107,7 @@ static int zt5550_hc_config(struct pci_dev *pdev)
hc_registers =
ioremap(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1));
- if(!hc_registers) {
+ if (!hc_registers) {
err("cannot remap MMIO region %llx @ %llx",
(unsigned long long)pci_resource_len(hc_dev, 1),
(unsigned long long)pci_resource_start(hc_dev, 1));
@@ -146,7 +146,7 @@ exit_disable_device:
static int zt5550_hc_cleanup(void)
{
- if(!hc_dev)
+ if (!hc_dev)
return -ENODEV;
iounmap(hc_registers);
@@ -170,9 +170,9 @@ static int zt5550_hc_check_irq(void *dev_id)
u8 reg;
ret = 0;
- if(dev_id == zt5550_hpc.dev_id) {
+ if (dev_id == zt5550_hpc.dev_id) {
reg = readb(csr_int_status);
- if(reg)
+ if (reg)
ret = 1;
}
return ret;
@@ -182,9 +182,9 @@ static int zt5550_hc_enable_irq(void)
{
u8 reg;
- if(hc_dev == NULL) {
+ if (hc_dev == NULL)
return -ENODEV;
- }
+
reg = readb(csr_int_mask);
reg = reg & ~ENUM_INT_MASK;
writeb(reg, csr_int_mask);
@@ -195,9 +195,8 @@ static int zt5550_hc_disable_irq(void)
{
u8 reg;
- if(hc_dev == NULL) {
+ if (hc_dev == NULL)
return -ENODEV;
- }
reg = readb(csr_int_mask);
reg = reg | ENUM_INT_MASK;
@@ -210,15 +209,15 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
int status;
status = zt5550_hc_config(pdev);
- if(status != 0) {
+ if (status != 0)
return status;
- }
+
dbg("returned from zt5550_hc_config");
memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
zt5550_hpc.ops = &zt5550_hpc_ops;
- if(!poll) {
+ if (!poll) {
zt5550_hpc.irq = hc_dev->irq;
zt5550_hpc.irq_flags = IRQF_SHARED;
zt5550_hpc.dev_id = hc_dev;
@@ -231,15 +230,16 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
}
status = cpci_hp_register_controller(&zt5550_hpc);
- if(status != 0) {
+ if (status != 0) {
err("could not register cPCI hotplug controller");
goto init_hc_error;
}
dbg("registered controller");
/* Look for first device matching cPCI bus's bridge vendor and device IDs */
- if(!(bus0_dev = pci_get_device(PCI_VENDOR_ID_DEC,
- PCI_DEVICE_ID_DEC_21154, NULL))) {
+ bus0_dev = pci_get_device(PCI_VENDOR_ID_DEC,
+ PCI_DEVICE_ID_DEC_21154, NULL);
+ if (!bus0_dev) {
status = -ENODEV;
goto init_register_error;
}
@@ -247,14 +247,14 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
pci_dev_put(bus0_dev);
status = cpci_hp_register_bus(bus0, 0x0a, 0x0f);
- if(status != 0) {
+ if (status != 0) {
err("could not register cPCI hotplug bus");
goto init_register_error;
}
dbg("registered bus");
status = cpci_hp_start();
- if(status != 0) {
+ if (status != 0) {
err("could not started cPCI hotplug system");
cpci_hp_unregister_bus(bus0);
goto init_register_error;
@@ -300,11 +300,11 @@ static int __init zt5550_init(void)
info(DRIVER_DESC " version: " DRIVER_VERSION);
r = request_region(ENUM_PORT, 1, "#ENUM hotswap signal register");
- if(!r)
+ if (!r)
return -EBUSY;
rc = pci_register_driver(&zt5550_hc_driver);
- if(rc < 0)
+ if (rc < 0)
release_region(ENUM_PORT, 1);
return rc;
}
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 0450f405807d..b28b2d2184cd 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -690,7 +690,7 @@ static inline int cpq_get_latch_status(struct controller *ctrl,
status = (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot));
- return(status == 0) ? 1 : 0;
+ return (status == 0) ? 1 : 0;
}
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 4aaee746df88..a53084ddc118 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -1096,9 +1096,8 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* initialize our threads if they haven't already been started up */
rc = one_time_init();
- if (rc) {
+ if (rc)
goto err_free_bus;
- }
dbg("pdev = %p\n", pdev);
dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0));
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index bde47fce3248..c5cbefee5236 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -705,9 +705,8 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
if (temp == max) {
*head = max->next;
} else {
- while (temp && temp->next != max) {
+ while (temp && temp->next != max)
temp = temp->next;
- }
if (temp)
temp->next = max->next;
@@ -903,9 +902,8 @@ irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data)
/*
* Check to see if it was our interrupt
*/
- if (!(misc & 0x000C)) {
+ if (!(misc & 0x000C))
return IRQ_NONE;
- }
if (misc & 0x0004) {
/*
@@ -1143,7 +1141,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
/* We don't allow freq/mode changes if we find another adapter running
* in another slot on this controller
*/
- for(slot = ctrl->slot; slot; slot = slot->next) {
+ for (slot = ctrl->slot; slot; slot = slot->next) {
if (slot->device == (hp_slot + ctrl->slot_device_offset))
continue;
if (!slot->hotplug_slot || !slot->hotplug_slot->info)
@@ -1193,7 +1191,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ);
reg16 &= ~0x000F;
- switch(adapter_speed) {
+ switch (adapter_speed) {
case(PCI_SPEED_133MHz_PCIX):
reg = 0x75;
reg16 |= 0xB;
@@ -2006,9 +2004,8 @@ int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func)
/* Check to see if the interlock is closed */
tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
- if (tempdword & (0x01 << hp_slot)) {
+ if (tempdword & (0x01 << hp_slot))
return 1;
- }
if (func->is_a_board) {
rc = board_replaced(func, ctrl);
@@ -2070,9 +2067,8 @@ int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func)
}
}
- if (rc) {
+ if (rc)
dbg("%s: rc = %d\n", __func__, rc);
- }
if (p_slot)
update_slot_info(ctrl, p_slot);
@@ -2095,9 +2091,8 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
device = func->device;
func = cpqhp_slot_find(ctrl->bus, device, index++);
p_slot = cpqhp_find_slot(ctrl, device);
- if (p_slot) {
+ if (p_slot)
physical_slot = p_slot->number;
- }
/* Make sure there are no video controllers here */
while (func && !rc) {
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index 0968a9bcb345..1e08ff8c229c 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -204,9 +204,8 @@ static int load_HRT (void __iomem *rom_start)
u8 temp_byte = 0xFF;
u32 rc;
- if (!check_for_compaq_ROM(rom_start)) {
+ if (!check_for_compaq_ROM(rom_start))
return -ENODEV;
- }
available = 1024;
@@ -250,9 +249,8 @@ static u32 store_HRT (void __iomem *rom_start)
available = 1024;
- if (!check_for_compaq_ROM(rom_start)) {
+ if (!check_for_compaq_ROM(rom_start))
return(1);
- }
buffer = (u32*) evbuffer;
@@ -427,9 +425,9 @@ static u32 store_HRT (void __iomem *rom_start)
void compaq_nvram_init (void __iomem *rom_start)
{
- if (rom_start) {
+ if (rom_start)
compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
- }
+
dbg("int15 entry = %p\n", compaq_int15_entry_point);
/* initialize our int15 lock */
@@ -661,9 +659,8 @@ int compaq_nvram_store (void __iomem *rom_start)
if (evbuffer_init) {
rc = store_HRT(rom_start);
- if (rc) {
+ if (rc)
err(msg_unable_to_save);
- }
}
return rc;
}
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index f7b8684a7739..3efaf4c38528 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -1023,7 +1023,8 @@ static int enable_slot(struct hotplug_slot *hs)
debug("ENABLING SLOT........\n");
slot_cur = hs->private;
- if ((rc = validate(slot_cur, ENABLE))) {
+ rc = validate(slot_cur, ENABLE);
+ if (rc) {
err("validate function failed\n");
goto error_nopower;
}
@@ -1199,9 +1200,8 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
debug("DISABLING SLOT...\n");
- if ((slot_cur == NULL) || (slot_cur->ctrl == NULL)) {
+ if ((slot_cur == NULL) || (slot_cur->ctrl == NULL))
return -ENODEV;
- }
flag = slot_cur->flag;
slot_cur->flag = 1;
@@ -1336,17 +1336,20 @@ static int __init ibmphp_init(void)
for (i = 0; i < 16; i++)
irqs[i] = 0;
- if ((rc = ibmphp_access_ebda()))
+ rc = ibmphp_access_ebda();
+ if (rc)
goto error;
debug("after ibmphp_access_ebda()\n");
- if ((rc = ibmphp_rsrc_init()))
+ rc = ibmphp_rsrc_init();
+ if (rc)
goto error;
debug("AFTER Resource & EBDA INITIALIZATIONS\n");
max_slots = get_max_slots();
- if ((rc = ibmphp_register_pci()))
+ rc = ibmphp_register_pci();
+ if (rc)
goto error;
if (init_ops()) {
@@ -1355,9 +1358,9 @@ static int __init ibmphp_init(void)
}
ibmphp_print_test();
- if ((rc = ibmphp_hpc_start_poll_thread())) {
+ rc = ibmphp_hpc_start_poll_thread();
+ if (rc)
goto error;
- }
exit:
return rc;
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 0f65ac555434..d9b197d5c6b4 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -215,9 +215,8 @@ static void __init print_ebda_hpc (void)
debug ("%s - cap of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_cap);
}
- for (index = 0; index < hpc_ptr->bus_count; index++) {
+ for (index = 0; index < hpc_ptr->bus_count; index++)
debug ("%s - bus# of each bus controlled by this ctlr: %x\n", __func__, hpc_ptr->buses[index].bus_num);
- }
debug ("%s - type of hpc: %x\n", __func__, hpc_ptr->ctlr_type);
switch (hpc_ptr->ctlr_type) {
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index a936022956e6..220876715a08 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -997,9 +997,8 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot)
rc = ibmphp_do_disable_slot (pslot);
}
- if (update || disable) {
+ if (update || disable)
ibmphp_update_slot_info (pslot);
- }
debug ("%s - Exit rc[%d] disable[%x] update[%x]\n", __func__, rc, disable, update);
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index 2fd296706ce7..814cea22a9fa 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -145,7 +145,8 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
case PCI_HEADER_TYPE_NORMAL:
debug ("single device case.... vendor id = %x, hdr_type = %x, class = %x\n", vendor_id, hdr_type, class);
assign_alt_irq (cur_func, class_code);
- if ((rc = configure_device (cur_func)) < 0) {
+ rc = configure_device(cur_func);
+ if (rc < 0) {
/* We need to do this in case some other BARs were properly inserted */
err ("was not able to configure devfunc %x on bus %x.\n",
cur_func->device, cur_func->busno);
@@ -157,7 +158,8 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
break;
case PCI_HEADER_TYPE_MULTIDEVICE:
assign_alt_irq (cur_func, class_code);
- if ((rc = configure_device (cur_func)) < 0) {
+ rc = configure_device(cur_func);
+ if (rc < 0) {
/* We need to do this in case some other BARs were properly inserted */
err ("was not able to configure devfunc %x on bus %x...bailing out\n",
cur_func->device, cur_func->busno);
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index f34745abd5b6..219ba8090a37 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -224,7 +224,8 @@ int __init ibmphp_rsrc_init (void)
if ((curr->rsrc_type & RESTYPE) == MMASK) {
/* no bus structure exists in place yet */
if (list_empty (&gbuses)) {
- if ((rc = alloc_bus_range (&newbus, &newrange, curr, MEM, 1)))
+ rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1);
+ if (rc)
return rc;
list_add_tail (&newbus->bus_list, &gbuses);
debug ("gbuses = NULL, Memory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -237,7 +238,8 @@ int __init ibmphp_rsrc_init (void)
return rc;
} else {
/* went through all the buses and didn't find ours, need to create a new bus node */
- if ((rc = alloc_bus_range (&newbus, &newrange, curr, MEM, 1)))
+ rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1);
+ if (rc)
return rc;
list_add_tail (&newbus->bus_list, &gbuses);
@@ -248,7 +250,8 @@ int __init ibmphp_rsrc_init (void)
/* prefetchable memory */
if (list_empty (&gbuses)) {
/* no bus structure exists in place yet */
- if ((rc = alloc_bus_range (&newbus, &newrange, curr, PFMEM, 1)))
+ rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1);
+ if (rc)
return rc;
list_add_tail (&newbus->bus_list, &gbuses);
debug ("gbuses = NULL, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -261,7 +264,8 @@ int __init ibmphp_rsrc_init (void)
return rc;
} else {
/* went through all the buses and didn't find ours, need to create a new bus node */
- if ((rc = alloc_bus_range (&newbus, &newrange, curr, PFMEM, 1)))
+ rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1);
+ if (rc)
return rc;
list_add_tail (&newbus->bus_list, &gbuses);
debug ("1st Bus, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -271,7 +275,8 @@ int __init ibmphp_rsrc_init (void)
/* IO */
if (list_empty (&gbuses)) {
/* no bus structure exists in place yet */
- if ((rc = alloc_bus_range (&newbus, &newrange, curr, IO, 1)))
+ rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1);
+ if (rc)
return rc;
list_add_tail (&newbus->bus_list, &gbuses);
debug ("gbuses = NULL, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -283,7 +288,8 @@ int __init ibmphp_rsrc_init (void)
return rc;
} else {
/* went through all the buses and didn't find ours, need to create a new bus node */
- if ((rc = alloc_bus_range (&newbus, &newrange, curr, IO, 1)))
+ rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1);
+ if (rc)
return rc;
list_add_tail (&newbus->bus_list, &gbuses);
debug ("1st Bus, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -1038,7 +1044,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
/* found our range */
if (!res_prev) {
/* first time in the loop */
- if ((res_cur->start != range->start) && ((len_tmp = res_cur->start - 1 - range->start) >= res->len)) {
+ len_tmp = res_cur->start - 1 - range->start;
+
+ if ((res_cur->start != range->start) && (len_tmp >= res->len)) {
debug ("len_tmp = %x\n", len_tmp);
if ((len_tmp < len_cur) || (len_cur == 0)) {
@@ -1078,7 +1086,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
}
if (!res_cur->next) {
/* last device on the range */
- if ((range->end != res_cur->end) && ((len_tmp = range->end - (res_cur->end + 1)) >= res->len)) {
+ len_tmp = range->end - (res_cur->end + 1);
+
+ if ((range->end != res_cur->end) && (len_tmp >= res->len)) {
debug ("len_tmp = %x\n", len_tmp);
if ((len_tmp < len_cur) || (len_cur == 0)) {
@@ -1117,8 +1127,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
if (res_prev) {
if (res_prev->rangeno != res_cur->rangeno) {
/* 1st device on this range */
- if ((res_cur->start != range->start) &&
- ((len_tmp = res_cur->start - 1 - range->start) >= res->len)) {
+ len_tmp = res_cur->start - 1 - range->start;
+
+ if ((res_cur->start != range->start) && (len_tmp >= res->len)) {
if ((len_tmp < len_cur) || (len_cur == 0)) {
if ((range->start % tmp_divide) == 0) {
/* just perfect, starting address is divisible by length */
@@ -1153,7 +1164,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
}
} else {
/* in the same range */
- if ((len_tmp = res_cur->start - 1 - res_prev->end - 1) >= res->len) {
+ len_tmp = res_cur->start - 1 - res_prev->end - 1;
+
+ if (len_tmp >= res->len) {
if ((len_tmp < len_cur) || (len_cur == 0)) {
if (((res_prev->end + 1) % tmp_divide) == 0) {
/* just perfect, starting address's divisible by length */
@@ -1212,7 +1225,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
break;
}
while (range) {
- if ((len_tmp = range->end - range->start) >= res->len) {
+ len_tmp = range->end - range->start;
+
+ if (len_tmp >= res->len) {
if ((len_tmp < len_cur) || (len_cur == 0)) {
if ((range->start % tmp_divide) == 0) {
/* just perfect, starting address's divisible by length */
@@ -1276,7 +1291,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
break;
}
while (range) {
- if ((len_tmp = range->end - range->start) >= res->len) {
+ len_tmp = range->end - range->start;
+
+ if (len_tmp >= res->len) {
if ((len_tmp < len_cur) || (len_cur == 0)) {
if ((range->start % tmp_divide) == 0) {
/* just perfect, starting address's divisible by length */
@@ -1335,7 +1352,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
return -EINVAL;
}
}
- } /* end if(!res_cur) */
+ } /* end if (!res_cur) */
return -EINVAL;
}
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 9e5a9fbb93d7..b11521953485 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -92,7 +92,7 @@ struct controller {
struct slot *slot;
wait_queue_head_t queue; /* sleep & wake process */
u32 slot_cap;
- u32 slot_ctrl;
+ u16 slot_ctrl;
struct timer_list poll_timer;
unsigned long cmd_started; /* jiffies */
unsigned int cmd_busy:1;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 07aa722bb12c..3a5e7e28b874 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -262,6 +262,13 @@ static int pciehp_probe(struct pcie_device *dev)
goto err_out_none;
}
+ if (!dev->port->subordinate) {
+ /* Can happen if we run out of bus numbers during probe */
+ dev_err(&dev->device,
+ "Hotplug bridge without secondary bus, ignoring\n");
+ goto err_out_none;
+ }
+
ctrl = pcie_init(dev);
if (!ctrl) {
dev_err(&dev->device, "Controller initialization failed\n");
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 2a412fa3b338..0ebf754fc177 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -171,9 +171,9 @@ static void pcie_wait_cmd(struct controller *ctrl)
* interrupts.
*/
if (!rc)
- ctrl_info(ctrl, "Timeout on hotplug command %#010x (issued %u msec ago)\n",
+ ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
ctrl->slot_ctrl,
- jiffies_to_msecs(now - ctrl->cmd_started));
+ jiffies_to_msecs(jiffies - ctrl->cmd_started));
}
/**
@@ -422,9 +422,9 @@ void pciehp_set_attention_status(struct slot *slot, u8 value)
default:
return;
}
+ pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
- pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
}
void pciehp_green_led_on(struct slot *slot)
@@ -614,6 +614,8 @@ void pcie_enable_notification(struct controller *ctrl)
PCI_EXP_SLTCTL_DLLSCE);
pcie_write_cmd(ctrl, cmd, mask);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
static void pcie_disable_notification(struct controller *ctrl)
@@ -625,6 +627,8 @@ static void pcie_disable_notification(struct controller *ctrl)
PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
PCI_EXP_SLTCTL_DLLSCE);
pcie_write_cmd(ctrl, 0, mask);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
}
/*
@@ -652,6 +656,8 @@ int pciehp_reset_slot(struct slot *slot, int probe)
stat_mask |= PCI_EXP_SLTSTA_DLLSC;
pcie_write_cmd(ctrl, 0, ctrl_mask);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
if (pciehp_poll_mode)
del_timer_sync(&ctrl->poll_timer);
@@ -659,6 +665,8 @@ int pciehp_reset_slot(struct slot *slot, int probe)
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
pcie_write_cmd(ctrl, ctrl_mask, ctrl_mask);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
if (pciehp_poll_mode)
int_poll_timeout(ctrl->poll_timer.data);
@@ -797,9 +805,6 @@ struct controller *pcie_init(struct pcie_device *dev)
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
- /* Disable software notification */
- pcie_disable_notification(ctrl);
-
ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n",
(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
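
Narrowing slot_ctrl from u32 to u16 in pciehp.h matches the Slot Control register, a 16-bit field in the PCI Express capability; it is also why the timeout message above now prints the cached value with %#06x. A small sketch of the access pattern this assumes (illustrative, not quoted from pciehp):

	u16 slot_ctrl;

	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	slot_ctrl |= PCI_EXP_SLTCTL_HPIE;	/* e.g. enable hot-plug interrupts */
	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
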
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 5f871f4c4af1..9e69403be632 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -65,14 +65,7 @@ int pciehp_configure_device(struct slot *p_slot)
pci_hp_add_bridge(dev);
pci_assign_unassigned_bridge_resources(bridge);
-
- list_for_each_entry(dev, &parent->devices, bus_list) {
- if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
- continue;
-
- pci_configure_slot(dev);
- }
-
+ pcie_bus_configure_settings(parent);
pci_bus_add_devices(parent);
out:
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
deleted file mode 100644
index 3e36ec8d708a..000000000000
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright (C) 1995,2001 Compaq Computer Corporation
- * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
- * Copyright (C) 2001 IBM Corp.
- * Copyright (C) 2003-2004 Intel Corporation
- * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/pci.h>
-#include <linux/export.h>
-#include <linux/pci_hotplug.h>
-
-static struct hpp_type0 pci_default_type0 = {
- .revision = 1,
- .cache_line_size = 8,
- .latency_timer = 0x40,
- .enable_serr = 0,
- .enable_perr = 0,
-};
-
-static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
-{
- u16 pci_cmd, pci_bctl;
-
- if (!hpp) {
- /*
- * Perhaps we *should* use default settings for PCIe, but
- * pciehp didn't, so we won't either.
- */
- if (pci_is_pcie(dev))
- return;
- hpp = &pci_default_type0;
- }
-
- if (hpp->revision > 1) {
- dev_warn(&dev->dev,
- "PCI settings rev %d not supported; using defaults\n",
- hpp->revision);
- hpp = &pci_default_type0;
- }
-
- pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
- pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
- if (hpp->enable_serr)
- pci_cmd |= PCI_COMMAND_SERR;
- else
- pci_cmd &= ~PCI_COMMAND_SERR;
- if (hpp->enable_perr)
- pci_cmd |= PCI_COMMAND_PARITY;
- else
- pci_cmd &= ~PCI_COMMAND_PARITY;
- pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
-
- /* Program bridge control value */
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
- pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
- hpp->latency_timer);
- pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
- if (hpp->enable_serr)
- pci_bctl |= PCI_BRIDGE_CTL_SERR;
- else
- pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
- if (hpp->enable_perr)
- pci_bctl |= PCI_BRIDGE_CTL_PARITY;
- else
- pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
- }
-}
-
-static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
-{
- if (hpp)
- dev_warn(&dev->dev, "PCI-X settings not supported\n");
-}
-
-static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
-{
- int pos;
- u32 reg32;
-
- if (!hpp)
- return;
-
- if (hpp->revision > 1) {
- dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
- hpp->revision);
- return;
- }
-
- /* Initialize Device Control Register */
- pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
- ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
-
- /* Initialize Link Control Register */
- if (dev->subordinate)
- pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
- ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
-
- /* Find Advanced Error Reporting Enhanced Capability */
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
- if (!pos)
- return;
-
- /* Initialize Uncorrectable Error Mask Register */
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
- reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
-
- /* Initialize Uncorrectable Error Severity Register */
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
- reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
-
- /* Initialize Correctable Error Mask Register */
- pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
- reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
- pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
-
- /* Initialize Advanced Error Capabilities and Control Register */
- pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
- reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
- pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
-
- /*
- * FIXME: The following two registers are not supported yet.
- *
- * o Secondary Uncorrectable Error Severity Register
- * o Secondary Uncorrectable Error Mask Register
- */
-}
-
-void pci_configure_slot(struct pci_dev *dev)
-{
- struct pci_dev *cdev;
- struct hotplug_params hpp;
-
- if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
- (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
- (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
- return;
-
- pcie_bus_configure_settings(dev->bus);
-
- memset(&hpp, 0, sizeof(hpp));
- pci_get_hp_params(dev, &hpp);
-
- program_hpp_type2(dev, hpp.t2);
- program_hpp_type1(dev, hpp.t1);
- program_hpp_type0(dev, hpp.t0);
-
- if (dev->subordinate) {
- list_for_each_entry(cdev, &dev->subordinate->devices,
- bus_list)
- pci_configure_slot(cdev);
- }
-}
-EXPORT_SYMBOL_GPL(pci_configure_slot);
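
With pcihp_slot.c deleted, the hotplug drivers no longer walk the new bus calling pci_configure_slot() on each device. As the pciehp_pci.c and shpchp_pci.c hunks above show, their configure paths shrink to resource assignment plus one bus-wide call, with the _HPP/_HPX programming expected to happen when the core enumerates the new devices. The resulting flow, sketched with the same variables those hunks use:

	pci_assign_unassigned_bridge_resources(bridge);
	pcie_bus_configure_settings(parent);
	pci_bus_add_devices(parent);
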
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index a81fb67ea9a1..10c7927599b3 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -195,7 +195,8 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
int rc = 0;
ctrl_dbg(ctrl, "Change speed to %d\n", speed);
- if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed))) {
+ rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed);
+ if (rc) {
ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
__func__);
return WRONG_BUS_FREQUENCY;
@@ -261,14 +262,16 @@ static int board_added(struct slot *p_slot)
}
if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) {
- if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) {
+ rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz);
+ if (rc) {
ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
__func__);
return WRONG_BUS_FREQUENCY;
}
/* turn on board, blink green LED, turn off Amber LED */
- if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) {
+ rc = p_slot->hpc_ops->slot_enable(p_slot);
+ if (rc) {
ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
return rc;
}
@@ -296,7 +299,8 @@ static int board_added(struct slot *p_slot)
return rc;
/* turn on board, blink green LED, turn off Amber LED */
- if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) {
+ rc = p_slot->hpc_ops->slot_enable(p_slot);
+ if (rc) {
ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
return rc;
}
@@ -595,7 +599,7 @@ static int shpchp_enable_slot (struct slot *p_slot)
ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
- if(((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) ||
+ if (((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) ||
(p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458))
&& p_slot->ctrl->num_slots == 1) {
/* handle amd pogo errata; this must be done before enable */
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 29e22352822c..7d223e9080ef 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -466,7 +466,8 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
u8 m66_cap = !!(slot_reg & MHZ66_CAP);
u8 pi, pcix_cap;
- if ((retval = hpc_get_prog_int(slot, &pi)))
+ retval = hpc_get_prog_int(slot, &pi);
+ if (retval)
return retval;
switch (pi) {
@@ -798,7 +799,7 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
ctrl_dbg(ctrl, "%s: intr_loc = %x\n", __func__, intr_loc);
- if(!shpchp_poll_mode) {
+ if (!shpchp_poll_mode) {
/*
* Mask Global Interrupt Mask - see implementation
* note on p. 139 of SHPC spec rev 1.0
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 469454e0cc48..f8cd3a27e351 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -69,13 +69,7 @@ int shpchp_configure_device(struct slot *p_slot)
}
pci_assign_unassigned_bridge_resources(bridge);
-
- list_for_each_entry(dev, &parent->devices, bus_list) {
- if (PCI_SLOT(dev->devfn) != p_slot->device)
- continue;
- pci_configure_slot(dev);
- }
-
+ pcie_bus_configure_settings(parent);
pci_bus_add_devices(parent);
out:
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index cb6f24740ee3..4d109c07294a 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -633,7 +633,7 @@ int pci_vfs_assigned(struct pci_dev *dev)
* our dev as the physical function and the assigned bit is set
*/
if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
- (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
+ pci_is_dev_assigned(vfdev))
vfs_assigned++;
vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 5a40516444f3..2f7c92c4757a 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -56,16 +56,6 @@ void __weak arch_teardown_msi_irq(unsigned int irq)
chip->teardown_irq(chip, irq);
}
-int __weak arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
-{
- struct msi_chip *chip = dev->bus->msi;
-
- if (!chip || !chip->check_device)
- return 0;
-
- return chip->check_device(chip, dev, nvec, type);
-}
-
int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
struct msi_desc *entry;
@@ -130,7 +120,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq)
}
if (entry)
- write_msi_msg(irq, &entry->msg);
+ __write_msi_msg(entry, &entry->msg);
}
void __weak arch_restore_msi_irqs(struct pci_dev *dev)
@@ -384,17 +374,6 @@ static void free_msi_irqs(struct pci_dev *dev)
iounmap(entry->mask_base);
}
- /*
- * Its possible that we get into this path
- * When populate_msi_sysfs fails, which means the entries
- * were not registered with sysfs. In that case don't
- * unregister them.
- */
- if (entry->kobj.parent) {
- kobject_del(&entry->kobj);
- kobject_put(&entry->kobj);
- }
-
list_del(&entry->list);
kfree(entry);
}
@@ -595,7 +574,6 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
entry->msi_attrib.entry_nr = 0;
entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
- entry->msi_attrib.pos = dev->msi_cap;
entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
if (control & PCI_MSI_FLAGS_64BIT)
@@ -699,7 +677,6 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->msi_attrib.is_64 = 1;
entry->msi_attrib.entry_nr = entries[i].entry;
entry->msi_attrib.default_irq = dev->irq;
- entry->msi_attrib.pos = dev->msix_cap;
entry->mask_base = base;
list_add_tail(&entry->list, &dev->msi_list);
@@ -806,23 +783,24 @@ out_free:
}
/**
- * pci_msi_check_device - check whether MSI may be enabled on a device
+ * pci_msi_supported - check whether MSI may be enabled on a device
* @dev: pointer to the pci_dev data structure of MSI device function
* @nvec: how many MSIs have been requested ?
- * @type: are we checking for MSI or MSI-X ?
*
* Look at global flags, the device itself, and its parent buses
* to determine if MSI/-X are supported for the device. If MSI/-X is
- * supported return 0, else return an error code.
+ * supported return 1, else return 0.
**/
-static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
+static int pci_msi_supported(struct pci_dev *dev, int nvec)
{
struct pci_bus *bus;
- int ret;
/* MSI must be globally enabled and supported by the device */
- if (!pci_msi_enable || !dev || dev->no_msi)
- return -EINVAL;
+ if (!pci_msi_enable)
+ return 0;
+
+ if (!dev || dev->no_msi || dev->current_state != PCI_D0)
+ return 0;
/*
* You can't ask to have 0 or less MSIs configured.
@@ -830,7 +808,7 @@ static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
* b) the list manipulation code assumes nvec >= 1.
*/
if (nvec < 1)
- return -ERANGE;
+ return 0;
/*
* Any bridge which does NOT route MSI transactions from its
@@ -841,13 +819,9 @@ static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
*/
for (bus = dev->bus; bus; bus = bus->parent)
if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
- return -EINVAL;
-
- ret = arch_msi_check_device(dev, nvec, type);
- if (ret)
- return ret;
+ return 0;
- return 0;
+ return 1;
}
/**
@@ -946,15 +920,14 @@ EXPORT_SYMBOL(pci_msix_vec_count);
**/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
- int status, nr_entries;
+ int nr_entries;
int i, j;
- if (!entries || !dev->msix_cap || dev->current_state != PCI_D0)
+ if (!pci_msi_supported(dev, nvec))
return -EINVAL;
- status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
- if (status)
- return status;
+ if (!entries)
+ return -EINVAL;
nr_entries = pci_msix_vec_count(dev);
if (nr_entries < 0)
@@ -978,8 +951,7 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
return -EINVAL;
}
- status = msix_capability_init(dev, entries, nvec);
- return status;
+ return msix_capability_init(dev, entries, nvec);
}
EXPORT_SYMBOL(pci_enable_msix);
@@ -1062,7 +1034,7 @@ int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
int nvec;
int rc;
- if (dev->current_state != PCI_D0)
+ if (!pci_msi_supported(dev, minvec))
return -EINVAL;
WARN_ON(!!dev->msi_enabled);
@@ -1086,17 +1058,6 @@ int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
nvec = maxvec;
do {
- rc = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
- if (rc < 0) {
- return rc;
- } else if (rc > 0) {
- if (rc < minvec)
- return -ENOSPC;
- nvec = rc;
- }
- } while (rc);
-
- do {
rc = msi_capability_init(dev, nvec);
if (rc < 0) {
return rc;
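
With arch_msi_check_device() folded into pci_msi_supported(), the support check above now returns 1 or 0 and the public entry points map an unsupported configuration to -EINVAL. The following is only a minimal sketch of a caller against the post-patch pci_enable_msix(); the driver function, vector count, and fallback policy are hypothetical:

    #include <linux/pci.h>

    /* Hypothetical caller: request up to four MSI-X vectors, otherwise fall
     * back to the legacy pdev->irq.  Names and policy are illustrative only. */
    static int example_setup_irqs(struct pci_dev *pdev)
    {
            struct msix_entry entries[4];
            int i, ret;

            for (i = 0; i < 4; i++)
                    entries[i].entry = i;

            ret = pci_enable_msix(pdev, entries, 4);
            if (ret)        /* <0: error (incl. -EINVAL from the new check); >0: fewer vectors available */
                    return ret < 0 ? ret : -ENOSPC;

            return entries[0].vector;       /* Linux IRQ of the first vector */
    }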
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 37263b0ebfe3..6ebf8edc5f3c 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-aspm.h>
#include <linux/pci-acpi.h>
@@ -17,6 +18,267 @@
#include <linux/pm_qos.h>
#include "pci.h"
+phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
+{
+ acpi_status status = AE_NOT_EXIST;
+ unsigned long long mcfg_addr;
+
+ if (handle)
+ status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
+ NULL, &mcfg_addr);
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ return (phys_addr_t)mcfg_addr;
+}
+
+static acpi_status decode_type0_hpx_record(union acpi_object *record,
+ struct hotplug_params *hpx)
+{
+ int i;
+ union acpi_object *fields = record->package.elements;
+ u32 revision = fields[1].integer.value;
+
+ switch (revision) {
+ case 1:
+ if (record->package.count != 6)
+ return AE_ERROR;
+ for (i = 2; i < 6; i++)
+ if (fields[i].type != ACPI_TYPE_INTEGER)
+ return AE_ERROR;
+ hpx->t0 = &hpx->type0_data;
+ hpx->t0->revision = revision;
+ hpx->t0->cache_line_size = fields[2].integer.value;
+ hpx->t0->latency_timer = fields[3].integer.value;
+ hpx->t0->enable_serr = fields[4].integer.value;
+ hpx->t0->enable_perr = fields[5].integer.value;
+ break;
+ default:
+ printk(KERN_WARNING
+ "%s: Type 0 Revision %d record not supported\n",
+ __func__, revision);
+ return AE_ERROR;
+ }
+ return AE_OK;
+}
+
+static acpi_status decode_type1_hpx_record(union acpi_object *record,
+ struct hotplug_params *hpx)
+{
+ int i;
+ union acpi_object *fields = record->package.elements;
+ u32 revision = fields[1].integer.value;
+
+ switch (revision) {
+ case 1:
+ if (record->package.count != 5)
+ return AE_ERROR;
+ for (i = 2; i < 5; i++)
+ if (fields[i].type != ACPI_TYPE_INTEGER)
+ return AE_ERROR;
+ hpx->t1 = &hpx->type1_data;
+ hpx->t1->revision = revision;
+ hpx->t1->max_mem_read = fields[2].integer.value;
+ hpx->t1->avg_max_split = fields[3].integer.value;
+ hpx->t1->tot_max_split = fields[4].integer.value;
+ break;
+ default:
+ printk(KERN_WARNING
+ "%s: Type 1 Revision %d record not supported\n",
+ __func__, revision);
+ return AE_ERROR;
+ }
+ return AE_OK;
+}
+
+static acpi_status decode_type2_hpx_record(union acpi_object *record,
+ struct hotplug_params *hpx)
+{
+ int i;
+ union acpi_object *fields = record->package.elements;
+ u32 revision = fields[1].integer.value;
+
+ switch (revision) {
+ case 1:
+ if (record->package.count != 18)
+ return AE_ERROR;
+ for (i = 2; i < 18; i++)
+ if (fields[i].type != ACPI_TYPE_INTEGER)
+ return AE_ERROR;
+ hpx->t2 = &hpx->type2_data;
+ hpx->t2->revision = revision;
+ hpx->t2->unc_err_mask_and = fields[2].integer.value;
+ hpx->t2->unc_err_mask_or = fields[3].integer.value;
+ hpx->t2->unc_err_sever_and = fields[4].integer.value;
+ hpx->t2->unc_err_sever_or = fields[5].integer.value;
+ hpx->t2->cor_err_mask_and = fields[6].integer.value;
+ hpx->t2->cor_err_mask_or = fields[7].integer.value;
+ hpx->t2->adv_err_cap_and = fields[8].integer.value;
+ hpx->t2->adv_err_cap_or = fields[9].integer.value;
+ hpx->t2->pci_exp_devctl_and = fields[10].integer.value;
+ hpx->t2->pci_exp_devctl_or = fields[11].integer.value;
+ hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value;
+ hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value;
+ hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
+ hpx->t2->sec_unc_err_sever_or = fields[15].integer.value;
+ hpx->t2->sec_unc_err_mask_and = fields[16].integer.value;
+ hpx->t2->sec_unc_err_mask_or = fields[17].integer.value;
+ break;
+ default:
+ printk(KERN_WARNING
+ "%s: Type 2 Revision %d record not supported\n",
+ __func__, revision);
+ return AE_ERROR;
+ }
+ return AE_OK;
+}
+
+static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *package, *record, *fields;
+ u32 type;
+ int i;
+
+ /* Clear the return buffer with zeros */
+ memset(hpx, 0, sizeof(struct hotplug_params));
+
+ status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ package = (union acpi_object *)buffer.pointer;
+ if (package->type != ACPI_TYPE_PACKAGE) {
+ status = AE_ERROR;
+ goto exit;
+ }
+
+ for (i = 0; i < package->package.count; i++) {
+ record = &package->package.elements[i];
+ if (record->type != ACPI_TYPE_PACKAGE) {
+ status = AE_ERROR;
+ goto exit;
+ }
+
+ fields = record->package.elements;
+ if (fields[0].type != ACPI_TYPE_INTEGER ||
+ fields[1].type != ACPI_TYPE_INTEGER) {
+ status = AE_ERROR;
+ goto exit;
+ }
+
+ type = fields[0].integer.value;
+ switch (type) {
+ case 0:
+ status = decode_type0_hpx_record(record, hpx);
+ if (ACPI_FAILURE(status))
+ goto exit;
+ break;
+ case 1:
+ status = decode_type1_hpx_record(record, hpx);
+ if (ACPI_FAILURE(status))
+ goto exit;
+ break;
+ case 2:
+ status = decode_type2_hpx_record(record, hpx);
+ if (ACPI_FAILURE(status))
+ goto exit;
+ break;
+ default:
+ printk(KERN_ERR "%s: Type %d record not supported\n",
+ __func__, type);
+ status = AE_ERROR;
+ goto exit;
+ }
+ }
+ exit:
+ kfree(buffer.pointer);
+ return status;
+}
+
+static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *package, *fields;
+ int i;
+
+ memset(hpp, 0, sizeof(struct hotplug_params));
+
+ status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ package = (union acpi_object *) buffer.pointer;
+ if (package->type != ACPI_TYPE_PACKAGE ||
+ package->package.count != 4) {
+ status = AE_ERROR;
+ goto exit;
+ }
+
+ fields = package->package.elements;
+ for (i = 0; i < 4; i++) {
+ if (fields[i].type != ACPI_TYPE_INTEGER) {
+ status = AE_ERROR;
+ goto exit;
+ }
+ }
+
+ hpp->t0 = &hpp->type0_data;
+ hpp->t0->revision = 1;
+ hpp->t0->cache_line_size = fields[0].integer.value;
+ hpp->t0->latency_timer = fields[1].integer.value;
+ hpp->t0->enable_serr = fields[2].integer.value;
+ hpp->t0->enable_perr = fields[3].integer.value;
+
+exit:
+ kfree(buffer.pointer);
+ return status;
+}
+
+/* pci_get_hp_params
+ *
+ * @dev - the pci_dev for which we want parameters
+ * @hpp - allocated by the caller
+ */
+int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
+{
+ acpi_status status;
+ acpi_handle handle, phandle;
+ struct pci_bus *pbus;
+
+ handle = NULL;
+ for (pbus = dev->bus; pbus; pbus = pbus->parent) {
+ handle = acpi_pci_get_bridge_handle(pbus);
+ if (handle)
+ break;
+ }
+
+ /*
+ * _HPP settings apply to all child buses, until another _HPP is
+ * encountered. If we don't find an _HPP for the input pci dev,
+ * look for it in the parent device scope since that would apply to
+ * this pci dev.
+ */
+ while (handle) {
+ status = acpi_run_hpx(handle, hpp);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ status = acpi_run_hpp(handle, hpp);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ if (acpi_is_root_bridge(handle))
+ break;
+ status = acpi_get_parent(handle, &phandle);
+ if (ACPI_FAILURE(status))
+ break;
+ handle = phandle;
+ }
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(pci_get_hp_params);
+
/**
* pci_acpi_wake_bus - Root bus wakeup notification fork function.
* @work: Work item to handle.
@@ -84,20 +346,6 @@ acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}
-phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
-{
- acpi_status status = AE_NOT_EXIST;
- unsigned long long mcfg_addr;
-
- if (handle)
- status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
- NULL, &mcfg_addr);
- if (ACPI_FAILURE(status))
- return 0;
-
- return (phys_addr_t)mcfg_addr;
-}
-
/*
* _SxD returns the D-state with the highest power
* (lowest D-state number) supported in the S-state "x".
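
pci_get_hp_params() walks from the device's bus upward, preferring _HPX over _HPP at each ACPI scope. Below is a hedged sketch of a consumer that only cares about the Type 0 cache line size; the function is hypothetical (the in-tree consumer added by this series is pci_configure_device() in probe.c):

    #include <linux/pci.h>
    #include <linux/pci_hotplug.h>

    /* Hypothetical consumer: fetch platform hotplug parameters for a device
     * and apply only the Type 0 cache line size. */
    static void example_apply_hp_params(struct pci_dev *dev)
    {
            struct hotplug_params hpp;

            if (pci_get_hp_params(dev, &hpp))
                    return;         /* no _HPX/_HPP found up the hierarchy */

            if (hpp.t0)             /* Type 0 (_HPP-style) settings present */
                    pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
                                          hpp.t0->cache_line_size);
    }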
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d04c5adafc16..2b3c89425bb5 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -55,7 +55,6 @@ int pci_add_dynid(struct pci_driver *drv,
unsigned long driver_data)
{
struct pci_dynid *dynid;
- int retval;
dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
if (!dynid)
@@ -73,9 +72,7 @@ int pci_add_dynid(struct pci_driver *drv,
list_add_tail(&dynid->node, &drv->dynids.list);
spin_unlock(&drv->dynids.lock);
- retval = driver_attach(&drv->driver);
-
- return retval;
+ return driver_attach(&drv->driver);
}
EXPORT_SYMBOL_GPL(pci_add_dynid);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 9ff0a901ecf7..92b6d9ab00e4 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -177,7 +177,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x\n",
+ return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
pci_dev->vendor, pci_dev->device,
pci_dev->subsystem_vendor, pci_dev->subsystem_device,
(u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
@@ -250,46 +250,45 @@ static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_bus *subordinate = pdev->subordinate;
- if (!pdev->subordinate)
- return 0;
-
- return sprintf(buf, "%u\n",
- !(pdev->subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI));
+ return sprintf(buf, "%u\n", subordinate ?
+ !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
+ : !pdev->no_msi);
}
static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_bus *subordinate = pdev->subordinate;
unsigned long val;
if (kstrtoul(buf, 0, &val) < 0)
return -EINVAL;
- /*
- * Bad things may happen if the no_msi flag is changed
- * while drivers are loaded.
- */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/*
- * Maybe devices without subordinate buses shouldn't have this
- * attribute in the first place?
+ * "no_msi" and "bus_flags" only affect what happens when a driver
+ * requests MSI or MSI-X. They don't affect any drivers that have
+ * already requested MSI or MSI-X.
*/
- if (!pdev->subordinate)
+ if (!subordinate) {
+ pdev->no_msi = !val;
+ dev_info(&pdev->dev, "MSI/MSI-X %s for future drivers\n",
+ val ? "allowed" : "disallowed");
return count;
-
- /* Is the flag going to change, or keep the value it already had? */
- if (!(pdev->subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI) ^
- !!val) {
- pdev->subordinate->bus_flags ^= PCI_BUS_FLAGS_NO_MSI;
-
- dev_warn(&pdev->dev, "forced subordinate bus to%s support MSI, bad things could happen\n",
- val ? "" : " not");
}
+ if (val)
+ subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
+ else
+ subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
+
+ dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
+ val ? "allowed" : "disallowed");
return count;
}
static DEVICE_ATTR_RW(msi_bus);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 2c9ac70254e2..625a4ace10b4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1003,12 +1003,19 @@ int pci_save_state(struct pci_dev *dev)
for (i = 0; i < 16; i++)
pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
dev->state_saved = true;
- if ((i = pci_save_pcie_state(dev)) != 0)
+
+ i = pci_save_pcie_state(dev);
+ if (i != 0)
return i;
- if ((i = pci_save_pcix_state(dev)) != 0)
+
+ i = pci_save_pcix_state(dev);
+ if (i != 0)
return i;
- if ((i = pci_save_vc_state(dev)) != 0)
+
+ i = pci_save_vc_state(dev);
+ if (i != 0)
return i;
+
return 0;
}
EXPORT_SYMBOL(pci_save_state);
@@ -1907,10 +1914,6 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
- /* D3cold during system suspend/hibernate is not supported */
- if (target_state > PCI_D3hot)
- target_state = PCI_D3hot;
-
pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
error = pci_set_power_state(dev, target_state);
@@ -2704,6 +2707,37 @@ int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
}
EXPORT_SYMBOL(pci_request_regions_exclusive);
+/**
+ * pci_remap_iospace - Remap the memory mapped I/O space
+ * @res: Resource describing the I/O space
+ * @phys_addr: physical address of range to be mapped
+ *
+ * Remap the memory mapped I/O space described by the @res
+ * and the CPU physical address @phys_addr into virtual address space.
+ * Only architectures that have memory mapped IO functions defined
+ * (and the PCI_IOBASE value defined) should call this function.
+ */
+int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
+{
+#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
+ unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
+
+ if (!(res->flags & IORESOURCE_IO))
+ return -EINVAL;
+
+ if (res->end > IO_SPACE_LIMIT)
+ return -EINVAL;
+
+ return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+ pgprot_device(PAGE_KERNEL));
+#else
+ /* this architecture does not have memory mapped I/O space,
+ so this function should never be called */
+ WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
+ return -ENODEV;
+#endif
+}
+
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
u16 old_cmd, cmd;
@@ -4406,6 +4440,15 @@ static void pci_no_domains(void)
#endif
}
+#ifdef CONFIG_PCI_DOMAINS
+static atomic_t __domain_nr = ATOMIC_INIT(-1);
+
+int pci_get_new_domain_nr(void)
+{
+ return atomic_inc_return(&__domain_nr);
+}
+#endif
+
/**
* pci_ext_cfg_avail - can we access extended PCI config space?
*
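
pci_remap_iospace() maps a host bridge's physical I/O window into the fixed PCI_IOBASE virtual area so that port accessors can reach it, and pci_get_new_domain_nr() hands out monotonically increasing domain numbers. A sketch of how a host-bridge driver might use the former, assuming a hypothetical resource and physical address supplied by the caller:

    #include <linux/ioport.h>
    #include <linux/pci.h>

    /* Hypothetical host-bridge fragment: @io_res and @io_phys describe the
     * bridge's I/O window; both are placeholders supplied by the caller. */
    static int example_map_io_window(struct resource *io_res,
                                     phys_addr_t io_phys)
    {
            int err;

            err = pci_remap_iospace(io_res, io_phys);
            if (err)        /* -ENODEV if the arch has no PCI_IOBASE/MMU support */
                    return err;

            return 0;
    }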
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 35d06e177917..c6849d9e86ce 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -89,15 +89,17 @@ static const char *aer_correctable_error_string[] = {
NULL,
"Replay Timer Timeout", /* Bit Position 12 */
"Advisory Non-Fatal", /* Bit Position 13 */
+ "Corrected Internal Error", /* Bit Position 14 */
+ "Header Log Overflow", /* Bit Position 15 */
};
static const char *aer_uncorrectable_error_string[] = {
- NULL,
+ "Undefined", /* Bit Position 0 */
NULL,
NULL,
NULL,
"Data Link Protocol", /* Bit Position 4 */
- NULL,
+ "Surprise Down Error", /* Bit Position 5 */
NULL,
NULL,
NULL,
@@ -113,6 +115,11 @@ static const char *aer_uncorrectable_error_string[] = {
"Malformed TLP", /* Bit Position 18 */
"ECRC", /* Bit Position 19 */
"Unsupported Request", /* Bit Position 20 */
+ "ACS Violation", /* Bit Position 21 */
+ "Uncorrectable Internal Error", /* Bit Position 22 */
+ "MC Blocked TLP", /* Bit Position 23 */
+ "AtomicOp Egress Blocked", /* Bit Position 24 */
+ "TLP Prefix Blocked Error", /* Bit Position 25 */
};
static const char *aer_agent_string[] = {
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 2ccc9b926ea7..be35da2e105e 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -93,77 +93,6 @@ static int pcie_port_resume_noirq(struct device *dev)
return 0;
}
-#ifdef CONFIG_PM_RUNTIME
-struct d3cold_info {
- bool no_d3cold;
- unsigned int d3cold_delay;
-};
-
-static int pci_dev_d3cold_info(struct pci_dev *pdev, void *data)
-{
- struct d3cold_info *info = data;
-
- info->d3cold_delay = max_t(unsigned int, pdev->d3cold_delay,
- info->d3cold_delay);
- if (pdev->no_d3cold)
- info->no_d3cold = true;
- return 0;
-}
-
-static int pcie_port_runtime_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct d3cold_info d3cold_info = {
- .no_d3cold = false,
- .d3cold_delay = PCI_PM_D3_WAIT,
- };
-
- /*
- * If any subordinate device disable D3cold, we should not put
- * the port into D3cold. The D3cold delay of port should be
- * the max of that of all subordinate devices.
- */
- pci_walk_bus(pdev->subordinate, pci_dev_d3cold_info, &d3cold_info);
- pdev->no_d3cold = d3cold_info.no_d3cold;
- pdev->d3cold_delay = d3cold_info.d3cold_delay;
- return 0;
-}
-
-static int pcie_port_runtime_resume(struct device *dev)
-{
- return 0;
-}
-
-static int pci_dev_pme_poll(struct pci_dev *pdev, void *data)
-{
- bool *pme_poll = data;
-
- if (pdev->pme_poll)
- *pme_poll = true;
- return 0;
-}
-
-static int pcie_port_runtime_idle(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- bool pme_poll = false;
-
- /*
- * If any subordinate device needs pme poll, we should keep
- * the port in D0, because we need port in D0 to poll it.
- */
- pci_walk_bus(pdev->subordinate, pci_dev_pme_poll, &pme_poll);
- /* Delay for a short while to prevent too frequent suspend/resume */
- if (!pme_poll)
- pm_schedule_suspend(dev, 10);
- return -EBUSY;
-}
-#else
-#define pcie_port_runtime_suspend NULL
-#define pcie_port_runtime_resume NULL
-#define pcie_port_runtime_idle NULL
-#endif
-
static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.suspend = pcie_port_device_suspend,
.resume = pcie_port_device_resume,
@@ -172,9 +101,6 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.poweroff = pcie_port_device_suspend,
.restore = pcie_port_device_resume,
.resume_noirq = pcie_port_resume_noirq,
- .runtime_suspend = pcie_port_runtime_suspend,
- .runtime_resume = pcie_port_runtime_resume,
- .runtime_idle = pcie_port_runtime_idle,
};
#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 4170113cde61..5ed99309c758 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -6,6 +6,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
@@ -485,7 +486,7 @@ void pci_read_bridge_bases(struct pci_bus *child)
}
}
-static struct pci_bus *pci_alloc_bus(void)
+static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
{
struct pci_bus *b;
@@ -500,6 +501,10 @@ static struct pci_bus *pci_alloc_bus(void)
INIT_LIST_HEAD(&b->resources);
b->max_bus_speed = PCI_SPEED_UNKNOWN;
b->cur_bus_speed = PCI_SPEED_UNKNOWN;
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ if (parent)
+ b->domain_nr = parent->domain_nr;
+#endif
return b;
}
@@ -671,7 +676,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
/*
* Allocate a new bus, and inherit stuff from the parent..
*/
- child = pci_alloc_bus();
+ child = pci_alloc_bus(parent);
if (!child)
return NULL;
@@ -740,6 +745,17 @@ struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
}
EXPORT_SYMBOL(pci_add_new_bus);
+static void pci_enable_crs(struct pci_dev *pdev)
+{
+ u16 root_cap = 0;
+
+ /* Enable CRS Software Visibility if supported */
+ pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
+ if (root_cap & PCI_EXP_RTCAP_CRSVIS)
+ pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
+ PCI_EXP_RTCTL_CRSSVE);
+}
+
/*
* If it's a bridge, configure it and scan the bus behind it.
* For CardBus bridges, we don't scan behind as the devices will
@@ -787,6 +803,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
+ pci_enable_crs(dev);
+
if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
!is_cardbus && !broken) {
unsigned int cmax;
@@ -1226,6 +1244,137 @@ int pci_setup_device(struct pci_dev *dev)
return 0;
}
+static struct hpp_type0 pci_default_type0 = {
+ .revision = 1,
+ .cache_line_size = 8,
+ .latency_timer = 0x40,
+ .enable_serr = 0,
+ .enable_perr = 0,
+};
+
+static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
+{
+ u16 pci_cmd, pci_bctl;
+
+ if (!hpp)
+ hpp = &pci_default_type0;
+
+ if (hpp->revision > 1) {
+ dev_warn(&dev->dev,
+ "PCI settings rev %d not supported; using defaults\n",
+ hpp->revision);
+ hpp = &pci_default_type0;
+ }
+
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
+ pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
+ if (hpp->enable_serr)
+ pci_cmd |= PCI_COMMAND_SERR;
+ if (hpp->enable_perr)
+ pci_cmd |= PCI_COMMAND_PARITY;
+ pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
+
+ /* Program bridge control value */
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
+ hpp->latency_timer);
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
+ if (hpp->enable_serr)
+ pci_bctl |= PCI_BRIDGE_CTL_SERR;
+ if (hpp->enable_perr)
+ pci_bctl |= PCI_BRIDGE_CTL_PARITY;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
+ }
+}
+
+static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
+{
+ if (hpp)
+ dev_warn(&dev->dev, "PCI-X settings not supported\n");
+}
+
+static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
+{
+ int pos;
+ u32 reg32;
+
+ if (!hpp)
+ return;
+
+ if (hpp->revision > 1) {
+ dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
+ hpp->revision);
+ return;
+ }
+
+ /*
+ * Don't allow _HPX to change MPS or MRRS settings. We manage
+ * those to make sure they're consistent with the rest of the
+ * platform.
+ */
+ hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
+ PCI_EXP_DEVCTL_READRQ;
+ hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
+ PCI_EXP_DEVCTL_READRQ);
+
+ /* Initialize Device Control Register */
+ pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
+ ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
+
+ /* Initialize Link Control Register */
+ if (dev->subordinate)
+ pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
+ ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
+
+ /* Find Advanced Error Reporting Enhanced Capability */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return;
+
+ /* Initialize Uncorrectable Error Mask Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
+ reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
+
+ /* Initialize Uncorrectable Error Severity Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
+ reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
+
+ /* Initialize Correctable Error Mask Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
+ reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
+
+ /* Initialize Advanced Error Capabilities and Control Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ /*
+ * FIXME: The following two registers are not supported yet.
+ *
+ * o Secondary Uncorrectable Error Severity Register
+ * o Secondary Uncorrectable Error Mask Register
+ */
+}
+
+static void pci_configure_device(struct pci_dev *dev)
+{
+ struct hotplug_params hpp;
+ int ret;
+
+ memset(&hpp, 0, sizeof(hpp));
+ ret = pci_get_hp_params(dev, &hpp);
+ if (ret)
+ return;
+
+ program_hpp_type2(dev, hpp.t2);
+ program_hpp_type1(dev, hpp.t1);
+ program_hpp_type0(dev, hpp.t0);
+}
+
static void pci_release_capabilities(struct pci_dev *dev)
{
pci_vpd_release(dev);
@@ -1282,8 +1431,13 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
*l == 0x0000ffff || *l == 0xffff0000)
return false;
- /* Configuration request Retry Status */
- while (*l == 0xffff0001) {
+ /*
+ * Configuration Request Retry Status. Some root ports return the
+ * actual device ID instead of the synthetic ID (0xFFFF) required
+ * by the PCIe spec. Ignore the device ID and only check for
+ * (vendor id == 1).
+ */
+ while ((*l & 0xffff) == 0x0001) {
if (!crs_timeout)
return false;
@@ -1363,6 +1517,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
int ret;
+ pci_configure_device(dev);
+
device_initialize(&dev->dev);
dev->dev.release = pci_release_dev;
@@ -1751,13 +1907,14 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
char bus_addr[64];
char *fmt;
- b = pci_alloc_bus();
+ b = pci_alloc_bus(NULL);
if (!b)
return NULL;
b->sysdata = sysdata;
b->ops = ops;
b->number = b->busn_res.start = bus;
+ pci_bus_assign_domain_nr(b, parent);
b2 = pci_find_bus(pci_domain_nr(b), bus);
if (b2) {
/* If we already got to this bus through a different bridge, ignore it */
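
The _HPX Type 2 programming above applies each register as (value & mask_and) | mask_or, with the MPS/MRRS bits forced out of the masks so the kernel keeps managing them. A small illustration of that AND/OR combination, with made-up values:

    #include <linux/types.h>

    /* Illustration of the _HPX Type 2 AND/OR combination used above; the
     * values are made up.  Bits in neither mask keep their current value. */
    static u32 example_hpx_apply(u32 cur, u32 mask_and, u32 mask_or)
    {
            return (cur & mask_and) | mask_or;
    }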
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 80c2d014283d..90acb32c85b1 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -24,6 +24,7 @@
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/ktime.h>
+#include <linux/mm.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
@@ -287,6 +288,25 @@ static void quirk_citrine(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
+/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
+static void quirk_extend_bar_to_page(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
+ r->end = PAGE_SIZE - 1;
+ r->start = 0;
+ r->flags |= IORESOURCE_UNSET;
+ dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
+ i, r);
+ }
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
+
/*
* S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
* If it's needed, re-allocate the region.
@@ -2985,6 +3005,8 @@ DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
*/
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
+ quirk_broken_intx_masking);
#ifdef CONFIG_ACPI
/*
@@ -3512,57 +3534,6 @@ DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
/* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */
DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
-static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev)
-{
- if (!PCI_FUNC(dev->devfn))
- return pci_dev_get(dev);
-
- return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
-}
-
-static const struct pci_dev_dma_source {
- u16 vendor;
- u16 device;
- struct pci_dev *(*dma_source)(struct pci_dev *dev);
-} pci_dev_dma_source[] = {
- /*
- * https://bugzilla.redhat.com/show_bug.cgi?id=605888
- *
- * Some Ricoh devices use the function 0 source ID for DMA on
- * other functions of a multifunction device. The DMA devices
- * is therefore function 0, which will have implications of the
- * iommu grouping of these devices.
- */
- { PCI_VENDOR_ID_RICOH, 0xe822, pci_func_0_dma_source },
- { PCI_VENDOR_ID_RICOH, 0xe230, pci_func_0_dma_source },
- { PCI_VENDOR_ID_RICOH, 0xe832, pci_func_0_dma_source },
- { PCI_VENDOR_ID_RICOH, 0xe476, pci_func_0_dma_source },
- { 0 }
-};
-
-/*
- * IOMMUs with isolation capabilities need to be programmed with the
- * correct source ID of a device. In most cases, the source ID matches
- * the device doing the DMA, but sometimes hardware is broken and will
- * tag the DMA as being sourced from a different device. This function
- * allows that translation. Note that the reference count of the
- * returned device is incremented on all paths.
- */
-struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
-{
- const struct pci_dev_dma_source *i;
-
- for (i = pci_dev_dma_source; i->dma_source; i++) {
- if ((i->vendor == dev->vendor ||
- i->vendor == (u16)PCI_ANY_ID) &&
- (i->device == dev->device ||
- i->device == (u16)PCI_ANY_ID))
- return i->dma_source(dev);
- }
-
- return pci_dev_get(dev);
-}
-
/*
* AMD has indicated that the devices below do not support peer-to-peer
* in any system where they are found in the southbridge with an AMD
@@ -3582,6 +3553,11 @@ struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
* 1002:439d SB7x0/SB8x0/SB9x0 LPC host controller
* 1002:4384 SBx00 PCI to PCI Bridge
* 1002:4399 SB7x0/SB8x0/SB9x0 USB OHCI2 Controller
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=81841#c15
+ *
+ * 1022:780f [AMD] FCH PCI Bridge
+ * 1022:7809 [AMD] FCH USB OHCI Controller
*/
static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
{
@@ -3664,6 +3640,23 @@ static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
return acs_flags & ~flags ? 0 : 1;
}
+static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ /*
+ * SV, TB, and UF are not relevant to multifunction endpoints.
+ *
+ * Multifunction devices are only required to implement RR, CR, and DT
+ * in their ACS capability if they support peer-to-peer transactions.
+ * Devices matching this quirk have been verified by the vendor to not
+ * perform peer-to-peer with other functions, allowing us to mask out
+ * these bits as if they were unimplemented in the ACS capability.
+ */
+ acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
+ PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+
+ return acs_flags ? 0 : 1;
+}
+
static const struct pci_dev_acs_enabled {
u16 vendor;
u16 device;
@@ -3675,6 +3668,30 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_AMD, 0x780f, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
{ 0 }
};
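
Each ACS quirk above follows the same contract: report 1 only when every requested ACS flag is effectively provided, 0 otherwise. A minimal illustration of that check with hypothetical flag values:

    #include <linux/pci.h>

    /* Illustration of the ACS quirk return contract; @provided is a made-up
     * set of flags the device is known to satisfy. */
    static int example_acs_check(u16 provided)
    {
            u16 requested = PCI_ACS_RR | PCI_ACS_CR;    /* flags being asked for */

            return (requested & ~provided) ? 0 : 1;     /* 1 only if all provided */
    }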
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 827ad831f1dd..a81f413083e4 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -103,40 +103,6 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
return ret;
}
-/*
- * find the upstream PCIe-to-PCI bridge of a PCI device
- * if the device is PCIE, return NULL
- * if the device isn't connected to a PCIe bridge (that is its parent is a
- * legacy PCI bridge and the bridge is directly connected to bus 0), return its
- * parent
- */
-struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
-{
- struct pci_dev *tmp = NULL;
-
- if (pci_is_pcie(pdev))
- return NULL;
- while (1) {
- if (pci_is_root_bus(pdev->bus))
- break;
- pdev = pdev->bus->self;
- /* a p2p bridge */
- if (!pci_is_pcie(pdev)) {
- tmp = pdev;
- continue;
- }
- /* PCI device should connect to a PCIe bridge */
- if (pci_pcie_type(pdev) != PCI_EXP_TYPE_PCI_BRIDGE) {
- /* Busted hardware? */
- WARN_ON_ONCE(1);
- return NULL;
- }
- return pdev;
- }
-
- return tmp;
-}
-
static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
{
struct pci_bus *child;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 6373985ad3f7..0482235eee92 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1652,7 +1652,7 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
struct pci_dev_resource *fail_res;
int retval;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH;
+ IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
again:
__pci_bus_size_bridges(parent, &add_list);
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index ce4588851274..ee16f0c5c47d 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -32,7 +32,6 @@
#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
-#define PCI_VENDOR_ID_VMWARE 0x15AD
#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0
/*
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index e50790e91f76..1de3f94aa7de 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -727,7 +727,7 @@ static int __init init_pci_ext_cap_err_perm(struct perm_bits *perm)
p_setd(perm, 0, ALL_VIRT, NO_WRITE);
/* Writable bits mask */
- mask = PCI_ERR_UNC_TRAIN | /* Training */
+ mask = PCI_ERR_UNC_UND | /* Undefined */
PCI_ERR_UNC_DLP | /* Data Link Protocol */
PCI_ERR_UNC_SURPDN | /* Surprise Down */
PCI_ERR_UNC_POISON_TLP | /* Poisoned TLP */
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 259ba2661543..017069a455d4 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -133,7 +133,7 @@ static void pcistub_device_release(struct kref *kref)
xen_pcibk_config_free_dyn_fields(dev);
xen_pcibk_config_free_dev(dev);
- dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+ pci_clear_dev_assigned(dev);
pci_dev_put(dev);
kfree(psdev);
@@ -413,7 +413,7 @@ static int pcistub_init_device(struct pci_dev *dev)
dev_dbg(&dev->dev, "reset device\n");
xen_pcibk_reset_device(dev);
- dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
+ pci_set_dev_assigned(dev);
return 0;
config_release:
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 975e1cc75edb..b8fdc57a7335 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -331,7 +331,7 @@ static inline void iounmap(void __iomem *addr)
#ifndef CONFIG_GENERIC_IOMAP
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
- return (void __iomem *) port;
+ return PCI_IOBASE + (port & IO_SPACE_LIMIT);
}
static inline void ioport_unmap(void __iomem *p)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 53b2acc38213..977e545a64c3 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -249,6 +249,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
#define pgprot_writecombine pgprot_noncached
#endif
+#ifndef pgprot_device
+#define pgprot_device pgprot_noncached
+#endif
+
/*
* When walking page tables, get the address of the next boundary,
* or the end address of the range if that comes earlier. Although no
diff --git a/include/linux/aer.h b/include/linux/aer.h
index c826d1c28f9c..4fef65e57023 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -7,6 +7,8 @@
#ifndef _AER_H_
#define _AER_H_
+#include <linux/types.h>
+
#define AER_NONFATAL 0
#define AER_FATAL 1
#define AER_CORRECTABLE 2
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 142ec544167c..2c5250222278 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -215,6 +215,11 @@ static inline int __deprecated check_region(resource_size_t s,
/* Wrappers for managed devices */
struct device;
+
+extern int devm_request_resource(struct device *dev, struct resource *root,
+ struct resource *new);
+extern void devm_release_resource(struct device *dev, struct resource *new);
+
#define devm_request_region(dev,start,n,name) \
__devm_request_region(dev, &ioport_resource, (start), (n), (name))
#define devm_request_mem_region(dev,start,n,name) \
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8103f32f6d87..44f4746d033b 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -29,7 +29,6 @@ struct msi_desc {
__u8 multi_cap : 3; /* log2 num of messages supported */
__u8 maskbit : 1; /* mask-pending bit supported ? */
__u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
- __u8 pos; /* Location of the msi capability */
__u16 entry_nr; /* specific enabled entry */
unsigned default_irq; /* default pre-assigned irq */
} msi_attrib;
@@ -47,8 +46,6 @@ struct msi_desc {
/* Last set MSI message */
struct msi_msg msg;
-
- struct kobject kobj;
};
/*
@@ -60,7 +57,6 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
-int arch_msi_check_device(struct pci_dev* dev, int nvec, int type);
void arch_restore_msi_irqs(struct pci_dev *dev);
void default_teardown_msi_irqs(struct pci_dev *dev);
@@ -77,8 +73,6 @@ struct msi_chip {
int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev,
struct msi_desc *desc);
void (*teardown_irq)(struct msi_chip *chip, unsigned int irq);
- int (*check_device)(struct msi_chip *chip, struct pci_dev *dev,
- int nvec, int type);
};
#endif /* LINUX_MSI_H */
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index fb7b7221e063..8cb14eb393d6 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -23,17 +23,6 @@ struct of_pci_range {
#define for_each_of_pci_range(parser, range) \
for (; of_pci_range_parser_one(parser, range);)
-static inline void of_pci_range_to_resource(struct of_pci_range *range,
- struct device_node *np,
- struct resource *res)
-{
- res->flags = range->flags;
- res->start = range->cpu_addr;
- res->end = range->cpu_addr + range->size - 1;
- res->parent = res->child = res->sibling = NULL;
- res->name = np->full_name;
-}
-
/* Translate a DMA address from device space to CPU space */
extern u64 of_translate_dma_address(struct device_node *dev,
const __be32 *in_addr);
@@ -55,7 +44,9 @@ extern void __iomem *of_iomap(struct device_node *device, int index);
extern const __be32 *of_get_address(struct device_node *dev, int index,
u64 *size, unsigned int *flags);
+extern int pci_register_io_range(phys_addr_t addr, resource_size_t size);
extern unsigned long pci_address_to_pio(phys_addr_t addr);
+extern phys_addr_t pci_pio_to_address(unsigned long pio);
extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node);
@@ -80,6 +71,11 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index,
return NULL;
}
+static inline phys_addr_t pci_pio_to_address(unsigned long pio)
+{
+ return 0;
+}
+
static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node)
{
@@ -138,6 +134,9 @@ extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
u64 *size, unsigned int *flags);
extern int of_pci_address_to_resource(struct device_node *dev, int bar,
struct resource *r);
+extern int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res);
#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */
static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
struct resource *r)
@@ -150,6 +149,12 @@ static inline const __be32 *of_get_pci_address(struct device_node *dev,
{
return NULL;
}
+static inline int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */
#endif /* __OF_ADDRESS_H */
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index dde3a4a0fa5d..1fd207e7a847 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -15,6 +15,7 @@ struct device_node *of_pci_find_child_device(struct device_node *parent,
int of_pci_get_devfn(struct device_node *np);
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
+int of_get_pci_domain_nr(struct device_node *node);
#else
static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
@@ -43,6 +44,18 @@ of_pci_parse_bus_range(struct device_node *node, struct resource *res)
{
return -EINVAL;
}
+
+static inline int
+of_get_pci_domain_nr(struct device_node *node)
+{
+ return -1;
+}
+#endif
+
+#if defined(CONFIG_OF_ADDRESS)
+int of_pci_get_host_bridge_resources(struct device_node *dev,
+ unsigned char busno, unsigned char bus_max,
+ struct list_head *resources, resource_size_t *io_base);
#endif
#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
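
of_pci_get_host_bridge_resources() is the new helper DT-based host drivers call to turn a node's bus-range and ranges properties into a resource list. A sketch of a hypothetical caller, with error unwinding omitted:

    #include <linux/ioport.h>
    #include <linux/list.h>
    #include <linux/of_pci.h>

    /* Hypothetical DT host-bridge probe fragment: parse the node's bus-range
     * and ranges properties into @resources.  Cleanup on failure is omitted. */
    static int example_parse_host(struct device_node *np,
                                  struct list_head *resources)
    {
            resource_size_t iobase;

            return of_pci_get_host_bridge_resources(np, 0, 0xff,
                                                    resources, &iobase);
    }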
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 96453f9bc8ba..5be8db45e368 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -45,7 +45,7 @@
* In the interest of not exposing interfaces to user-space unnecessarily,
* the following kernel-only defines are being added here.
*/
-#define PCI_DEVID(bus, devfn) ((((u16)bus) << 8) | devfn)
+#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
@@ -457,6 +457,9 @@ struct pci_bus {
unsigned char primary; /* number of primary bridge */
unsigned char max_bus_speed; /* enum pci_bus_speed */
unsigned char cur_bus_speed; /* enum pci_bus_speed */
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ int domain_nr;
+#endif
char name[48];
@@ -1103,6 +1106,9 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
resource_size_t),
void *alignf_data);
+
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+
static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
struct pci_bus_region region;
@@ -1288,12 +1294,32 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
*/
#ifdef CONFIG_PCI_DOMAINS
extern int pci_domains_supported;
+int pci_get_new_domain_nr(void);
#else
enum { pci_domains_supported = 0 };
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
+static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#endif /* CONFIG_PCI_DOMAINS */
+/*
+ * Generic implementation for PCI domain support. If your
+ * architecture does not need custom management of PCI
+ * domains then this implementation will be used
+ */
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+static inline int pci_domain_nr(struct pci_bus *bus)
+{
+ return bus->domain_nr;
+}
+void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent);
+#else
+static inline void pci_bus_assign_domain_nr(struct pci_bus *bus,
+ struct device *parent)
+{
+}
+#endif
+
/* some architectures require additional setup to direct VGA traffic */
typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
@@ -1402,6 +1428,7 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
+static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
@@ -1563,16 +1590,11 @@ enum pci_fixup_pass {
#ifdef CONFIG_PCI_QUIRKS
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
-struct pci_dev *pci_get_dma_source(struct pci_dev *dev);
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
void pci_dev_specific_enable_acs(struct pci_dev *dev);
#else
static inline void pci_fixup_device(enum pci_fixup_pass pass,
struct pci_dev *dev) { }
-static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
-{
- return pci_dev_get(dev);
-}
static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
u16 acs_flags)
{
@@ -1707,7 +1729,7 @@ bool pci_acs_path_enabled(struct pci_dev *start,
struct pci_dev *end, u16 acs_flags);
#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
-#define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT)
+#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
/* Large Resource Data Type Tag Item Names */
#define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
@@ -1834,15 +1856,17 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
int (*fn)(struct pci_dev *pdev,
u16 alias, void *data), void *data);
-/**
- * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device
- * @pdev: the PCI device
- *
- * if the device is PCIE, return NULL
- * if the device isn't connected to a PCIe bridge (that is its parent is a
- * legacy PCI bridge and the bridge is directly connected to bus 0), return its
- * parent
- */
-struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
-
+/* helper functions for operation of device flag */
+static inline void pci_set_dev_assigned(struct pci_dev *pdev)
+{
+ pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
+}
+static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
+{
+ pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+}
+static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
+{
+ return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
+}
#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 5f2e559af6b0..2706ee9a4327 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -187,6 +187,4 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
return -ENODEV;
}
#endif
-
-void pci_configure_slot(struct pci_dev *dev);
#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 6ed0bb73a864..da9e6f753196 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2245,6 +2245,8 @@
#define PCI_VENDOR_ID_MORETON 0x15aa
#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000
+#define PCI_VENDOR_ID_VMWARE 0x15ad
+
#define PCI_VENDOR_ID_ZOLTRIX 0x15b0
#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 47da53c27ffa..79abb9c71772 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -8,6 +8,7 @@
#include <linux/tracepoint.h>
#include <linux/edac.h>
#include <linux/ktime.h>
+#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/cper.h>
@@ -173,25 +174,34 @@ TRACE_EVENT(mc_event,
* u8 severity - error severity 0:NONFATAL 1:FATAL 2:CORRECTED
*/
-#define aer_correctable_errors \
- {BIT(0), "Receiver Error"}, \
- {BIT(6), "Bad TLP"}, \
- {BIT(7), "Bad DLLP"}, \
- {BIT(8), "RELAY_NUM Rollover"}, \
- {BIT(12), "Replay Timer Timeout"}, \
- {BIT(13), "Advisory Non-Fatal"}
-
-#define aer_uncorrectable_errors \
- {BIT(4), "Data Link Protocol"}, \
- {BIT(12), "Poisoned TLP"}, \
- {BIT(13), "Flow Control Protocol"}, \
- {BIT(14), "Completion Timeout"}, \
- {BIT(15), "Completer Abort"}, \
- {BIT(16), "Unexpected Completion"}, \
- {BIT(17), "Receiver Overflow"}, \
- {BIT(18), "Malformed TLP"}, \
- {BIT(19), "ECRC"}, \
- {BIT(20), "Unsupported Request"}
+#define aer_correctable_errors \
+ {PCI_ERR_COR_RCVR, "Receiver Error"}, \
+ {PCI_ERR_COR_BAD_TLP, "Bad TLP"}, \
+ {PCI_ERR_COR_BAD_DLLP, "Bad DLLP"}, \
+ {PCI_ERR_COR_REP_ROLL, "RELAY_NUM Rollover"}, \
+ {PCI_ERR_COR_REP_TIMER, "Replay Timer Timeout"}, \
+ {PCI_ERR_COR_ADV_NFAT, "Advisory Non-Fatal Error"}, \
+ {PCI_ERR_COR_INTERNAL, "Corrected Internal Error"}, \
+ {PCI_ERR_COR_LOG_OVER, "Header Log Overflow"}
+
+#define aer_uncorrectable_errors \
+ {PCI_ERR_UNC_UND, "Undefined"}, \
+ {PCI_ERR_UNC_DLP, "Data Link Protocol Error"}, \
+ {PCI_ERR_UNC_SURPDN, "Surprise Down Error"}, \
+ {PCI_ERR_UNC_POISON_TLP,"Poisoned TLP"}, \
+ {PCI_ERR_UNC_FCP, "Flow Control Protocol Error"}, \
+ {PCI_ERR_UNC_COMP_TIME, "Completion Timeout"}, \
+ {PCI_ERR_UNC_COMP_ABORT,"Completer Abort"}, \
+ {PCI_ERR_UNC_UNX_COMP, "Unexpected Completion"}, \
+ {PCI_ERR_UNC_RX_OVER, "Receiver Overflow"}, \
+ {PCI_ERR_UNC_MALF_TLP, "Malformed TLP"}, \
+ {PCI_ERR_UNC_ECRC, "ECRC Error"}, \
+ {PCI_ERR_UNC_UNSUP, "Unsupported Request Error"}, \
+ {PCI_ERR_UNC_ACSV, "ACS Violation"}, \
+ {PCI_ERR_UNC_INTN, "Uncorrectable Internal Error"},\
+ {PCI_ERR_UNC_MCBTLP, "MC Blocked TLP"}, \
+ {PCI_ERR_UNC_ATOMEG, "AtomicOp Egress Blocked"}, \
+ {PCI_ERR_UNC_TLPPRE, "TLP Prefix Blocked Error"}
TRACE_EVENT(aer_event,
TP_PROTO(const char *dev_name,
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 30db069bce62..4a1d0cc38ff2 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -552,6 +552,7 @@
#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
#define PCI_EXP_RTCAP 30 /* Root Capabilities */
+#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
#define PCI_EXP_RTSTA 32 /* Root Status */
#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
@@ -630,7 +631,7 @@
/* Advanced Error Reporting */
#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
-#define PCI_ERR_UNC_TRAIN 0x00000001 /* Training */
+#define PCI_ERR_UNC_UND 0x00000001 /* Undefined */
#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */
#define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */
diff --git a/kernel/resource.c b/kernel/resource.c
index 60c5a3856ab7..46322019ab7d 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1245,6 +1245,76 @@ int release_mem_region_adjustable(struct resource *parent,
/*
* Managed region resource
*/
+static void devm_resource_release(struct device *dev, void *ptr)
+{
+ struct resource **r = ptr;
+
+ release_resource(*r);
+}
+
+/**
+ * devm_request_resource() - request and reserve an I/O or memory resource
+ * @dev: device for which to request the resource
+ * @root: root of the resource tree from which to request the resource
+ * @new: descriptor of the resource to request
+ *
+ * This is a device-managed version of request_resource(). There is usually
+ * no need to release resources requested by this function explicitly since
+ * that will be taken care of when the device is unbound from its driver.
+ * If for some reason the resource needs to be released explicitly, because
+ * of ordering issues for example, drivers must call devm_release_resource()
+ * rather than the regular release_resource().
+ *
+ * When a conflict is detected between any existing resources and the newly
+ * requested resource, an error message will be printed.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int devm_request_resource(struct device *dev, struct resource *root,
+ struct resource *new)
+{
+ struct resource *conflict, **ptr;
+
+ ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ *ptr = new;
+
+ conflict = request_resource_conflict(root, new);
+ if (conflict) {
+ dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
+ new, conflict->name, conflict);
+ devres_free(ptr);
+ return -EBUSY;
+ }
+
+ devres_add(dev, ptr);
+ return 0;
+}
+EXPORT_SYMBOL(devm_request_resource);
+
+static int devm_resource_match(struct device *dev, void *res, void *data)
+{
+ struct resource **ptr = res;
+
+ return *ptr == data;
+}
+
+/**
+ * devm_release_resource() - release a previously requested resource
+ * @dev: device for which to release the resource
+ * @new: descriptor of the resource to release
+ *
+ * Releases a resource previously requested using devm_request_resource().
+ */
+void devm_release_resource(struct device *dev, struct resource *new)
+{
+ WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
+ new));
+}
+EXPORT_SYMBOL(devm_release_resource);
+
struct region_devres {
struct resource *parent;
resource_size_t start;
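
devm_request_resource() ties the reservation to the device's lifetime, so the release happens automatically on unbind. A sketch of a hypothetical platform-driver probe using a made-up MMIO window:

    #include <linux/device.h>
    #include <linux/ioport.h>

    /* Hypothetical platform driver: reserve a made-up MMIO window for the
     * lifetime of the binding; it is released automatically on unbind. */
    static struct resource example_window = {
            .name  = "example-window",
            .start = 0xfed40000,
            .end   = 0xfed40fff,
            .flags = IORESOURCE_MEM,
    };

    static int example_probe(struct device *dev)
    {
            return devm_request_resource(dev, &iomem_resource, &example_window);
    }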
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 5819a2708d7e..e05000e200d2 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -302,7 +302,7 @@ static void kvm_free_assigned_device(struct kvm *kvm,
else
pci_restore_state(assigned_dev->dev);
- assigned_dev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+ pci_clear_dev_assigned(assigned_dev->dev);
pci_release_regions(assigned_dev->dev);
pci_disable_device(assigned_dev->dev);
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 714b94932312..e723bb91aa34 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -203,7 +203,7 @@ int kvm_assign_device(struct kvm *kvm,
goto out_unmap;
}
- pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
+ pci_set_dev_assigned(pdev);
dev_info(&pdev->dev, "kvm assign device\n");
@@ -229,7 +229,7 @@ int kvm_deassign_device(struct kvm *kvm,
iommu_detach_device(domain, &pdev->dev);
- pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+ pci_clear_dev_assigned(pdev);
dev_info(&pdev->dev, "kvm deassign device\n");