Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/rtl8169.c    127
-rw-r--r--  drivers/pci/Makefile       1
-rw-r--r--  drivers/pci/pci_tegra.c 1143
-rw-r--r--  drivers/power/Makefile     1
-rw-r--r--  drivers/power/as3722.c   264
5 files changed, 1502 insertions, 34 deletions
diff --git a/drivers/net/rtl8169.c b/drivers/net/rtl8169.c
index c3ce17516c..cea6701203 100644
--- a/drivers/net/rtl8169.c
+++ b/drivers/net/rtl8169.c
@@ -41,6 +41,7 @@
* Modified to use le32_to_cpu and cpu_to_le32 properly
*/
#include <common.h>
+#include <errno.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
@@ -79,7 +80,11 @@ static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
#define NUM_TX_DESC 1 /* Number of Tx descriptor registers */
-#define NUM_RX_DESC 4 /* Number of Rx descriptor registers */
+#ifdef CONFIG_SYS_RX_ETH_BUFFER
+ #define NUM_RX_DESC CONFIG_SYS_RX_ETH_BUFFER
+#else
+ #define NUM_RX_DESC 4 /* Number of Rx descriptor registers */
+#endif
#define RX_BUF_SIZE 1536 /* Rx Buffer size */
#define RX_BUF_LEN 8192
@@ -248,6 +253,7 @@ static struct {
{"RTL-8168b/8111sb", 0x38, 0xff7e1880,},
{"RTL-8168d/8111d", 0x28, 0xff7e1880,},
{"RTL-8168evl/8111evl", 0x2e, 0xff7e1880,},
+ {"RTL-8168/8111g", 0x4c, 0xff7e1880,},
{"RTL-8101e", 0x34, 0xff7e1880,},
{"RTL-8100e", 0x32, 0xff7e1880,},
};
@@ -273,23 +279,40 @@ struct RxDesc {
u32 buf_Haddr;
};
-/* Define the TX Descriptor */
-static u8 tx_ring[NUM_TX_DESC * sizeof(struct TxDesc) + 256];
-/* __attribute__ ((aligned(256))); */
+#define RTL8169_DESC_SIZE 16
-/* Create a static buffer of size RX_BUF_SZ for each
-TX Descriptor. All descriptors point to a
-part of this buffer */
-static unsigned char txb[NUM_TX_DESC * RX_BUF_SIZE];
+#if ARCH_DMA_MINALIGN > 256
+# define RTL8169_ALIGN ARCH_DMA_MINALIGN
+#else
+# define RTL8169_ALIGN 256
+#endif
-/* Define the RX Descriptor */
-static u8 rx_ring[NUM_RX_DESC * sizeof(struct TxDesc) + 256];
- /* __attribute__ ((aligned(256))); */
+/*
+ * Warn if the cache-line size is larger than the descriptor size. In such
+ * cases the driver will likely fail because the CPU needs to flush the cache
+ * when requeuing RX buffers, therefore descriptors written by the hardware
+ * may be discarded.
+ *
+ * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause
+ * the driver to allocate descriptors from a pool of non-cached memory.
+ */
+#if RTL8169_DESC_SIZE < ARCH_DMA_MINALIGN
+#if !defined(CONFIG_SYS_NONCACHED_MEMORY) && !defined(CONFIG_SYS_DCACHE_OFF)
+#warning cache-line size is larger than descriptor size
+#endif
+#endif
-/* Create a static buffer of size RX_BUF_SZ for each
-RX Descriptor All descriptors point to a
-part of this buffer */
-static unsigned char rxb[NUM_RX_DESC * RX_BUF_SIZE];
+/*
+ * Create a static buffer of size RX_BUF_SIZE for each TX Descriptor. All
+ * descriptors point to a part of this buffer.
+ */
+DEFINE_ALIGN_BUFFER(u8, txb, NUM_TX_DESC * RX_BUF_SIZE, RTL8169_ALIGN);
+
+/*
+ * Create a static buffer of size RX_BUF_SIZE for each RX Descriptor. All
+ * descriptors point to a part of this buffer.
+ */
+DEFINE_ALIGN_BUFFER(u8, rxb, NUM_RX_DESC * RX_BUF_SIZE, RTL8169_ALIGN);
struct rtl8169_private {
void *mmio_addr; /* memory map physical address */
@@ -297,8 +320,6 @@ struct rtl8169_private {
unsigned long cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
unsigned long cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
unsigned long dirty_tx;
- unsigned char *TxDescArrays; /* Index of Tx Descriptor buffer */
- unsigned char *RxDescArrays; /* Index of Rx Descriptor buffer */
struct TxDesc *TxDescArray; /* Index of 256-alignment Tx Descriptor buffer */
struct RxDesc *RxDescArray; /* Index of 256-alignment Rx Descriptor buffer */
unsigned char *RxBufferRings; /* Index of Rx Buffer */
@@ -398,34 +419,71 @@ match:
}
/*
+ * TX and RX descriptors are 16 bytes. This causes problems with the cache
+ * maintenance on CPUs where the cache-line size exceeds the size of these
+ * descriptors. What will happen is that when the driver receives a packet
+ * it will be immediately requeued for the hardware to reuse. The CPU will
+ * therefore need to flush the cache-line containing the descriptor, which
+ * will cause all other descriptors in the same cache-line to be flushed
+ * along with it. If one of those descriptors had been written to by the
+ * device those changes (and the associated packet) will be lost.
+ *
+ * To work around this, we make use of non-cached memory if available. If
+ * descriptors are mapped uncached there's no need to manually flush them
+ * or invalidate them.
+ *
+ * Note that this only applies to descriptors. The packet data buffers do
+ * not have the same constraints since they are 1536 bytes large, so they
+ * are unlikely to share cache-lines.
+ */
+static void *rtl_alloc_descs(unsigned int num)
+{
+ size_t size = num * RTL8169_DESC_SIZE;
+
+#ifdef CONFIG_SYS_NONCACHED_MEMORY
+ return (void *)noncached_alloc(size, RTL8169_ALIGN);
+#else
+ return memalign(RTL8169_ALIGN, size);
+#endif
+}
+
+/*
* Cache maintenance functions. These are simple wrappers around the more
* general purpose flush_cache() and invalidate_dcache_range() functions.
*/
static void rtl_inval_rx_desc(struct RxDesc *desc)
{
+#ifndef CONFIG_SYS_NONCACHED_MEMORY
unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
unsigned long end = ALIGN(start + sizeof(*desc), ARCH_DMA_MINALIGN);
invalidate_dcache_range(start, end);
+#endif
}
static void rtl_flush_rx_desc(struct RxDesc *desc)
{
+#ifndef CONFIG_SYS_NONCACHED_MEMORY
flush_cache((unsigned long)desc, sizeof(*desc));
+#endif
}
static void rtl_inval_tx_desc(struct TxDesc *desc)
{
+#ifndef CONFIG_SYS_NONCACHED_MEMORY
unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
unsigned long end = ALIGN(start + sizeof(*desc), ARCH_DMA_MINALIGN);
invalidate_dcache_range(start, end);
+#endif
}
static void rtl_flush_tx_desc(struct TxDesc *desc)
{
+#ifndef CONFIG_SYS_NONCACHED_MEMORY
flush_cache((unsigned long)desc, sizeof(*desc));
+#endif
}
static void rtl_inval_buffer(void *buf, size_t size)
@@ -707,16 +765,6 @@ static int rtl_reset(struct eth_device *dev, bd_t *bis)
printf ("%s\n", __FUNCTION__);
#endif
- tpc->TxDescArrays = tx_ring;
- /* Tx Desscriptor needs 256 bytes alignment; */
- tpc->TxDescArray = (struct TxDesc *) ((unsigned long)(tpc->TxDescArrays +
- 255) & ~255);
-
- tpc->RxDescArrays = rx_ring;
- /* Rx Desscriptor needs 256 bytes alignment; */
- tpc->RxDescArray = (struct RxDesc *) ((unsigned long)(tpc->RxDescArrays +
- 255) & ~255);
-
rtl8169_init_ring(dev);
rtl8169_hw_start(dev);
/* Construct a perfect filter frame with the mac address as first match
@@ -758,10 +806,6 @@ static void rtl_halt(struct eth_device *dev)
RTL_W32(RxMissed, 0);
- tpc->TxDescArrays = NULL;
- tpc->RxDescArrays = NULL;
- tpc->TxDescArray = NULL;
- tpc->RxDescArray = NULL;
for (i = 0; i < NUM_RX_DESC; i++) {
tpc->RxBufferRing[i] = NULL;
}
@@ -906,7 +950,16 @@ static int rtl_init(struct eth_device *dev, bd_t *bis)
#endif
}
- return 1;
+
+ tpc->RxDescArray = rtl_alloc_descs(NUM_RX_DESC);
+ if (!tpc->RxDescArray)
+ return -ENOMEM;
+
+ tpc->TxDescArray = rtl_alloc_descs(NUM_TX_DESC);
+ if (!tpc->TxDescArray)
+ return -ENOMEM;
+
+ return 0;
}
int rtl8169_initialize(bd_t *bis)
@@ -920,6 +973,7 @@ int rtl8169_initialize(bd_t *bis)
while(1){
unsigned int region;
u16 device;
+ int err;
/* Find RTL8169 */
if ((devno = pci_find_devices(supported, idx++)) < 0)
@@ -958,9 +1012,14 @@ int rtl8169_initialize(bd_t *bis)
dev->send = rtl_send;
dev->recv = rtl_recv;
- eth_register (dev);
+ err = rtl_init(dev, bis);
+ if (err < 0) {
+ printf(pr_fmt("failed to initialize card: %d\n"), err);
+ free(dev);
+ continue;
+ }
- rtl_init(dev, bis);
+ eth_register (dev);
card_number++;
}
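
The rtl8169 changes above come down to one allocation policy: place the DMA descriptors either in non-cached memory (when CONFIG_SYS_NONCACHED_MEMORY is available) or in memory aligned to at least a cache line, so that flushing the cache line of one descriptor can never discard a neighbouring descriptor the NIC has just written. A minimal standalone sketch of that policy, with hypothetical names and an assumed 64-byte cache line rather than the driver's real configuration:

/* Sketch only: DESC_SIZE, CACHE_LINE and HAVE_NONCACHED_POOL stand in for the
 * driver's RTL8169_DESC_SIZE, ARCH_DMA_MINALIGN and CONFIG_SYS_NONCACHED_MEMORY. */
#include <stdlib.h>

#define DESC_SIZE  16                            /* one TX/RX descriptor */
#define CACHE_LINE 64                            /* assumed cache-line size */
#define DESC_ALIGN (CACHE_LINE > 256 ? CACHE_LINE : 256)

static void *alloc_descs(unsigned int num)
{
#ifdef HAVE_NONCACHED_POOL
	/* descriptors come from an uncached pool: no flush/invalidate needed */
	return (void *)noncached_alloc(num * DESC_SIZE, DESC_ALIGN);
#else
	/* aligned_alloc() wants the size rounded up to a multiple of the alignment */
	size_t size = (num * DESC_SIZE + DESC_ALIGN - 1) & ~(size_t)(DESC_ALIGN - 1);

	return aligned_alloc(DESC_ALIGN, size);
#endif
}

When the non-cached pool is used, the rtl_inval_*/rtl_flush_* wrappers above compile to no-ops, which is exactly what the #ifndef CONFIG_SYS_NONCACHED_MEMORY guards in the patch achieve.
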
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 85e82bdb8c..50b7be53ca 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_FTPCI100) += pci_ftpci100.o
obj-$(CONFIG_SH4_PCI) += pci_sh4.o
obj-$(CONFIG_SH7751_PCI) +=pci_sh7751.o
obj-$(CONFIG_SH7780_PCI) +=pci_sh7780.o
+obj-$(CONFIG_PCI_TEGRA) += pci_tegra.o
obj-$(CONFIG_TSI108_PCI) += tsi108_pci.o
obj-$(CONFIG_WINBOND_83C553) += w83c553f.o
obj-$(CONFIG_PCIE_LAYERSCAPE) += pcie_layerscape.o
diff --git a/drivers/pci/pci_tegra.c b/drivers/pci/pci_tegra.c
new file mode 100644
index 0000000000..a03ad5ff1f
--- /dev/null
+++ b/drivers/pci/pci_tegra.c
@@ -0,0 +1,1143 @@
+/*
+ * Copyright (c) 2010, CompuLab, Ltd.
+ * Author: Mike Rapoport <mike@compulab.co.il>
+ *
+ * Based on NVIDIA PCIe driver
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * Copyright (c) 2013-2014, NVIDIA Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#define DEBUG
+#define pr_fmt(fmt) "tegra-pcie: " fmt
+
+#include <common.h>
+#include <errno.h>
+#include <fdtdec.h>
+#include <malloc.h>
+#include <pci.h>
+
+#include <asm/io.h>
+#include <asm/gpio.h>
+
+#include <asm/arch/clock.h>
+#include <asm/arch/powergate.h>
+#include <asm/arch-tegra/xusb-padctl.h>
+
+#include <linux/list.h>
+
+#include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define AFI_AXI_BAR0_SZ 0x00
+#define AFI_AXI_BAR1_SZ 0x04
+#define AFI_AXI_BAR2_SZ 0x08
+#define AFI_AXI_BAR3_SZ 0x0c
+#define AFI_AXI_BAR4_SZ 0x10
+#define AFI_AXI_BAR5_SZ 0x14
+
+#define AFI_AXI_BAR0_START 0x18
+#define AFI_AXI_BAR1_START 0x1c
+#define AFI_AXI_BAR2_START 0x20
+#define AFI_AXI_BAR3_START 0x24
+#define AFI_AXI_BAR4_START 0x28
+#define AFI_AXI_BAR5_START 0x2c
+
+#define AFI_FPCI_BAR0 0x30
+#define AFI_FPCI_BAR1 0x34
+#define AFI_FPCI_BAR2 0x38
+#define AFI_FPCI_BAR3 0x3c
+#define AFI_FPCI_BAR4 0x40
+#define AFI_FPCI_BAR5 0x44
+
+#define AFI_CACHE_BAR0_SZ 0x48
+#define AFI_CACHE_BAR0_ST 0x4c
+#define AFI_CACHE_BAR1_SZ 0x50
+#define AFI_CACHE_BAR1_ST 0x54
+
+#define AFI_MSI_BAR_SZ 0x60
+#define AFI_MSI_FPCI_BAR_ST 0x64
+#define AFI_MSI_AXI_BAR_ST 0x68
+
+#define AFI_CONFIGURATION 0xac
+#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
+
+#define AFI_FPCI_ERROR_MASKS 0xb0
+
+#define AFI_INTR_MASK 0xb4
+#define AFI_INTR_MASK_INT_MASK (1 << 0)
+#define AFI_INTR_MASK_MSI_MASK (1 << 8)
+
+#define AFI_SM_INTR_ENABLE 0xc4
+#define AFI_SM_INTR_INTA_ASSERT (1 << 0)
+#define AFI_SM_INTR_INTB_ASSERT (1 << 1)
+#define AFI_SM_INTR_INTC_ASSERT (1 << 2)
+#define AFI_SM_INTR_INTD_ASSERT (1 << 3)
+#define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
+#define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
+#define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
+#define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
+
+#define AFI_AFI_INTR_ENABLE 0xc8
+#define AFI_INTR_EN_INI_SLVERR (1 << 0)
+#define AFI_INTR_EN_INI_DECERR (1 << 1)
+#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
+#define AFI_INTR_EN_TGT_DECERR (1 << 3)
+#define AFI_INTR_EN_TGT_WRERR (1 << 4)
+#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
+#define AFI_INTR_EN_AXI_DECERR (1 << 6)
+#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
+#define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
+
+#define AFI_PCIE_CONFIG 0x0f8
+#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
+#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
+
+#define AFI_FUSE 0x104
+#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
+
+#define AFI_PEX0_CTRL 0x110
+#define AFI_PEX1_CTRL 0x118
+#define AFI_PEX2_CTRL 0x128
+#define AFI_PEX_CTRL_RST (1 << 0)
+#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
+#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
+#define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
+
+#define AFI_PLLE_CONTROL 0x160
+#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
+#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
+
+#define AFI_PEXBIAS_CTRL_0 0x168
+
+#define PADS_CTL_SEL 0x0000009C
+
+#define PADS_CTL 0x000000A0
+#define PADS_CTL_IDDQ_1L (1 << 0)
+#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
+#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
+
+#define PADS_PLL_CTL_TEGRA20 0x000000B8
+#define PADS_PLL_CTL_TEGRA30 0x000000B4
+#define PADS_PLL_CTL_RST_B4SM (0x1 << 1)
+#define PADS_PLL_CTL_LOCKDET (0x1 << 8)
+#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
+#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0x0 << 16)
+#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (0x1 << 16)
+#define PADS_PLL_CTL_REFCLK_EXTERNAL (0x2 << 16)
+#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
+#define PADS_PLL_CTL_TXCLKREF_DIV10 (0x0 << 20)
+#define PADS_PLL_CTL_TXCLKREF_DIV5 (0x1 << 20)
+#define PADS_PLL_CTL_TXCLKREF_BUF_EN (0x1 << 22)
+
+#define PADS_REFCLK_CFG0 0x000000C8
+#define PADS_REFCLK_CFG1 0x000000CC
+
+/*
+ * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
+ * entries, one entry per PCIe port. These field definitions and desired
+ * values aren't in the TRM, but do come from NVIDIA.
+ */
+#define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */
+#define PADS_REFCLK_CFG_E_TERM_SHIFT 7
+#define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
+#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
+
+/* Default value provided by HW engineering is 0xfa5c */
+#define PADS_REFCLK_CFG_VALUE \
+ ( \
+ (0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \
+ (0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
+ (0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \
+ (0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \
+ )
+
+#define RP_VEND_XP 0x00000F00
+#define RP_VEND_XP_DL_UP (1 << 30)
+
+#define RP_PRIV_MISC 0x00000FE0
+#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
+#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
+
+#define RP_LINK_CONTROL_STATUS 0x00000090
+#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
+#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
+
+struct tegra_pcie;
+
+struct tegra_pcie_port {
+ struct tegra_pcie *pcie;
+
+ struct fdt_resource regs;
+ unsigned int num_lanes;
+ unsigned int index;
+
+ struct list_head list;
+};
+
+struct tegra_pcie_soc {
+ unsigned int num_ports;
+ unsigned long pads_pll_ctl;
+ unsigned long tx_ref_sel;
+ bool has_pex_clkreq_en;
+ bool has_pex_bias_ctrl;
+ bool has_cml_clk;
+ bool has_gen2;
+};
+
+struct tegra_pcie {
+ struct pci_controller hose;
+
+ struct fdt_resource pads;
+ struct fdt_resource afi;
+ struct fdt_resource cs;
+
+ struct fdt_resource prefetch;
+ struct fdt_resource mem;
+ struct fdt_resource io;
+
+ struct list_head ports;
+ unsigned long xbar;
+
+ const struct tegra_pcie_soc *soc;
+ struct tegra_xusb_phy *phy;
+};
+
+static inline struct tegra_pcie *to_tegra_pcie(struct pci_controller *hose)
+{
+ return container_of(hose, struct tegra_pcie, hose);
+}
+
+static void afi_writel(struct tegra_pcie *pcie, unsigned long value,
+ unsigned long offset)
+{
+ writel(value, pcie->afi.start + offset);
+}
+
+static unsigned long afi_readl(struct tegra_pcie *pcie, unsigned long offset)
+{
+ return readl(pcie->afi.start + offset);
+}
+
+static void pads_writel(struct tegra_pcie *pcie, unsigned long value,
+ unsigned long offset)
+{
+ writel(value, pcie->pads.start + offset);
+}
+
+static unsigned long pads_readl(struct tegra_pcie *pcie, unsigned long offset)
+{
+ return readl(pcie->pads.start + offset);
+}
+
+static unsigned long rp_readl(struct tegra_pcie_port *port,
+ unsigned long offset)
+{
+ return readl(port->regs.start + offset);
+}
+
+static void rp_writel(struct tegra_pcie_port *port, unsigned long value,
+ unsigned long offset)
+{
+ writel(value, port->regs.start + offset);
+}
+
+static unsigned long tegra_pcie_conf_offset(pci_dev_t bdf, int where)
+{
+ return ((where & 0xf00) << 16) | (PCI_BUS(bdf) << 16) |
+ (PCI_DEV(bdf) << 11) | (PCI_FUNC(bdf) << 8) |
+ (where & 0xfc);
+}
+
+static int tegra_pcie_conf_address(struct tegra_pcie *pcie, pci_dev_t bdf,
+ int where, unsigned long *address)
+{
+ unsigned int bus = PCI_BUS(bdf);
+
+ if (bus == 0) {
+ unsigned int dev = PCI_DEV(bdf);
+ struct tegra_pcie_port *port;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ if (port->index + 1 == dev) {
+ *address = port->regs.start + (where & ~3);
+ return 0;
+ }
+ }
+ } else {
+ *address = pcie->cs.start + tegra_pcie_conf_offset(bdf, where);
+ return 0;
+ }
+
+ return -1;
+}
+
+static int tegra_pcie_read_conf(struct pci_controller *hose, pci_dev_t bdf,
+ int where, u32 *value)
+{
+ struct tegra_pcie *pcie = to_tegra_pcie(hose);
+ unsigned long address;
+ int err;
+
+ err = tegra_pcie_conf_address(pcie, bdf, where, &address);
+ if (err < 0) {
+ *value = 0xffffffff;
+ return 1;
+ }
+
+ *value = readl(address);
+
+ /* fixup root port class */
+ if (PCI_BUS(bdf) == 0) {
+ if (where == PCI_CLASS_REVISION) {
+ *value &= ~0x00ff0000;
+ *value |= PCI_CLASS_BRIDGE_PCI << 16;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_write_conf(struct pci_controller *hose, pci_dev_t bdf,
+ int where, u32 value)
+{
+ struct tegra_pcie *pcie = to_tegra_pcie(hose);
+ unsigned long address;
+ int err;
+
+ err = tegra_pcie_conf_address(pcie, bdf, where, &address);
+ if (err < 0)
+ return 1;
+
+ writel(value, address);
+
+ return 0;
+}
+
+static int tegra_pcie_port_parse_dt(const void *fdt, int node,
+ struct tegra_pcie_port *port)
+{
+ const u32 *addr;
+ int len;
+
+ addr = fdt_getprop(fdt, node, "assigned-addresses", &len);
+ if (!addr) {
+ error("property \"assigned-addresses\" not found");
+ return -FDT_ERR_NOTFOUND;
+ }
+
+ port->regs.start = fdt32_to_cpu(addr[2]);
+ port->regs.end = port->regs.start + fdt32_to_cpu(addr[4]);
+
+ return 0;
+}
+
+static int tegra_pcie_get_xbar_config(const void *fdt, int node, u32 lanes,
+ unsigned long *xbar)
+{
+ enum fdt_compat_id id = fdtdec_lookup(fdt, node);
+
+ switch (id) {
+ case COMPAT_NVIDIA_TEGRA20_PCIE:
+ switch (lanes) {
+ case 0x00000004:
+ debug("single-mode configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
+ return 0;
+
+ case 0x00000202:
+ debug("dual-mode configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
+ return 0;
+ }
+ break;
+
+ case COMPAT_NVIDIA_TEGRA30_PCIE:
+ switch (lanes) {
+ case 0x00000204:
+ debug("4x1, 2x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
+ return 0;
+
+ case 0x00020202:
+ debug("2x3 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
+ return 0;
+
+ case 0x00010104:
+ debug("4x1, 1x2 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
+ return 0;
+ }
+ break;
+
+ case COMPAT_NVIDIA_TEGRA124_PCIE:
+ switch (lanes) {
+ case 0x0000104:
+ debug("4x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
+ return 0;
+
+ case 0x0000102:
+ debug("2x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
+ return 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return -FDT_ERR_NOTFOUND;
+}
+
+static int tegra_pcie_parse_dt_ranges(const void *fdt, int node,
+ struct tegra_pcie *pcie)
+{
+ const u32 *ptr, *end;
+ int len;
+
+ ptr = fdt_getprop(fdt, node, "ranges", &len);
+ if (!ptr) {
+ error("missing \"ranges\" property");
+ return -FDT_ERR_NOTFOUND;
+ }
+
+ end = ptr + len / 4;
+
+ while (ptr < end) {
+ struct fdt_resource *res = NULL;
+ u32 space = fdt32_to_cpu(*ptr);
+
+ switch ((space >> 24) & 0x3) {
+ case 0x01:
+ res = &pcie->io;
+ break;
+
+ case 0x02: /* 32 bit */
+ case 0x03: /* 64 bit */
+ if (space & (1 << 30))
+ res = &pcie->prefetch;
+ else
+ res = &pcie->mem;
+
+ break;
+ }
+
+ if (res) {
+ res->start = fdt32_to_cpu(ptr[3]);
+ res->end = res->start + fdt32_to_cpu(ptr[5]);
+ }
+
+ ptr += 3 + 1 + 2;
+ }
+
+ debug("PCI regions:\n");
+ debug(" I/O: %#x-%#x\n", pcie->io.start, pcie->io.end);
+ debug(" non-prefetchable memory: %#x-%#x\n", pcie->mem.start,
+ pcie->mem.end);
+ debug(" prefetchable memory: %#x-%#x\n", pcie->prefetch.start,
+ pcie->prefetch.end);
+
+ return 0;
+}
+
+static int tegra_pcie_parse_port_info(const void *fdt, int node,
+ unsigned int *index,
+ unsigned int *lanes)
+{
+ pci_dev_t bdf;
+ int err;
+
+ err = fdtdec_get_int(fdt, node, "nvidia,num-lanes", 0);
+ if (err < 0) {
+ error("failed to parse \"nvidia,num-lanes\" property");
+ return err;
+ }
+
+ *lanes = err;
+
+ err = fdtdec_pci_get_bdf(fdt, node, &bdf);
+ if (err < 0) {
+ error("failed to parse \"reg\" property");
+ return err;
+ }
+
+ *index = PCI_DEV(bdf) - 1;
+
+ return 0;
+}
+
+static int tegra_pcie_parse_dt(const void *fdt, int node,
+ struct tegra_pcie *pcie)
+{
+ int err, subnode;
+ u32 lanes = 0;
+
+ err = fdt_get_named_resource(fdt, node, "reg", "reg-names", "pads",
+ &pcie->pads);
+ if (err < 0) {
+ error("resource \"pads\" not found");
+ return err;
+ }
+
+ err = fdt_get_named_resource(fdt, node, "reg", "reg-names", "afi",
+ &pcie->afi);
+ if (err < 0) {
+ error("resource \"afi\" not found");
+ return err;
+ }
+
+ err = fdt_get_named_resource(fdt, node, "reg", "reg-names", "cs",
+ &pcie->cs);
+ if (err < 0) {
+ error("resource \"cs\" not found");
+ return err;
+ }
+
+ pcie->phy = tegra_xusb_phy_get(TEGRA_XUSB_PADCTL_PCIE);
+ if (pcie->phy) {
+ err = tegra_xusb_phy_prepare(pcie->phy);
+ if (err < 0) {
+ error("failed to prepare PHY: %d", err);
+ return err;
+ }
+ }
+
+ err = tegra_pcie_parse_dt_ranges(fdt, node, pcie);
+ if (err < 0) {
+ error("failed to parse \"ranges\" property");
+ return err;
+ }
+
+ fdt_for_each_subnode(fdt, subnode, node) {
+ unsigned int index = 0, num_lanes = 0;
+ struct tegra_pcie_port *port;
+
+ err = tegra_pcie_parse_port_info(fdt, subnode, &index,
+ &num_lanes);
+ if (err < 0) {
+ error("failed to obtain root port info");
+ continue;
+ }
+
+ lanes |= num_lanes << (index << 3);
+
+ if (!fdtdec_get_is_enabled(fdt, subnode))
+ continue;
+
+ port = malloc(sizeof(*port));
+ if (!port)
+ continue;
+
+ memset(port, 0, sizeof(*port));
+ port->num_lanes = num_lanes;
+ port->index = index;
+
+ err = tegra_pcie_port_parse_dt(fdt, subnode, port);
+ if (err < 0) {
+ free(port);
+ continue;
+ }
+
+ list_add_tail(&port->list, &pcie->ports);
+ port->pcie = pcie;
+ }
+
+ err = tegra_pcie_get_xbar_config(fdt, node, lanes, &pcie->xbar);
+ if (err < 0) {
+ error("invalid lane configuration");
+ return err;
+ }
+
+ return 0;
+}
+
+int __weak tegra_pcie_board_init(void)
+{
+ return 0;
+}
+
+static int tegra_pcie_power_on(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc *soc = pcie->soc;
+ unsigned long value;
+ int err;
+
+ /* reset PCIEXCLK logic, AFI controller and PCIe controller */
+ reset_set_enable(PERIPH_ID_PCIEXCLK, 1);
+ reset_set_enable(PERIPH_ID_AFI, 1);
+ reset_set_enable(PERIPH_ID_PCIE, 1);
+
+ err = tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+ if (err < 0) {
+ error("failed to power off PCIe partition: %d", err);
+ return err;
+ }
+
+ tegra_pcie_board_init();
+
+ err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
+ PERIPH_ID_PCIE);
+ if (err < 0) {
+ error("failed to power up PCIe partition: %d", err);
+ return err;
+ }
+
+ /* take AFI controller out of reset */
+ reset_set_enable(PERIPH_ID_AFI, 0);
+
+ /* enable AFI clock */
+ clock_enable(PERIPH_ID_AFI);
+
+ if (soc->has_cml_clk) {
+ /* enable CML clock */
+ value = readl(NV_PA_CLK_RST_BASE + 0x48c);
+ value |= (1 << 0);
+ value &= ~(1 << 1);
+ writel(value, NV_PA_CLK_RST_BASE + 0x48c);
+ }
+
+ err = tegra_plle_enable();
+ if (err < 0) {
+ error("failed to enable PLLE: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
+{
+ const struct tegra_pcie_soc *soc = pcie->soc;
+ unsigned long start = get_timer(0);
+ u32 value;
+
+ while (get_timer(start) < timeout) {
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ if (value & PADS_PLL_CTL_LOCKDET)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc *soc = pcie->soc;
+ u32 value;
+ int err;
+
+ /* initialize internal PHY, enable up to 16 PCIe lanes */
+ pads_writel(pcie, 0, PADS_CTL_SEL);
+
+ /* override IDDQ to 1 on all 4 lanes */
+ value = pads_readl(pcie, PADS_CTL);
+ value |= PADS_CTL_IDDQ_1L;
+ pads_writel(pcie, value, PADS_CTL);
+
+ /*
+	 * Set up PHY PLL inputs: select PLLE output as refclock, set TX
+ * ref sel to div10 (not div5).
+ */
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
+ value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
+ pads_writel(pcie, value, soc->pads_pll_ctl);
+
+ /* reset PLL */
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ value &= ~PADS_PLL_CTL_RST_B4SM;
+ pads_writel(pcie, value, soc->pads_pll_ctl);
+
+ udelay(20);
+
+ /* take PLL out of reset */
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ value |= PADS_PLL_CTL_RST_B4SM;
+ pads_writel(pcie, value, soc->pads_pll_ctl);
+
+ /* configure the reference clock driver */
+ value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
+ pads_writel(pcie, value, PADS_REFCLK_CFG0);
+
+ if (soc->num_ports > 2)
+ pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
+
+ /* wait for the PLL to lock */
+ err = tegra_pcie_pll_wait(pcie, 500);
+ if (err < 0) {
+ error("PLL failed to lock: %d", err);
+ return err;
+ }
+
+ /* turn off IDDQ override */
+ value = pads_readl(pcie, PADS_CTL);
+ value &= ~PADS_CTL_IDDQ_1L;
+ pads_writel(pcie, value, PADS_CTL);
+
+ /* enable TX/RX data */
+ value = pads_readl(pcie, PADS_CTL);
+ value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
+ pads_writel(pcie, value, PADS_CTL);
+
+ return 0;
+}
+
+static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc *soc = pcie->soc;
+ struct tegra_pcie_port *port;
+ u32 value;
+ int err;
+
+ if (pcie->phy) {
+ value = afi_readl(pcie, AFI_PLLE_CONTROL);
+ value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
+ value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
+ afi_writel(pcie, value, AFI_PLLE_CONTROL);
+ }
+
+ if (soc->has_pex_bias_ctrl)
+ afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
+
+ value = afi_readl(pcie, AFI_PCIE_CONFIG);
+ value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
+ value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+
+ afi_writel(pcie, value, AFI_PCIE_CONFIG);
+
+ value = afi_readl(pcie, AFI_FUSE);
+
+ if (soc->has_gen2)
+ value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
+ else
+ value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
+
+ afi_writel(pcie, value, AFI_FUSE);
+
+ if (pcie->phy)
+ err = tegra_xusb_phy_enable(pcie->phy);
+ else
+ err = tegra_pcie_phy_enable(pcie);
+
+ if (err < 0) {
+ error("failed to power on PHY: %d\n", err);
+ return err;
+ }
+
+ /* take the PCIEXCLK logic out of reset */
+ reset_set_enable(PERIPH_ID_PCIEXCLK, 0);
+
+ /* finally enable PCIe */
+ value = afi_readl(pcie, AFI_CONFIGURATION);
+ value |= AFI_CONFIGURATION_EN_FPCI;
+ afi_writel(pcie, value, AFI_CONFIGURATION);
+
+ /* disable all interrupts */
+ afi_writel(pcie, 0, AFI_AFI_INTR_ENABLE);
+ afi_writel(pcie, 0, AFI_SM_INTR_ENABLE);
+ afi_writel(pcie, 0, AFI_INTR_MASK);
+ afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
+
+ return 0;
+}
+
+static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
+{
+ unsigned long fpci, axi, size;
+
+ /* BAR 0: type 1 extended configuration space */
+ fpci = 0xfe100000;
+ size = fdt_resource_size(&pcie->cs);
+ axi = pcie->cs.start;
+
+ afi_writel(pcie, axi, AFI_AXI_BAR0_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
+ afi_writel(pcie, fpci, AFI_FPCI_BAR0);
+
+ /* BAR 1: downstream I/O */
+ fpci = 0xfdfc0000;
+ size = fdt_resource_size(&pcie->io);
+ axi = pcie->io.start;
+
+ afi_writel(pcie, axi, AFI_AXI_BAR1_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
+ afi_writel(pcie, fpci, AFI_FPCI_BAR1);
+
+ /* BAR 2: prefetchable memory */
+ fpci = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
+ size = fdt_resource_size(&pcie->prefetch);
+ axi = pcie->prefetch.start;
+
+ afi_writel(pcie, axi, AFI_AXI_BAR2_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
+ afi_writel(pcie, fpci, AFI_FPCI_BAR2);
+
+ /* BAR 3: non-prefetchable memory */
+ fpci = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
+ size = fdt_resource_size(&pcie->mem);
+ axi = pcie->mem.start;
+
+ afi_writel(pcie, axi, AFI_AXI_BAR3_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
+ afi_writel(pcie, fpci, AFI_FPCI_BAR3);
+
+ /* NULL out the remaining BARs as they are not used */
+ afi_writel(pcie, 0, AFI_AXI_BAR4_START);
+ afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
+ afi_writel(pcie, 0, AFI_FPCI_BAR4);
+
+ afi_writel(pcie, 0, AFI_AXI_BAR5_START);
+ afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
+ afi_writel(pcie, 0, AFI_FPCI_BAR5);
+
+ /* map all upstream transactions as uncached */
+ afi_writel(pcie, NV_PA_SDRAM_BASE, AFI_CACHE_BAR0_ST);
+ afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
+ afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
+ afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
+
+ /* MSI translations are setup only when needed */
+ afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
+ afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
+ afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
+ afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
+}
+
+static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
+{
+ unsigned long ret = 0;
+
+ switch (port->index) {
+ case 0:
+ ret = AFI_PEX0_CTRL;
+ break;
+
+ case 1:
+ ret = AFI_PEX1_CTRL;
+ break;
+
+ case 2:
+ ret = AFI_PEX2_CTRL;
+ break;
+ }
+
+ return ret;
+}
+
+static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
+{
+ unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ unsigned long value;
+
+	/* pulse reset signal */
+ value = afi_readl(port->pcie, ctrl);
+ value &= ~AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+
+ udelay(2000);
+
+ value = afi_readl(port->pcie, ctrl);
+ value |= AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+}
+
+static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
+{
+ unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ unsigned long value;
+
+ /* enable reference clock */
+ value = afi_readl(port->pcie, ctrl);
+ value |= AFI_PEX_CTRL_REFCLK_EN;
+
+ if (port->pcie->soc->has_pex_clkreq_en)
+ value |= AFI_PEX_CTRL_CLKREQ_EN;
+
+ value |= AFI_PEX_CTRL_OVERRIDE_EN;
+
+ afi_writel(port->pcie, value, ctrl);
+
+ tegra_pcie_port_reset(port);
+}
+
+static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
+{
+ unsigned int retries = 3;
+ unsigned long value;
+
+ value = rp_readl(port, RP_PRIV_MISC);
+ value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
+ value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
+ rp_writel(port, value, RP_PRIV_MISC);
+
+ do {
+ unsigned int timeout = 200;
+
+ do {
+ value = rp_readl(port, RP_VEND_XP);
+ if (value & RP_VEND_XP_DL_UP)
+ break;
+
+ udelay(2000);
+ } while (--timeout);
+
+ if (!timeout) {
+ debug("link %u down, retrying\n", port->index);
+ goto retry;
+ }
+
+ timeout = 200;
+
+ do {
+ value = rp_readl(port, RP_LINK_CONTROL_STATUS);
+ if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
+ return true;
+
+ udelay(2000);
+ } while (--timeout);
+
+retry:
+ tegra_pcie_port_reset(port);
+ } while (--retries);
+
+ return false;
+}
+
+static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
+{
+ unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ unsigned long value;
+
+ /* assert port reset */
+ value = afi_readl(port->pcie, ctrl);
+ value &= ~AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+
+ /* disable reference clock */
+ value = afi_readl(port->pcie, ctrl);
+ value &= ~AFI_PEX_CTRL_REFCLK_EN;
+ afi_writel(port->pcie, value, ctrl);
+}
+
+static void tegra_pcie_port_free(struct tegra_pcie_port *port)
+{
+ list_del(&port->list);
+ free(port);
+}
+
+static int tegra_pcie_enable(struct tegra_pcie *pcie)
+{
+ struct tegra_pcie_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ debug("probing port %u, using %u lanes\n", port->index,
+ port->num_lanes);
+
+ tegra_pcie_port_enable(port);
+
+ if (tegra_pcie_port_check_link(port))
+ continue;
+
+ debug("link %u down, ignoring\n", port->index);
+
+ tegra_pcie_port_disable(port);
+ tegra_pcie_port_free(port);
+ }
+
+ return 0;
+}
+
+static const struct tegra_pcie_soc tegra20_pcie_soc = {
+ .num_ports = 2,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
+ .has_pex_clkreq_en = false,
+ .has_pex_bias_ctrl = false,
+ .has_cml_clk = false,
+ .has_gen2 = false,
+};
+
+static const struct tegra_pcie_soc tegra30_pcie_soc = {
+ .num_ports = 3,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_cml_clk = true,
+ .has_gen2 = false,
+};
+
+static const struct tegra_pcie_soc tegra124_pcie_soc = {
+ .num_ports = 2,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_cml_clk = true,
+ .has_gen2 = true,
+};
+
+static int process_nodes(const void *fdt, int nodes[], unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ const struct tegra_pcie_soc *soc;
+ struct tegra_pcie *pcie;
+ enum fdt_compat_id id;
+ int err;
+
+ if (!fdtdec_get_is_enabled(fdt, nodes[i]))
+ continue;
+
+ id = fdtdec_lookup(fdt, nodes[i]);
+ switch (id) {
+ case COMPAT_NVIDIA_TEGRA20_PCIE:
+ soc = &tegra20_pcie_soc;
+ break;
+
+ case COMPAT_NVIDIA_TEGRA30_PCIE:
+ soc = &tegra30_pcie_soc;
+ break;
+
+ case COMPAT_NVIDIA_TEGRA124_PCIE:
+ soc = &tegra124_pcie_soc;
+ break;
+
+ default:
+ error("unsupported compatible: %s",
+ fdtdec_get_compatible(id));
+ continue;
+ }
+
+ pcie = malloc(sizeof(*pcie));
+ if (!pcie) {
+ error("failed to allocate controller");
+ continue;
+ }
+
+ memset(pcie, 0, sizeof(*pcie));
+ pcie->soc = soc;
+
+ INIT_LIST_HEAD(&pcie->ports);
+
+ err = tegra_pcie_parse_dt(fdt, nodes[i], pcie);
+ if (err < 0) {
+ free(pcie);
+ continue;
+ }
+
+ err = tegra_pcie_power_on(pcie);
+ if (err < 0) {
+ error("failed to power on");
+ continue;
+ }
+
+ err = tegra_pcie_enable_controller(pcie);
+ if (err < 0) {
+ error("failed to enable controller");
+ continue;
+ }
+
+ tegra_pcie_setup_translations(pcie);
+
+ err = tegra_pcie_enable(pcie);
+ if (err < 0) {
+ error("failed to enable PCIe");
+ continue;
+ }
+
+ pcie->hose.first_busno = 0;
+ pcie->hose.current_busno = 0;
+ pcie->hose.last_busno = 0;
+
+ pci_set_region(&pcie->hose.regions[0], NV_PA_SDRAM_BASE,
+ NV_PA_SDRAM_BASE, gd->ram_size,
+ PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
+
+ pci_set_region(&pcie->hose.regions[1], pcie->io.start,
+ pcie->io.start, fdt_resource_size(&pcie->io),
+ PCI_REGION_IO);
+
+ pci_set_region(&pcie->hose.regions[2], pcie->mem.start,
+ pcie->mem.start, fdt_resource_size(&pcie->mem),
+ PCI_REGION_MEM);
+
+ pci_set_region(&pcie->hose.regions[3], pcie->prefetch.start,
+ pcie->prefetch.start,
+ fdt_resource_size(&pcie->prefetch),
+ PCI_REGION_MEM | PCI_REGION_PREFETCH);
+
+ pcie->hose.region_count = 4;
+
+ pci_set_ops(&pcie->hose,
+ pci_hose_read_config_byte_via_dword,
+ pci_hose_read_config_word_via_dword,
+ tegra_pcie_read_conf,
+ pci_hose_write_config_byte_via_dword,
+ pci_hose_write_config_word_via_dword,
+ tegra_pcie_write_conf);
+
+ pci_register_hose(&pcie->hose);
+
+#ifdef CONFIG_PCI_SCAN_SHOW
+ printf("PCI: Enumerating devices...\n");
+ printf("---------------------------------------\n");
+ printf(" Device ID Description\n");
+ printf(" ------ -- -----------\n");
+#endif
+
+ pcie->hose.last_busno = pci_hose_scan(&pcie->hose);
+ }
+
+ return 0;
+}
+
+void pci_init_board(void)
+{
+ const void *fdt = gd->fdt_blob;
+ int count, nodes[1];
+
+ count = fdtdec_find_aliases_for_id(fdt, "pcie-controller",
+ COMPAT_NVIDIA_TEGRA124_PCIE,
+ nodes, ARRAY_SIZE(nodes));
+ if (process_nodes(fdt, nodes, count))
+ return;
+
+ count = fdtdec_find_aliases_for_id(fdt, "pcie-controller",
+ COMPAT_NVIDIA_TEGRA30_PCIE,
+ nodes, ARRAY_SIZE(nodes));
+ if (process_nodes(fdt, nodes, count))
+ return;
+
+ count = fdtdec_find_aliases_for_id(fdt, "pcie-controller",
+ COMPAT_NVIDIA_TEGRA20_PCIE,
+ nodes, ARRAY_SIZE(nodes));
+ if (process_nodes(fdt, nodes, count))
+ return;
+}
+
+int pci_skip_dev(struct pci_controller *hose, pci_dev_t dev)
+{
+ if (PCI_BUS(dev) != 0 && PCI_DEV(dev) > 0)
+ return 1;
+
+ return 0;
+}
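
For accesses below the root ports (bus != 0), tegra_pcie_conf_address() maps a configuration access to an offset inside the "cs" aperture using the packing computed by tegra_pcie_conf_offset(); bus 0 accesses go straight to the matching port's own register window instead. A small standalone sketch that reproduces the offset packing, assuming U-Boot's usual bdf layout (bus in bits 23:16, device in bits 15:11, function in bits 10:8); the helper names here are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* assumed bdf encoding: bus[23:16], device[15:11], function[10:8] */
#define BDF(b, d, f)  (((b) << 16) | ((d) << 11) | ((f) << 8))
#define BDF_BUS(x)    (((x) >> 16) & 0xff)
#define BDF_DEV(x)    (((x) >> 11) & 0x1f)
#define BDF_FUNC(x)   (((x) >> 8) & 0x7)

/* mirrors the arithmetic in tegra_pcie_conf_offset() above */
static unsigned long conf_offset(uint32_t bdf, int where)
{
	return ((where & 0xf00) << 16) | (BDF_BUS(bdf) << 16) |
	       (BDF_DEV(bdf) << 11) | (BDF_FUNC(bdf) << 8) |
	       (where & 0xfc);
}

int main(void)
{
	/* bus 1, device 0, function 0, extended config register 0x100 */
	printf("offset = %#lx\n", conf_offset(BDF(1, 0, 0), 0x100));
	return 0;
}

The high nibble of the register offset (bits 11:8 of "where") lands in bits 27:24 of the result, which is how extended (>= 0x100) configuration registers are reached through the same window.
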
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 04bd996cad..214565241e 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -5,6 +5,7 @@
# SPDX-License-Identifier: GPL-2.0+
#
+obj-$(CONFIG_AS3722_POWER) += as3722.o
obj-$(CONFIG_AXP152_POWER) += axp152.o
obj-$(CONFIG_AXP209_POWER) += axp209.o
obj-$(CONFIG_AXP221_POWER) += axp221.o
diff --git a/drivers/power/as3722.c b/drivers/power/as3722.c
new file mode 100644
index 0000000000..4c6de79cd6
--- /dev/null
+++ b/drivers/power/as3722.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2014 NVIDIA Corporation
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#define pr_fmt(fmt) "as3722: " fmt
+
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <fdtdec.h>
+#include <i2c.h>
+
+#include <power/as3722.h>
+
+#define AS3722_SD_VOLTAGE(n) (0x00 + (n))
+#define AS3722_GPIO_CONTROL(n) (0x08 + (n))
+#define AS3722_GPIO_CONTROL_MODE_OUTPUT_VDDH (1 << 0)
+#define AS3722_GPIO_CONTROL_MODE_OUTPUT_VDDL (7 << 0)
+#define AS3722_GPIO_CONTROL_INVERT (1 << 7)
+#define AS3722_LDO_VOLTAGE(n) (0x10 + (n))
+#define AS3722_GPIO_SIGNAL_OUT 0x20
+#define AS3722_SD_CONTROL 0x4d
+#define AS3722_LDO_CONTROL 0x4e
+#define AS3722_ASIC_ID1 0x90
+#define AS3722_DEVICE_ID 0x0c
+#define AS3722_ASIC_ID2 0x91
+
+static int as3722_read(struct udevice *pmic, u8 reg, u8 *value)
+{
+ int err;
+
+ err = i2c_read(pmic, reg, value, 1);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int as3722_write(struct udevice *pmic, u8 reg, u8 value)
+{
+ int err;
+
+ err = i2c_write(pmic, reg, &value, 1);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int as3722_read_id(struct udevice *pmic, u8 *id, u8 *revision)
+{
+ int err;
+
+ err = as3722_read(pmic, AS3722_ASIC_ID1, id);
+ if (err) {
+ error("failed to read ID1 register: %d", err);
+ return err;
+ }
+
+ err = as3722_read(pmic, AS3722_ASIC_ID2, revision);
+ if (err) {
+ error("failed to read ID2 register: %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int as3722_sd_enable(struct udevice *pmic, unsigned int sd)
+{
+ u8 value;
+ int err;
+
+ if (sd > 6)
+ return -EINVAL;
+
+ err = as3722_read(pmic, AS3722_SD_CONTROL, &value);
+ if (err) {
+ error("failed to read SD control register: %d", err);
+ return err;
+ }
+
+ value |= 1 << sd;
+
+ err = as3722_write(pmic, AS3722_SD_CONTROL, value);
+ if (err < 0) {
+ error("failed to write SD control register: %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int as3722_sd_set_voltage(struct udevice *pmic, unsigned int sd, u8 value)
+{
+ int err;
+
+ if (sd > 6)
+ return -EINVAL;
+
+ err = as3722_write(pmic, AS3722_SD_VOLTAGE(sd), value);
+ if (err < 0) {
+ error("failed to write SD%u voltage register: %d", sd, err);
+ return err;
+ }
+
+ return 0;
+}
+
+int as3722_ldo_enable(struct udevice *pmic, unsigned int ldo)
+{
+ u8 value;
+ int err;
+
+ if (ldo > 11)
+ return -EINVAL;
+
+ err = as3722_read(pmic, AS3722_LDO_CONTROL, &value);
+ if (err) {
+ error("failed to read LDO control register: %d", err);
+ return err;
+ }
+
+ value |= 1 << ldo;
+
+ err = as3722_write(pmic, AS3722_LDO_CONTROL, value);
+ if (err < 0) {
+ error("failed to write LDO control register: %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int as3722_ldo_set_voltage(struct udevice *pmic, unsigned int ldo, u8 value)
+{
+ int err;
+
+ if (ldo > 11)
+ return -EINVAL;
+
+ err = as3722_write(pmic, AS3722_LDO_VOLTAGE(ldo), value);
+ if (err < 0) {
+ error("failed to write LDO%u voltage register: %d", ldo,
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+int as3722_gpio_configure(struct udevice *pmic, unsigned int gpio,
+ unsigned long flags)
+{
+ u8 value = 0;
+ int err;
+
+ if (flags & AS3722_GPIO_OUTPUT_VDDH)
+ value |= AS3722_GPIO_CONTROL_MODE_OUTPUT_VDDH;
+
+ if (flags & AS3722_GPIO_INVERT)
+ value |= AS3722_GPIO_CONTROL_INVERT;
+
+ err = as3722_write(pmic, AS3722_GPIO_CONTROL(gpio), value);
+ if (err) {
+ error("failed to configure GPIO#%u: %d", gpio, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int as3722_gpio_set(struct udevice *pmic, unsigned int gpio,
+ unsigned int level)
+{
+ const char *l;
+ u8 value;
+ int err;
+
+ if (gpio > 7)
+ return -EINVAL;
+
+ err = as3722_read(pmic, AS3722_GPIO_SIGNAL_OUT, &value);
+ if (err < 0) {
+ error("failed to read GPIO signal out register: %d", err);
+ return err;
+ }
+
+ if (level == 0) {
+ value &= ~(1 << gpio);
+ l = "low";
+ } else {
+ value |= 1 << gpio;
+ l = "high";
+ }
+
+ err = as3722_write(pmic, AS3722_GPIO_SIGNAL_OUT, value);
+ if (err) {
+ error("failed to set GPIO#%u %s: %d", gpio, l, err);
+ return err;
+ }
+
+ return 0;
+}
+
+int as3722_gpio_direction_output(struct udevice *pmic, unsigned int gpio,
+ unsigned int level)
+{
+ u8 value;
+ int err;
+
+ if (gpio > 7)
+ return -EINVAL;
+
+ if (level == 0)
+ value = AS3722_GPIO_CONTROL_MODE_OUTPUT_VDDL;
+ else
+ value = AS3722_GPIO_CONTROL_MODE_OUTPUT_VDDH;
+
+ err = as3722_write(pmic, AS3722_GPIO_CONTROL(gpio), value);
+ if (err) {
+ error("failed to configure GPIO#%u as output: %d", gpio, err);
+ return err;
+ }
+
+ err = as3722_gpio_set(pmic, gpio, level);
+ if (err < 0) {
+ error("failed to set GPIO#%u high: %d", gpio, err);
+ return err;
+ }
+
+ return 0;
+}
+
+int as3722_init(struct udevice **devp)
+{
+ struct udevice *pmic;
+ u8 id, revision;
+ const unsigned int bus = 0;
+ const unsigned int address = 0x40;
+ int err;
+
+ err = i2c_get_chip_for_busnum(bus, address, &pmic);
+ if (err)
+ return err;
+ err = as3722_read_id(pmic, &id, &revision);
+ if (err < 0) {
+ error("failed to read ID: %d", err);
+ return err;
+ }
+
+ if (id != AS3722_DEVICE_ID) {
+ error("unknown device");
+ return -ENOENT;
+ }
+
+ debug("AS3722 revision %#x found on I2C bus %u, address %#x\n",
+ revision, bus, address);
+ *devp = pmic;
+
+ return 0;
+}
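
as3722.c exposes a small helper API: as3722_init() locates and identifies the PMIC, and the as3722_sd_*, as3722_ldo_* and as3722_gpio_* functions program its rails and GPIOs. A hedged usage sketch of how board code might string these calls together; the hook name, rail number, voltage code and GPIO index below are made-up examples rather than values from any real board:

#include <common.h>
#include <dm.h>
#include <errno.h>

#include <power/as3722.h>

/* hypothetical board hook, not an actual U-Boot entry point */
int board_pmic_setup(void)
{
	struct udevice *pmic;
	int err;

	/* probes the PMIC at address 0x40 on I2C bus 0 and verifies its ID */
	err = as3722_init(&pmic);
	if (err)
		return err;

	/* enable an example SD rail and program an example voltage code */
	err = as3722_sd_enable(pmic, 4);
	if (err)
		return err;

	err = as3722_sd_set_voltage(pmic, 4, 0x24);
	if (err)
		return err;

	/* drive PMIC GPIO#1 high, e.g. as a power-enable signal */
	return as3722_gpio_direction_output(pmic, 1, 1);
}
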