Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/compressed/head.S | 2
-rw-r--r--  arch/arm/boot/compressed/vmlinux.lds.in | 2
-rw-r--r--  arch/arm/configs/ag5evm_defconfig | 83
-rw-r--r--  arch/arm/include/asm/assembler.h | 2
-rw-r--r--  arch/arm/include/asm/mmu.h | 4
-rw-r--r--  arch/arm/include/asm/pgtable.h | 3
-rw-r--r--  arch/arm/lib/findbit.S | 6
-rw-r--r--  arch/arm/mach-aaec2000/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-bcmring/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-clps711x/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-davinci/dm355.c | 6
-rw-r--r--  arch/arm/mach-davinci/dm365.c | 6
-rw-r--r--  arch/arm/mach-davinci/dm644x.c | 4
-rw-r--r--  arch/arm/mach-ebsa110/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-footbridge/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-h720x/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-imx/eukrea_mbimx27-baseboard.c | 6
-rw-r--r--  arch/arm/mach-integrator/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-msm/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-mx25/devices-imx25.h | 4
-rw-r--r--  arch/arm/mach-mx3/mach-pcm037_eet.c | 5
-rw-r--r--  arch/arm/mach-netx/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-omap1/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-omap2/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-pnx4008/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-rpc/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-shark/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-shmobile/Kconfig | 16
-rw-r--r--  arch/arm/mach-shmobile/Makefile | 3
-rw-r--r--  arch/arm/mach-shmobile/board-ag5evm.c | 263
-rw-r--r--  arch/arm/mach-shmobile/board-ap4evb.c | 157
-rw-r--r--  arch/arm/mach-shmobile/clock-sh7372.c | 35
-rw-r--r--  arch/arm/mach-shmobile/clock-sh73a0.c | 350
-rw-r--r--  arch/arm/mach-shmobile/include/mach/common.h | 7
-rw-r--r--  arch/arm/mach-shmobile/include/mach/entry-macro-gic.S | 89
-rw-r--r--  arch/arm/mach-shmobile/include/mach/entry-macro-intc.S | 39
-rw-r--r--  arch/arm/mach-shmobile/include/mach/entry-macro.S | 30
-rw-r--r--  arch/arm/mach-shmobile/include/mach/irqs.h | 3
-rw-r--r--  arch/arm/mach-shmobile/include/mach/sh7372.h | 2
-rw-r--r--  arch/arm/mach-shmobile/include/mach/sh73a0.h | 446
-rw-r--r--  arch/arm/mach-shmobile/pfc-sh73a0.c | 2679
-rw-r--r--  arch/arm/mach-shmobile/setup-sh7372.c | 10
-rw-r--r--  arch/arm/mach-shmobile/setup-sh73a0.c | 349
-rw-r--r--  arch/arm/mach-ux500/cpu.c | 6
-rw-r--r--  arch/arm/mach-versatile/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mm/Kconfig | 2
-rw-r--r--  arch/arm/mm/ioremap.c | 4
-rw-r--r--  arch/arm/plat-mxc/devices/platform-imx-dma.c | 8
-rw-r--r--  arch/arm/plat-mxc/devices/platform-spi_imx.c | 1
-rw-r--r--  arch/arm/plat-nomadik/timer.c | 89
-rw-r--r--  arch/blackfin/kernel/process.c | 1
-rw-r--r--  arch/frv/kernel/process.c | 1
-rw-r--r--  arch/h8300/kernel/process.c | 1
-rw-r--r--  arch/ia64/hp/sim/simscsi.c | 4
-rw-r--r--  arch/m68k/kernel/process.c | 1
-rw-r--r--  arch/m68knommu/kernel/process.c | 1
-rw-r--r--  arch/mn10300/kernel/process.c | 1
-rw-r--r--  arch/parisc/hpux/sys_hpux.c | 1
-rw-r--r--  arch/parisc/kernel/sys_parisc32.c | 1
-rw-r--r--  arch/powerpc/Kconfig | 4
-rw-r--r--  arch/powerpc/boot/div64.S | 3
-rw-r--r--  arch/powerpc/kernel/kgdb.c | 4
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 5
-rw-r--r--  arch/powerpc/kernel/sys_ppc32.c | 1
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 2
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S | 5
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 6
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c | 2
-rw-r--r--  arch/s390/Kconfig.debug | 12
-rw-r--r--  arch/s390/include/asm/page.h | 5
-rw-r--r--  arch/s390/kernel/compat_linux.c | 1
-rw-r--r--  arch/s390/kernel/kprobes.c | 70
-rw-r--r--  arch/s390/mm/gup.c | 7
-rw-r--r--  arch/sh/include/asm/processor_32.h | 7
-rw-r--r--  arch/sh/include/mach-common/mach/romimage.h | 2
-rw-r--r--  arch/sh/include/mach-ecovec24/mach/romimage.h | 2
-rw-r--r--  arch/sh/include/mach-kfr2r09/mach/romimage.h | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4/clock-sh4-202.c | 2
-rw-r--r--  arch/sh/kernel/sys_sh.c | 2
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-trapa.S | 2
-rw-r--r--  arch/sparc/kernel/leon_smp.c | 1
-rw-r--r--  arch/sparc/kernel/sys_sparc32.c | 1
-rw-r--r--  arch/sparc/kernel/sys_sparc_32.c | 1
-rw-r--r--  arch/sparc/kernel/unaligned_32.c | 1
-rw-r--r--  arch/sparc/kernel/windows.c | 1
-rw-r--r--  arch/tile/Kconfig | 12
-rw-r--r--  arch/tile/include/asm/cacheflush.h | 52
-rw-r--r--  arch/tile/include/asm/io.h | 15
-rw-r--r--  arch/tile/include/asm/pci-bridge.h | 117
-rw-r--r--  arch/tile/include/asm/pci.h | 107
-rw-r--r--  arch/tile/include/asm/processor.h | 10
-rw-r--r--  arch/tile/include/hv/drv_xgbe_impl.h | 300
-rw-r--r--  arch/tile/include/hv/drv_xgbe_intf.h | 615
-rw-r--r--  arch/tile/include/hv/netio_errors.h | 122
-rw-r--r--  arch/tile/include/hv/netio_intf.h | 2975
-rw-r--r--  arch/tile/kernel/Makefile | 1
-rw-r--r--  arch/tile/kernel/compat.c | 1
-rw-r--r--  arch/tile/kernel/compat_signal.c | 1
-rw-r--r--  arch/tile/kernel/pci.c | 621
-rw-r--r--  arch/tile/kernel/setup.c | 2
-rw-r--r--  arch/tile/kernel/signal.c | 1
-rw-r--r--  arch/tile/kernel/smpboot.c | 1
-rw-r--r--  arch/tile/kernel/sys.c | 1
-rw-r--r--  arch/tile/lib/memchr_32.c | 35
-rw-r--r--  arch/tile/lib/spinlock_32.c | 29
-rw-r--r--  arch/tile/mm/fault.c | 1
-rw-r--r--  arch/tile/mm/hugetlbpage.c | 1
-rw-r--r--  arch/um/drivers/line.c | 5
-rw-r--r--  arch/um/kernel/exec.c | 1
-rw-r--r--  arch/x86/ia32/sys_ia32.c | 1
-rw-r--r--  arch/x86/include/asm/fixmap.h | 4
-rw-r--r--  arch/x86/include/asm/xen/interface.h | 6
-rw-r--r--  arch/x86/include/asm/xen/interface_32.h | 5
-rw-r--r--  arch/x86/include/asm/xen/interface_64.h | 13
-rw-r--r--  arch/x86/include/asm/xen/page.h | 7
-rw-r--r--  arch/x86/kernel/cpuid.c | 1
-rw-r--r--  arch/x86/kernel/kgdb.c | 12
-rw-r--r--  arch/x86/kernel/msr.c | 1
-rw-r--r--  arch/x86/kvm/svm.c | 2
-rw-r--r--  arch/x86/kvm/vmx.c | 19
-rw-r--r--  arch/x86/xen/enlighten.c | 21
-rw-r--r--  arch/x86/xen/mmu.c | 86
-rw-r--r--  arch/x86/xen/setup.c | 12
125 files changed, 9634 insertions(+), 500 deletions(-)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6825c34646d4..9be21ba648cd 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -1084,6 +1084,6 @@ memdump: mov r12, r0
reloc_end:
.align
- .section ".stack", "w"
+ .section ".stack", "aw", %nobits
user_stack: .space 4096
user_stack_end:
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
index d08168941bd6..366a924019ac 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.in
@@ -57,7 +57,7 @@ SECTIONS
.bss : { *(.bss) }
_end = .;
- .stack (NOLOAD) : { *(.stack) }
+ .stack : { *(.stack) }
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
diff --git a/arch/arm/configs/ag5evm_defconfig b/arch/arm/configs/ag5evm_defconfig
new file mode 100644
index 000000000000..2b9cf56db363
--- /dev/null
+++ b/arch/arm/configs/ag5evm_defconfig
@@ -0,0 +1,83 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_SHMOBILE=y
+CONFIG_ARCH_SH73A0=y
+CONFIG_MACH_AG5EVM=y
+CONFIG_MEMORY_SIZE=0x10000000
+CONFIG_CPU_BPREDICT_DISABLE=y
+CONFIG_ARM_ERRATA_430973=y
+CONFIG_ARM_ERRATA_458693=y
+CONFIG_NO_HZ=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="console=tty0 console=ttySC2,115200 earlyprintk=sh-sci.2,115200 ignore_loglevel"
+CONFIG_CMDLINE_FORCE=y
+CONFIG_KEXEC=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM=y
+# CONFIG_SUSPEND is not set
+CONFIG_PM_RUNTIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_SPARSEKMAP=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=9
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_SH_MOBILE=y
+# CONFIG_HWMON is not set
+# CONFIG_MFD_SUPPORT is not set
+CONFIG_FB=y
+CONFIG_FB_SH_MOBILE_LCDC=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_FTRACE is not set
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 062b58c029ab..749bb6622404 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -238,7 +238,7 @@
@ Slightly optimised to avoid incrementing the pointer twice
usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
.if \rept == 2
- usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
+ usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
.endif
add\cond \ptr, #\rept * \inc
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 68870c776671..b4ffe9d5b526 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -13,6 +13,10 @@ typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
#define ASID(mm) ((mm)->context.id & 255)
+
+/* init_mm.context.id_lock should be initialized. */
+#define INIT_MM_CONTEXT(name) \
+ .context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock),
#else
#define ASID(mm) (0)
#endif
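For reference, INIT_MM_CONTEXT() is consumed inside the static init_mm initializer; a minimal sketch of how it slots in (field subset in the style of mm/init-mm.c, shown only to illustrate the designated-initializer hook — not part of this patch):

/* Hypothetical excerpt: the macro expands to an extra designated
 * initializer for init_mm.context.id_lock (or to nothing on
 * architectures that do not define it). */
struct mm_struct init_mm = {
	.mm_rb		= RB_ROOT,
	.pgd		= swapper_pg_dir,
	.mm_users	= ATOMIC_INIT(2),
	.mm_count	= ATOMIC_INIT(1),
	.mmap_sem	= __RWSEM_INITIALIZER(init_mm.mmap_sem),
	.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
	.mmlist		= LIST_HEAD_INIT(init_mm.mmlist),
	INIT_MM_CONTEXT(init_mm)
};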
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index b155414192da..53d1d5deb111 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -374,6 +374,9 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
+/* we don't need complex calculations here as the pmd is folded into the pgd */
+#define pmd_addr_end(addr,end) (end)
+
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
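To see what the one-line override above is skipping, the generic pmd_addr_end() fallback clamps the walk to the next pmd boundary; roughly (a sketch of the asm-generic definition, assuming the usual PMD_SIZE/PMD_MASK macros):

/* Generic fallback (sketch): advance to the next PMD boundary unless 'end'
 * comes first.  With the pmd folded into the pgd there is exactly one pmd
 * per pgd entry, so ARM can return 'end' unconditionally. */
#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})
#endif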
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 1e4cbd4e7be9..64f6bc1a9132 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -174,8 +174,8 @@ ENDPROC(_find_next_bit_be)
*/
.L_found:
#if __LINUX_ARM_ARCH__ >= 5
- rsb r1, r3, #0
- and r3, r3, r1
+ rsb r0, r3, #0
+ and r3, r3, r0
clz r3, r3
rsb r3, r3, #31
add r0, r2, r3
@@ -190,5 +190,7 @@ ENDPROC(_find_next_bit_be)
addeq r2, r2, #1
mov r0, r2
#endif
+ cmp r1, r0 @ Clamp to maxbit
+ movlo r0, r1
mov pc, lr
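The two added instructions bound the result: r1 still holds the maxbit argument, and callers of the find_{first,next}_*_bit() family treat any return value greater than or equal to the size they passed in as "not found". A hedged C equivalent of the new tail:

/* Illustrative only: 'bit' is the bit number computed from the last word
 * scanned, 'maxbit' is the size argument preserved in r1. */
static unsigned long clamp_found_bit(unsigned long bit, unsigned long maxbit)
{
	return bit < maxbit ? bit : maxbit;	/* never report a bit past the range */
}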
diff --git a/arch/arm/mach-aaec2000/include/mach/vmalloc.h b/arch/arm/mach-aaec2000/include/mach/vmalloc.h
index cff4e0a996ce..a6299e8321bd 100644
--- a/arch/arm/mach-aaec2000/include/mach/vmalloc.h
+++ b/arch/arm/mach-aaec2000/include/mach/vmalloc.h
@@ -11,6 +11,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-bcmring/include/mach/vmalloc.h b/arch/arm/mach-bcmring/include/mach/vmalloc.h
index 3db3a09fd398..7397bd7817d9 100644
--- a/arch/arm/mach-bcmring/include/mach/vmalloc.h
+++ b/arch/arm/mach-bcmring/include/mach/vmalloc.h
@@ -22,4 +22,4 @@
* 0xe0000000 to 0xefffffff. This gives us 256 MB of vm space and handles
* larger physical memory designs better.
*/
-#define VMALLOC_END 0xf0000000
+#define VMALLOC_END 0xf0000000UL
diff --git a/arch/arm/mach-clps711x/include/mach/vmalloc.h b/arch/arm/mach-clps711x/include/mach/vmalloc.h
index 30b3a287ed88..467b96137e47 100644
--- a/arch/arm/mach-clps711x/include/mach/vmalloc.h
+++ b/arch/arm/mach-clps711x/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 9be261beae7d..2652af124acd 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -359,8 +359,8 @@ static struct clk_lookup dm355_clks[] = {
CLK(NULL, "uart1", &uart1_clk),
CLK(NULL, "uart2", &uart2_clk),
CLK("i2c_davinci.1", NULL, &i2c_clk),
- CLK("davinci-asp.0", NULL, &asp0_clk),
- CLK("davinci-asp.1", NULL, &asp1_clk),
+ CLK("davinci-mcbsp.0", NULL, &asp0_clk),
+ CLK("davinci-mcbsp.1", NULL, &asp1_clk),
CLK("davinci_mmc.0", NULL, &mmcsd0_clk),
CLK("davinci_mmc.1", NULL, &mmcsd1_clk),
CLK("spi_davinci.0", NULL, &spi0_clk),
@@ -664,7 +664,7 @@ static struct resource dm355_asp1_resources[] = {
};
static struct platform_device dm355_asp1_device = {
- .name = "davinci-asp",
+ .name = "davinci-mcbsp",
.id = 1,
.num_resources = ARRAY_SIZE(dm355_asp1_resources),
.resource = dm355_asp1_resources,
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index a12065e87266..c466d710d3c1 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -459,7 +459,7 @@ static struct clk_lookup dm365_clks[] = {
CLK(NULL, "usb", &usb_clk),
CLK("davinci_emac.1", NULL, &emac_clk),
CLK("davinci_voicecodec", NULL, &voicecodec_clk),
- CLK("davinci-asp.0", NULL, &asp0_clk),
+ CLK("davinci-mcbsp", NULL, &asp0_clk),
CLK(NULL, "rto", &rto_clk),
CLK(NULL, "mjcp", &mjcp_clk),
CLK(NULL, NULL, NULL),
@@ -922,8 +922,8 @@ static struct resource dm365_asp_resources[] = {
};
static struct platform_device dm365_asp_device = {
- .name = "davinci-asp",
- .id = 0,
+ .name = "davinci-mcbsp",
+ .id = -1,
.num_resources = ARRAY_SIZE(dm365_asp_resources),
.resource = dm365_asp_resources,
};
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 0608dd776a16..9a2376b3137c 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -302,7 +302,7 @@ static struct clk_lookup dm644x_clks[] = {
CLK("davinci_emac.1", NULL, &emac_clk),
CLK("i2c_davinci.1", NULL, &i2c_clk),
CLK("palm_bk3710", NULL, &ide_clk),
- CLK("davinci-asp", NULL, &asp_clk),
+ CLK("davinci-mcbsp", NULL, &asp_clk),
CLK("davinci_mmc.0", NULL, &mmcsd_clk),
CLK(NULL, "spi", &spi_clk),
CLK(NULL, "gpio", &gpio_clk),
@@ -580,7 +580,7 @@ static struct resource dm644x_asp_resources[] = {
};
static struct platform_device dm644x_asp_device = {
- .name = "davinci-asp",
+ .name = "davinci-mcbsp",
.id = -1,
.num_resources = ARRAY_SIZE(dm644x_asp_resources),
.resource = dm644x_asp_resources,
diff --git a/arch/arm/mach-ebsa110/include/mach/vmalloc.h b/arch/arm/mach-ebsa110/include/mach/vmalloc.h
index 60bde56fba4c..ea141b7a3e03 100644
--- a/arch/arm/mach-ebsa110/include/mach/vmalloc.h
+++ b/arch/arm/mach-ebsa110/include/mach/vmalloc.h
@@ -7,4 +7,4 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#define VMALLOC_END 0xdf000000
+#define VMALLOC_END 0xdf000000UL
diff --git a/arch/arm/mach-footbridge/include/mach/vmalloc.h b/arch/arm/mach-footbridge/include/mach/vmalloc.h
index 0ffbb7c85e59..40ba78e5782b 100644
--- a/arch/arm/mach-footbridge/include/mach/vmalloc.h
+++ b/arch/arm/mach-footbridge/include/mach/vmalloc.h
@@ -7,4 +7,4 @@
*/
-#define VMALLOC_END 0xf0000000
+#define VMALLOC_END 0xf0000000UL
diff --git a/arch/arm/mach-h720x/include/mach/vmalloc.h b/arch/arm/mach-h720x/include/mach/vmalloc.h
index a45915b88756..8520b4a4d4e6 100644
--- a/arch/arm/mach-h720x/include/mach/vmalloc.h
+++ b/arch/arm/mach-h720x/include/mach/vmalloc.h
@@ -5,6 +5,6 @@
#ifndef __ARCH_ARM_VMALLOC_H
#define __ARCH_ARM_VMALLOC_H
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
#endif
diff --git a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
index 026263c665ca..7e1e9dc2c8fc 100644
--- a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
@@ -250,9 +250,6 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
-#if defined(CONFIG_TOUCHSCREEN_ADS7846) \
- || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
-
#define ADS7846_PENDOWN (GPIO_PORTD | 25)
static void ads7846_dev_init(void)
@@ -273,9 +270,7 @@ static struct ads7846_platform_data ads7846_config __initdata = {
.get_pendown_state = ads7846_get_pendown_state,
.keep_vref_on = 1,
};
-#endif
-#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
static struct spi_board_info eukrea_mbimx27_spi_board_info[] __initdata = {
[0] = {
.modalias = "ads7846",
@@ -294,7 +289,6 @@ static const struct spi_imx_master eukrea_mbimx27_spi0_data __initconst = {
.chipselect = eukrea_mbimx27_spi_cs,
.num_chipselect = ARRAY_SIZE(eukrea_mbimx27_spi_cs),
};
-#endif
static struct i2c_board_info eukrea_mbimx27_i2c_devices[] = {
{
diff --git a/arch/arm/mach-integrator/include/mach/vmalloc.h b/arch/arm/mach-integrator/include/mach/vmalloc.h
index e056e7cf5645..2f5a2bafb11f 100644
--- a/arch/arm/mach-integrator/include/mach/vmalloc.h
+++ b/arch/arm/mach-integrator/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-msm/include/mach/vmalloc.h b/arch/arm/mach-msm/include/mach/vmalloc.h
index 31a32ad062dc..d138448eff16 100644
--- a/arch/arm/mach-msm/include/mach/vmalloc.h
+++ b/arch/arm/mach-msm/include/mach/vmalloc.h
@@ -16,7 +16,7 @@
#ifndef __ASM_ARCH_MSM_VMALLOC_H
#define __ASM_ARCH_MSM_VMALLOC_H
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
#endif
diff --git a/arch/arm/mach-mx25/devices-imx25.h b/arch/arm/mach-mx25/devices-imx25.h
index 93afa10b13cf..d94d282fa676 100644
--- a/arch/arm/mach-mx25/devices-imx25.h
+++ b/arch/arm/mach-mx25/devices-imx25.h
@@ -42,9 +42,9 @@ extern const struct imx_mxc_nand_data imx25_mxc_nand_data __initconst;
#define imx25_add_mxc_nand(pdata) \
imx_add_mxc_nand(&imx25_mxc_nand_data, pdata)
-extern const struct imx_spi_imx_data imx25_spi_imx_data[] __initconst;
+extern const struct imx_spi_imx_data imx25_cspi_data[] __initconst;
#define imx25_add_spi_imx(id, pdata) \
- imx_add_spi_imx(&imx25_spi_imx_data[id], pdata)
+ imx_add_spi_imx(&imx25_cspi_data[id], pdata)
#define imx25_add_spi_imx0(pdata) imx25_add_spi_imx(0, pdata)
#define imx25_add_spi_imx1(pdata) imx25_add_spi_imx(1, pdata)
#define imx25_add_spi_imx2(pdata) imx25_add_spi_imx(2, pdata)
diff --git a/arch/arm/mach-mx3/mach-pcm037_eet.c b/arch/arm/mach-mx3/mach-pcm037_eet.c
index 99e0894e07db..fda56545d2fd 100644
--- a/arch/arm/mach-mx3/mach-pcm037_eet.c
+++ b/arch/arm/mach-mx3/mach-pcm037_eet.c
@@ -14,6 +14,7 @@
#include <mach/common.h>
#include <mach/iomux-mx3.h>
+#include <mach/spi.h>
#include <asm/mach-types.h>
@@ -59,14 +60,12 @@ static struct spi_board_info pcm037_spi_dev[] = {
};
/* Platform Data for MXC CSPI */
-#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
static int pcm037_spi1_cs[] = {MXC_SPI_CS(1), IOMUX_TO_GPIO(MX31_PIN_KEY_COL7)};
static const struct spi_imx_master pcm037_spi1_pdata __initconst = {
.chipselect = pcm037_spi1_cs,
.num_chipselect = ARRAY_SIZE(pcm037_spi1_cs),
};
-#endif
/* GPIO-keys input device */
static struct gpio_keys_button pcm037_gpio_keys[] = {
@@ -171,7 +170,7 @@ static struct platform_device pcm037_gpio_keys_device = {
},
};
-static int eet_init_devices(void)
+static int __init eet_init_devices(void)
{
if (!machine_is_pcm037() || pcm037_variant() != PCM037_EET)
return 0;
diff --git a/arch/arm/mach-netx/include/mach/vmalloc.h b/arch/arm/mach-netx/include/mach/vmalloc.h
index 7cca3574308f..871f1ef7bff5 100644
--- a/arch/arm/mach-netx/include/mach/vmalloc.h
+++ b/arch/arm/mach-netx/include/mach/vmalloc.h
@@ -16,4 +16,4 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-omap1/include/mach/vmalloc.h b/arch/arm/mach-omap1/include/mach/vmalloc.h
index b001f67d695b..22ec4a479577 100644
--- a/arch/arm/mach-omap1/include/mach/vmalloc.h
+++ b/arch/arm/mach-omap1/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define VMALLOC_END 0xd8000000
+#define VMALLOC_END 0xd8000000UL
diff --git a/arch/arm/mach-omap2/include/mach/vmalloc.h b/arch/arm/mach-omap2/include/mach/vmalloc.h
index 4da31e997efe..866319947760 100644
--- a/arch/arm/mach-omap2/include/mach/vmalloc.h
+++ b/arch/arm/mach-omap2/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define VMALLOC_END 0xf8000000
+#define VMALLOC_END 0xf8000000UL
diff --git a/arch/arm/mach-pnx4008/include/mach/vmalloc.h b/arch/arm/mach-pnx4008/include/mach/vmalloc.h
index 31b65ee07b0b..184913c71141 100644
--- a/arch/arm/mach-pnx4008/include/mach/vmalloc.h
+++ b/arch/arm/mach-pnx4008/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
* area for the same reason. ;)
*/
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-rpc/include/mach/vmalloc.h b/arch/arm/mach-rpc/include/mach/vmalloc.h
index 3bcd86fadb81..fb700228637a 100644
--- a/arch/arm/mach-rpc/include/mach/vmalloc.h
+++ b/arch/arm/mach-rpc/include/mach/vmalloc.h
@@ -7,4 +7,4 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#define VMALLOC_END 0xdc000000
+#define VMALLOC_END 0xdc000000UL
diff --git a/arch/arm/mach-shark/include/mach/vmalloc.h b/arch/arm/mach-shark/include/mach/vmalloc.h
index 8e845b6a7cb5..b10df988526d 100644
--- a/arch/arm/mach-shark/include/mach/vmalloc.h
+++ b/arch/arm/mach-shark/include/mach/vmalloc.h
@@ -1,4 +1,4 @@
/*
* arch/arm/mach-shark/include/mach/vmalloc.h
*/
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 7b2edd799fb4..285dbbd4ad2c 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -20,6 +20,13 @@ config ARCH_SH7372
select SH_CLK_CPG
select ARCH_WANT_OPTIONAL_GPIOLIB
+config ARCH_SH73A0
+ bool "SH-Mobile AG5 (R8A73A00)"
+ select CPU_V7
+ select SH_CLK_CPG
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARM_GIC
+
comment "SH-Mobile Board Type"
config MACH_G3EVM
@@ -51,6 +58,10 @@ config AP4EVB_WVGA
endchoice
+config MACH_AG5EVM
+ bool "AG5EVM board"
+ depends on ARCH_SH73A0
+
config MACH_MACKEREL
bool "mackerel board"
depends on ARCH_SH7372
@@ -63,8 +74,8 @@ menu "Memory configuration"
config MEMORY_START
hex "Physical memory start address"
default "0x50000000" if MACH_G3EVM
- default "0x40000000" if MACH_G4EVM
- default "0x40000000" if MACH_AP4EVB || MACH_MACKEREL
+ default "0x40000000" if MACH_G4EVM || MACH_AP4EVB || MACH_AG5EVM || \
+ MACH_MACKEREL
default "0x00000000"
---help---
Tweak this only when porting to a new machine which does not
@@ -76,6 +87,7 @@ config MEMORY_SIZE
default "0x08000000" if MACH_G3EVM
default "0x08000000" if MACH_G4EVM
default "0x10000000" if MACH_AP4EVB
+ default "0x20000000" if MACH_AG5EVM
default "0x04000000"
help
This sets the default memory size assumed by your kernel. It can
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 11f3242ffc1d..2d8328d5dd57 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -9,15 +9,18 @@ obj-y := timer.o console.o clock.o pm_runtime.o
obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o
obj-$(CONFIG_ARCH_SH7377) += setup-sh7377.o clock-sh7377.o intc-sh7377.o
obj-$(CONFIG_ARCH_SH7372) += setup-sh7372.o clock-sh7372.o intc-sh7372.o
+obj-$(CONFIG_ARCH_SH73A0) += setup-sh73a0.o clock-sh73a0.o
# Pinmux setup
pfc-$(CONFIG_ARCH_SH7367) := pfc-sh7367.o
pfc-$(CONFIG_ARCH_SH7377) := pfc-sh7377.o
pfc-$(CONFIG_ARCH_SH7372) := pfc-sh7372.o
+pfc-$(CONFIG_ARCH_SH73A0) := pfc-sh73a0.o
obj-$(CONFIG_GENERIC_GPIO) += $(pfc-y)
# Board objects
obj-$(CONFIG_MACH_G3EVM) += board-g3evm.o
obj-$(CONFIG_MACH_G4EVM) += board-g4evm.o
obj-$(CONFIG_MACH_AP4EVB) += board-ap4evb.o
+obj-$(CONFIG_MACH_AG5EVM) += board-ag5evm.o
obj-$(CONFIG_MACH_MACKEREL) += board-mackerel.o
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
new file mode 100644
index 000000000000..d4c82bd42524
--- /dev/null
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -0,0 +1,263 @@
+/*
+ * arch/arm/mach-shmobile/board-ag5evm.c
+ *
+ * Copyright (C) 2010 Takashi Yoshii <yoshii.takashi.zj@renesas.com>
+ * Copyright (C) 2009 Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/serial_sci.h>
+#include <linux/smsc911x.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/input/sh_keysc.h>
+
+#include <sound/sh_fsi.h>
+
+#include <mach/hardware.h>
+#include <mach/sh73a0.h>
+#include <mach/common.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+#include <asm/hardware/gic.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/traps.h>
+
+static struct resource smsc9220_resources[] = {
+ [0] = {
+ .start = 0x14000000,
+ .end = 0x14000000 + SZ_64K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = gic_spi(33), /* PINT1 */
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct smsc911x_platform_config smsc9220_platdata = {
+ .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+ .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
+};
+
+static struct platform_device eth_device = {
+ .name = "smsc911x",
+ .id = 0,
+ .dev = {
+ .platform_data = &smsc9220_platdata,
+ },
+ .resource = smsc9220_resources,
+ .num_resources = ARRAY_SIZE(smsc9220_resources),
+};
+
+static struct sh_keysc_info keysc_platdata = {
+ .mode = SH_KEYSC_MODE_6,
+ .scan_timing = 3,
+ .delay = 100,
+ .keycodes = {
+ KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F, KEY_G,
+ KEY_H, KEY_I, KEY_J, KEY_K, KEY_L, KEY_M, KEY_N,
+ KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U,
+ KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z, KEY_HOME, KEY_SLEEP,
+ KEY_SPACE, KEY_9, KEY_6, KEY_3, KEY_WAKEUP, KEY_RIGHT, \
+ KEY_COFFEE,
+ KEY_0, KEY_8, KEY_5, KEY_2, KEY_DOWN, KEY_ENTER, KEY_UP,
+ KEY_KPASTERISK, KEY_7, KEY_4, KEY_1, KEY_STOP, KEY_LEFT, \
+ KEY_COMPUTER,
+ },
+};
+
+static struct resource keysc_resources[] = {
+ [0] = {
+ .name = "KEYSC",
+ .start = 0xe61b0000,
+ .end = 0xe61b0098 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = gic_spi(71),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device keysc_device = {
+ .name = "sh_keysc",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(keysc_resources),
+ .resource = keysc_resources,
+ .dev = {
+ .platform_data = &keysc_platdata,
+ },
+};
+
+/* FSI A */
+static struct sh_fsi_platform_info fsi_info = {
+ .porta_flags = SH_FSI_OUT_SLAVE_MODE |
+ SH_FSI_IN_SLAVE_MODE |
+ SH_FSI_OFMT(I2S) |
+ SH_FSI_IFMT(I2S),
+};
+
+static struct resource fsi_resources[] = {
+ [0] = {
+ .name = "FSI",
+ .start = 0xEC230000,
+ .end = 0xEC230400 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = gic_spi(146),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device fsi_device = {
+ .name = "sh_fsi2",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(fsi_resources),
+ .resource = fsi_resources,
+ .dev = {
+ .platform_data = &fsi_info,
+ },
+};
+
+static struct platform_device *ag5evm_devices[] __initdata = {
+ &eth_device,
+ &keysc_device,
+ &fsi_device,
+};
+
+static struct map_desc ag5evm_io_desc[] __initdata = {
+ /* create a 1:1 identity map for 0xe6xxxxxx
+ * used by CPGA, INTC and PFC.
+ */
+ {
+ .virtual = 0xe6000000,
+ .pfn = __phys_to_pfn(0xe6000000),
+ .length = 256 << 20,
+ .type = MT_DEVICE_NONSHARED
+ },
+};
+
+static void __init ag5evm_map_io(void)
+{
+ iotable_init(ag5evm_io_desc, ARRAY_SIZE(ag5evm_io_desc));
+
+ /* setup early devices and console here as well */
+ sh73a0_add_early_devices();
+ shmobile_setup_console();
+}
+
+#define PINTC_ADDR 0xe6900000
+#define PINTER0A (PINTC_ADDR + 0xa0)
+#define PINTCR0A (PINTC_ADDR + 0xb0)
+
+void __init ag5evm_init_irq(void)
+{
+ /* setup PINT: enable PINTA2 as active low */
+ __raw_writel(__raw_readl(PINTER0A) | (1<<29), PINTER0A);
+ __raw_writew(__raw_readw(PINTCR0A) | (2<<10), PINTCR0A);
+
+ gic_dist_init(0, __io(0xf0001000), 29);
+ gic_cpu_init(0, __io(0xf0000100));
+}
+
+static void __init ag5evm_init(void)
+{
+ sh73a0_pinmux_init();
+
+ /* enable SCIFA2 */
+ gpio_request(GPIO_FN_SCIFA2_TXD1, NULL);
+ gpio_request(GPIO_FN_SCIFA2_RXD1, NULL);
+ gpio_request(GPIO_FN_SCIFA2_RTS1_, NULL);
+ gpio_request(GPIO_FN_SCIFA2_CTS1_, NULL);
+
+ /* enable KEYSC */
+ gpio_request(GPIO_FN_KEYIN0, NULL);
+ gpio_request(GPIO_FN_KEYIN1, NULL);
+ gpio_request(GPIO_FN_KEYIN2, NULL);
+ gpio_request(GPIO_FN_KEYIN3, NULL);
+ gpio_request(GPIO_FN_KEYIN4, NULL);
+ gpio_request(GPIO_FN_KEYIN5, NULL);
+ gpio_request(GPIO_FN_KEYIN6, NULL);
+ gpio_request(GPIO_FN_KEYIN7, NULL);
+ gpio_request(GPIO_FN_KEYOUT0, NULL);
+ gpio_request(GPIO_FN_KEYOUT1, NULL);
+ gpio_request(GPIO_FN_KEYOUT2, NULL);
+ gpio_request(GPIO_FN_KEYOUT3, NULL);
+ gpio_request(GPIO_FN_KEYOUT4, NULL);
+ gpio_request(GPIO_FN_KEYOUT5, NULL);
+ gpio_request(GPIO_FN_PORT59_KEYOUT6, NULL);
+ gpio_request(GPIO_FN_PORT58_KEYOUT7, NULL);
+ gpio_request(GPIO_FN_KEYOUT8, NULL);
+ gpio_request(GPIO_FN_PORT149_KEYOUT9, NULL);
+
+ /* enable I2C 2 and 3 */
+ gpio_request(GPIO_FN_PORT236_I2C_SDA2, NULL);
+ gpio_request(GPIO_FN_PORT237_I2C_SCL2, NULL);
+ gpio_request(GPIO_FN_PORT248_I2C_SCL3, NULL);
+ gpio_request(GPIO_FN_PORT249_I2C_SDA3, NULL);
+
+ /* enable SMSC911X */
+ gpio_request(GPIO_PORT144, NULL); /* PINTA2 */
+ gpio_direction_input(GPIO_PORT144);
+ gpio_request(GPIO_PORT145, NULL); /* RESET */
+ gpio_direction_output(GPIO_PORT145, 1);
+
+ /* FSI A */
+ gpio_request(GPIO_FN_FSIACK, NULL);
+ gpio_request(GPIO_FN_FSIAILR, NULL);
+ gpio_request(GPIO_FN_FSIAIBT, NULL);
+ gpio_request(GPIO_FN_FSIAISLD, NULL);
+ gpio_request(GPIO_FN_FSIAOSLD, NULL);
+
+#ifdef CONFIG_CACHE_L2X0
+ /* Shared attribute override enable, 64K*8way */
+ l2x0_init(__io(0xf0100000), 0x00460000, 0xc2000fff);
+#endif
+ sh73a0_add_standard_devices();
+ platform_add_devices(ag5evm_devices, ARRAY_SIZE(ag5evm_devices));
+}
+
+static void __init ag5evm_timer_init(void)
+{
+ sh73a0_clock_init();
+ shmobile_timer.init();
+ return;
+}
+
+struct sys_timer ag5evm_timer = {
+ .init = ag5evm_timer_init,
+};
+
+MACHINE_START(AG5EVM, "ag5evm")
+ .map_io = ag5evm_map_io,
+ .init_irq = ag5evm_init_irq,
+ .init_machine = ag5evm_init,
+ .timer = &ag5evm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index d3260542b943..5b079529c948 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -272,6 +272,15 @@ static struct resource sh_mmcif_resources[] = {
},
};
+static struct sh_mmcif_dma sh_mmcif_dma = {
+ .chan_priv_rx = {
+ .slave_id = SHDMA_SLAVE_MMCIF_RX,
+ },
+ .chan_priv_tx = {
+ .slave_id = SHDMA_SLAVE_MMCIF_TX,
+ },
+};
+
static struct sh_mmcif_plat_data sh_mmcif_plat = {
.sup_pclk = 0,
.ocr = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34,
@@ -279,6 +288,7 @@ static struct sh_mmcif_plat_data sh_mmcif_plat = {
MMC_CAP_8_BIT_DATA |
MMC_CAP_NEEDS_POLL,
.get_cd = slot_cn7_get_cd,
+ .dma = &sh_mmcif_dma,
};
static struct platform_device sh_mmcif_device = {
@@ -567,38 +577,127 @@ static struct platform_device *qhd_devices[] __initdata = {
/* FSI */
#define IRQ_FSI evt2irq(0x1840)
+static int __fsi_set_rate(struct clk *clk, long rate, int enable)
+{
+ int ret = 0;
-static int fsi_set_rate(int is_porta, int rate)
+ if (rate <= 0)
+ return ret;
+
+ if (enable) {
+ ret = clk_set_rate(clk, rate);
+ if (0 == ret)
+ ret = clk_enable(clk);
+ } else {
+ clk_disable(clk);
+ }
+
+ return ret;
+}
+
+static int __fsi_set_round_rate(struct clk *clk, long rate, int enable)
+{
+ return __fsi_set_rate(clk, clk_round_rate(clk, rate), enable);
+}
+
+static int fsi_ak4642_set_rate(struct device *dev, int rate, int enable)
+{
+ struct clk *fsia_ick;
+ struct clk *fsiack;
+ int ret = -EIO;
+
+ fsia_ick = clk_get(dev, "icka");
+ if (IS_ERR(fsia_ick))
+ return PTR_ERR(fsia_ick);
+
+ /*
+ * FSIACK is connected to the AK4642 and uses the external clock
+ * pin from it; it is now the parent of fsia_ick.
+ */
+ fsiack = clk_get_parent(fsia_ick);
+ if (!fsiack)
+ goto fsia_ick_out;
+
+ /*
+ * We get a 1/1 divided clock by setting the same rate on fsiack and fsia_ick.
+ *
+ ** FIXME **
+ * Because the freq_table entries of the external clock (fsiack) are all 0,
+ * clk_round_rate() returns 0, so __fsi_set_rate() is used here instead.
+ */
+ ret = __fsi_set_rate(fsiack, rate, enable);
+ if (ret < 0)
+ goto fsiack_out;
+
+ ret = __fsi_set_round_rate(fsia_ick, rate, enable);
+ if ((ret < 0) && enable)
+ __fsi_set_round_rate(fsiack, rate, 0); /* disable FSI ACK */
+
+fsiack_out:
+ clk_put(fsiack);
+
+fsia_ick_out:
+ clk_put(fsia_ick);
+
+ return 0;
+}
+
+static int fsi_hdmi_set_rate(struct device *dev, int rate, int enable)
{
struct clk *fsib_clk;
struct clk *fdiv_clk = &sh7372_fsidivb_clk;
+ long fsib_rate = 0;
+ long fdiv_rate = 0;
+ int ackmd_bpfmd;
int ret;
- /* set_rate is not needed if port A */
- if (is_porta)
- return 0;
-
- fsib_clk = clk_get(NULL, "fsib_clk");
- if (IS_ERR(fsib_clk))
- return -EINVAL;
-
switch (rate) {
case 44100:
- clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 11283000));
- ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+ fsib_rate = rate * 256;
+ ackmd_bpfmd = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
break;
case 48000:
- clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 85428000));
- clk_set_rate(fdiv_clk, clk_round_rate(fdiv_clk, 12204000));
- ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+ fsib_rate = 85428000; /* around 48kHz x 256 x 7 */
+ fdiv_rate = rate * 256;
+ ackmd_bpfmd = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
break;
default:
pr_err("unsupported rate in FSI2 port B\n");
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
+ /* FSI B setting */
+ fsib_clk = clk_get(dev, "ickb");
+ if (IS_ERR(fsib_clk))
+ return -EIO;
+
+ ret = __fsi_set_round_rate(fsib_clk, fsib_rate, enable);
clk_put(fsib_clk);
+ if (ret < 0)
+ return ret;
+
+ /* FSI DIV setting */
+ ret = __fsi_set_round_rate(fdiv_clk, fdiv_rate, enable);
+ if (ret < 0) {
+ /* disable FSI B */
+ if (enable)
+ __fsi_set_round_rate(fsib_clk, fsib_rate, 0);
+ return ret;
+ }
+
+ return ackmd_bpfmd;
+}
+
+static int fsi_set_rate(struct device *dev, int is_porta, int rate, int enable)
+{
+ int ret;
+
+ if (is_porta)
+ ret = fsi_ak4642_set_rate(dev, rate, enable);
+ else
+ ret = fsi_hdmi_set_rate(dev, rate, enable);
return ret;
}
@@ -880,6 +979,11 @@ static int __init hdmi_init_pm_clock(void)
goto out;
}
+ ret = clk_enable(&sh7372_pllc2_clk);
+ if (ret < 0) {
+ pr_err("Cannot enable pllc2 clock\n");
+ goto out;
+ }
pr_debug("PLLC2 set frequency %lu\n", rate);
ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk);
@@ -896,23 +1000,11 @@ out:
device_initcall(hdmi_init_pm_clock);
-#define FSIACK_DUMMY_RATE 48000
static int __init fsi_init_pm_clock(void)
{
struct clk *fsia_ick;
int ret;
- /*
- * FSIACK is connected to AK4642,
- * and the rate is depend on playing sound rate.
- * So, set dummy rate (= 48k) here
- */
- ret = clk_set_rate(&sh7372_fsiack_clk, FSIACK_DUMMY_RATE);
- if (ret < 0) {
- pr_err("Cannot set FSIACK dummy rate: %d\n", ret);
- return ret;
- }
-
fsia_ick = clk_get(&fsi_device.dev, "icka");
if (IS_ERR(fsia_ick)) {
ret = PTR_ERR(fsia_ick);
@@ -921,16 +1013,9 @@ static int __init fsi_init_pm_clock(void)
}
ret = clk_set_parent(fsia_ick, &sh7372_fsiack_clk);
- if (ret < 0) {
- pr_err("Cannot set FSI-A parent: %d\n", ret);
- goto out;
- }
-
- ret = clk_set_rate(fsia_ick, FSIACK_DUMMY_RATE);
if (ret < 0)
- pr_err("Cannot set FSI-A rate: %d\n", ret);
+ pr_err("Cannot set FSI-A parent: %d\n", ret);
-out:
clk_put(fsia_ick);
return ret;
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 7db31e6c6bf2..eb92fefc084d 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -220,8 +220,7 @@ static void pllc2_disable(struct clk *clk)
__raw_writel(__raw_readl(PLLC2CR) & ~0x80000000, PLLC2CR);
}
-static int pllc2_set_rate(struct clk *clk,
- unsigned long rate, int algo_id)
+static int pllc2_set_rate(struct clk *clk, unsigned long rate)
{
unsigned long value;
int idx;
@@ -230,21 +229,13 @@ static int pllc2_set_rate(struct clk *clk,
if (idx < 0)
return idx;
- if (rate == clk->parent->rate) {
- pllc2_disable(clk);
- return 0;
- }
+ if (rate == clk->parent->rate)
+ return -EINVAL;
value = __raw_readl(PLLC2CR) & ~(0x3f << 24);
- if (value & 0x80000000)
- pllc2_disable(clk);
-
__raw_writel((value & ~0x80000000) | ((idx + 19) << 24), PLLC2CR);
- if (value & 0x80000000)
- return pllc2_enable(clk);
-
return 0;
}
@@ -453,32 +444,24 @@ static int fsidiv_enable(struct clk *clk)
unsigned long value;
value = __raw_readl(clk->mapping->base) >> 16;
- if (value < 2) {
- fsidiv_disable(clk);
- return -ENOENT;
- }
+ if (value < 2)
+ return -EIO;
__raw_writel((value << 16) | 0x3, clk->mapping->base);
return 0;
}
-static int fsidiv_set_rate(struct clk *clk,
- unsigned long rate, int algo_id)
+static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
{
int idx;
- if (clk->parent->rate == rate) {
- fsidiv_disable(clk);
- return 0;
- }
-
idx = (clk->parent->rate / rate) & 0xffff;
if (idx < 2)
- return -ENOENT;
+ return -EINVAL;
__raw_writel(idx << 16, clk->mapping->base);
- return fsidiv_enable(clk);
+ return 0;
}
static struct clk_ops fsidiv_clk_ops = {
@@ -609,8 +592,6 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
CLKDEV_CON_ID("fmsi_clk", &div6_clks[DIV6_FMSI]),
CLKDEV_CON_ID("fmso_clk", &div6_clks[DIV6_FMSO]),
- CLKDEV_CON_ID("fsia_clk", &div6_reparent_clks[DIV6_FSIA]),
- CLKDEV_CON_ID("fsib_clk", &div6_reparent_clks[DIV6_FSIB]),
CLKDEV_CON_ID("sub_clk", &div6_clks[DIV6_SUB]),
CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_SPU]),
CLKDEV_CON_ID("vou_clk", &div6_clks[DIV6_VOU]),
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
new file mode 100644
index 000000000000..7c8b5179e444
--- /dev/null
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -0,0 +1,350 @@
+/*
+ * sh73a0 clock framework support
+ *
+ * Copyright (C) 2010 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/sh_clk.h>
+#include <mach/common.h>
+#include <asm/clkdev.h>
+
+#define FRQCRA 0xe6150000
+#define FRQCRB 0xe6150004
+#define FRQCRD 0xe61500e4
+#define VCLKCR1 0xe6150008
+#define VCLKCR2 0xe615000C
+#define VCLKCR3 0xe615001C
+#define ZBCKCR 0xe6150010
+#define FLCKCR 0xe6150014
+#define SD0CKCR 0xe6150074
+#define SD1CKCR 0xe6150078
+#define SD2CKCR 0xe615007C
+#define FSIACKCR 0xe6150018
+#define FSIBCKCR 0xe6150090
+#define SUBCKCR 0xe6150080
+#define SPUACKCR 0xe6150084
+#define SPUVCKCR 0xe6150094
+#define MSUCKCR 0xe6150088
+#define HSICKCR 0xe615008C
+#define MFCK1CR 0xe6150098
+#define MFCK2CR 0xe615009C
+#define DSITCKCR 0xe6150060
+#define DSI0PCKCR 0xe6150064
+#define DSI1PCKCR 0xe6150068
+#define DSI0PHYCR 0xe615006C
+#define DSI1PHYCR 0xe6150070
+#define PLLECR 0xe61500d0
+#define PLL0CR 0xe61500d8
+#define PLL1CR 0xe6150028
+#define PLL2CR 0xe615002c
+#define PLL3CR 0xe61500dc
+#define SMSTPCR0 0xe6150130
+#define SMSTPCR1 0xe6150134
+#define SMSTPCR2 0xe6150138
+#define SMSTPCR3 0xe615013c
+#define SMSTPCR4 0xe6150140
+#define SMSTPCR5 0xe6150144
+#define CKSCR 0xe61500c0
+
+/* Fixed 32 KHz root clock from EXTALR pin */
+static struct clk r_clk = {
+ .rate = 32768,
+};
+
+/*
+ * 26MHz default rate for the EXTAL1 root input clock.
+ * If needed, reset this with clk_set_rate() from the platform code.
+ */
+struct clk sh73a0_extal1_clk = {
+ .rate = 26000000,
+};
+
+/*
+ * 48MHz default rate for the EXTAL2 root input clock.
+ * If needed, reset this with clk_set_rate() from the platform code.
+ */
+struct clk sh73a0_extal2_clk = {
+ .rate = 48000000,
+};
+
+/* A fixed divide-by-2 block */
+static unsigned long div2_recalc(struct clk *clk)
+{
+ return clk->parent->rate / 2;
+}
+
+static struct clk_ops div2_clk_ops = {
+ .recalc = div2_recalc,
+};
+
+/* Divide extal1 by two */
+static struct clk extal1_div2_clk = {
+ .ops = &div2_clk_ops,
+ .parent = &sh73a0_extal1_clk,
+};
+
+/* Divide extal2 by two */
+static struct clk extal2_div2_clk = {
+ .ops = &div2_clk_ops,
+ .parent = &sh73a0_extal2_clk,
+};
+
+static struct clk_ops main_clk_ops = {
+ .recalc = followparent_recalc,
+};
+
+/* Main clock */
+static struct clk main_clk = {
+ .ops = &main_clk_ops,
+};
+
+/* PLL0, PLL1, PLL2, PLL3 */
+static unsigned long pll_recalc(struct clk *clk)
+{
+ unsigned long mult = 1;
+
+ if (__raw_readl(PLLECR) & (1 << clk->enable_bit))
+ mult = (((__raw_readl(clk->enable_reg) >> 24) & 0x3f) + 1);
+
+ return clk->parent->rate * mult;
+}
+
+static struct clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll0_clk = {
+ .ops = &pll_clk_ops,
+ .flags = CLK_ENABLE_ON_INIT,
+ .parent = &main_clk,
+ .enable_reg = (void __iomem *)PLL0CR,
+ .enable_bit = 0,
+};
+
+static struct clk pll1_clk = {
+ .ops = &pll_clk_ops,
+ .flags = CLK_ENABLE_ON_INIT,
+ .parent = &main_clk,
+ .enable_reg = (void __iomem *)PLL1CR,
+ .enable_bit = 1,
+};
+
+static struct clk pll2_clk = {
+ .ops = &pll_clk_ops,
+ .flags = CLK_ENABLE_ON_INIT,
+ .parent = &main_clk,
+ .enable_reg = (void __iomem *)PLL2CR,
+ .enable_bit = 2,
+};
+
+static struct clk pll3_clk = {
+ .ops = &pll_clk_ops,
+ .flags = CLK_ENABLE_ON_INIT,
+ .parent = &main_clk,
+ .enable_reg = (void __iomem *)PLL3CR,
+ .enable_bit = 3,
+};
+
+/* Divide PLL1 by two */
+static struct clk pll1_div2_clk = {
+ .ops = &div2_clk_ops,
+ .parent = &pll1_clk,
+};
+
+static struct clk *main_clks[] = {
+ &r_clk,
+ &sh73a0_extal1_clk,
+ &sh73a0_extal2_clk,
+ &extal1_div2_clk,
+ &extal2_div2_clk,
+ &main_clk,
+ &pll0_clk,
+ &pll1_clk,
+ &pll2_clk,
+ &pll3_clk,
+ &pll1_div2_clk,
+};
+
+static void div4_kick(struct clk *clk)
+{
+ unsigned long value;
+
+ /* set KICK bit in FRQCRB to update hardware setting */
+ value = __raw_readl(FRQCRB);
+ value |= (1 << 31);
+ __raw_writel(value, FRQCRB);
+}
+
+static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 18,
+ 24, 0, 36, 48 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = divisors,
+ .nr_divisors = ARRAY_SIZE(divisors),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+ .kick = div4_kick,
+};
+
+enum { DIV4_I, DIV4_ZG, DIV4_M3, DIV4_B, DIV4_M1, DIV4_M2,
+ DIV4_Z, DIV4_ZTR, DIV4_ZT, DIV4_ZX, DIV4_HP, DIV4_NR };
+
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll1_clk, _reg, _bit, _mask, _flags)
+
+static struct clk div4_clks[DIV4_NR] = {
+ [DIV4_I] = DIV4(FRQCRA, 20, 0xfff, CLK_ENABLE_ON_INIT),
+ [DIV4_ZG] = DIV4(FRQCRA, 16, 0xbff, CLK_ENABLE_ON_INIT),
+ [DIV4_M3] = DIV4(FRQCRA, 12, 0xfff, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCRA, 8, 0xfff, CLK_ENABLE_ON_INIT),
+ [DIV4_M1] = DIV4(FRQCRA, 4, 0xfff, 0),
+ [DIV4_M2] = DIV4(FRQCRA, 0, 0xfff, 0),
+ [DIV4_Z] = DIV4(FRQCRB, 24, 0xbff, 0),
+ [DIV4_ZTR] = DIV4(FRQCRB, 20, 0xfff, 0),
+ [DIV4_ZT] = DIV4(FRQCRB, 16, 0xfff, 0),
+ [DIV4_ZX] = DIV4(FRQCRB, 12, 0xfff, 0),
+ [DIV4_HP] = DIV4(FRQCRB, 4, 0xfff, 0),
+};
+
+enum { DIV6_VCK1, DIV6_VCK2, DIV6_VCK3, DIV6_ZB1,
+ DIV6_FLCTL, DIV6_SDHI0, DIV6_SDHI1, DIV6_SDHI2,
+ DIV6_FSIA, DIV6_FSIB, DIV6_SUB,
+ DIV6_SPUA, DIV6_SPUV, DIV6_MSU,
+ DIV6_HSI, DIV6_MFG1, DIV6_MFG2,
+ DIV6_DSIT, DIV6_DSI0P, DIV6_DSI1P,
+ DIV6_NR };
+
+static struct clk div6_clks[DIV6_NR] = {
+ [DIV6_VCK1] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR1, 0),
+ [DIV6_VCK2] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR2, 0),
+ [DIV6_VCK3] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR3, 0),
+ [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, 0),
+ [DIV6_FLCTL] = SH_CLK_DIV6(&pll1_div2_clk, FLCKCR, 0),
+ [DIV6_SDHI0] = SH_CLK_DIV6(&pll1_div2_clk, SD0CKCR, 0),
+ [DIV6_SDHI1] = SH_CLK_DIV6(&pll1_div2_clk, SD1CKCR, 0),
+ [DIV6_SDHI2] = SH_CLK_DIV6(&pll1_div2_clk, SD2CKCR, 0),
+ [DIV6_FSIA] = SH_CLK_DIV6(&pll1_div2_clk, FSIACKCR, 0),
+ [DIV6_FSIB] = SH_CLK_DIV6(&pll1_div2_clk, FSIBCKCR, 0),
+ [DIV6_SUB] = SH_CLK_DIV6(&sh73a0_extal2_clk, SUBCKCR, 0),
+ [DIV6_SPUA] = SH_CLK_DIV6(&pll1_div2_clk, SPUACKCR, 0),
+ [DIV6_SPUV] = SH_CLK_DIV6(&pll1_div2_clk, SPUVCKCR, 0),
+ [DIV6_MSU] = SH_CLK_DIV6(&pll1_div2_clk, MSUCKCR, 0),
+ [DIV6_HSI] = SH_CLK_DIV6(&pll1_div2_clk, HSICKCR, 0),
+ [DIV6_MFG1] = SH_CLK_DIV6(&pll1_div2_clk, MFCK1CR, 0),
+ [DIV6_MFG2] = SH_CLK_DIV6(&pll1_div2_clk, MFCK2CR, 0),
+ [DIV6_DSIT] = SH_CLK_DIV6(&pll1_div2_clk, DSITCKCR, 0),
+ [DIV6_DSI0P] = SH_CLK_DIV6(&pll1_div2_clk, DSI0PCKCR, 0),
+ [DIV6_DSI1P] = SH_CLK_DIV6(&pll1_div2_clk, DSI1PCKCR, 0),
+};
+
+enum { MSTP001,
+ MSTP116,
+ MSTP219,
+ MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
+ MSTP331, MSTP329, MSTP323,
+ MSTP411, MSTP410, MSTP403,
+ MSTP_NR };
+
+#define MSTP(_parent, _reg, _bit, _flags) \
+ SH_CLK_MSTP32(_parent, _reg, _bit, _flags)
+
+static struct clk mstp_clks[MSTP_NR] = {
+ [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */
+ [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
+ [MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */
+ [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
+ [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
+ [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
+ [MSTP203] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 3, 0), /* SCIFA1 */
+ [MSTP202] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 2, 0), /* SCIFA2 */
+ [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
+ [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
+ [MSTP331] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 31, 0), /* SCIFA6 */
+ [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
+ [MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
+ [MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
+ [MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
+ [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC0 */
+};
+
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("r_clk", &r_clk),
+
+ /* MSTP32 clocks */
+ CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
+ CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
+ CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
+ CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
+ CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
+ CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */
+ CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */
+ CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */
+ CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
+ CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP331]), /* SCIFA6 */
+ CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
+ CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
+ CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
+ CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */
+ CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC0 */
+};
+
+void __init sh73a0_clock_init(void)
+{
+ int k, ret = 0;
+
+ /* detect main clock parent */
+ switch ((__raw_readl(CKSCR) >> 24) & 0x03) {
+ case 0:
+ main_clk.parent = &sh73a0_extal1_clk;
+ break;
+ case 1:
+ main_clk.parent = &extal1_div2_clk;
+ break;
+ case 2:
+ main_clk.parent = &sh73a0_extal2_clk;
+ break;
+ case 3:
+ main_clk.parent = &extal2_div2_clk;
+ break;
+ }
+
+ for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+ ret = clk_register(main_clks[k]);
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
+
+ if (!ret)
+ ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ if (!ret)
+ clk_init();
+ else
+ panic("failed to setup sh73a0 clocks\n");
+}
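As the comments on sh73a0_extal1_clk/sh73a0_extal2_clk note, boards with different crystals are expected to override the default rates before the clock tree is registered; a minimal hedged sketch (the board name and the 24 MHz value are made up for illustration):

/* Hypothetical board code: override EXTAL1 before sh73a0_clock_init(). */
#include <linux/clk.h>
#include <mach/common.h>

static void __init myboard_timer_init(void)
{
	clk_set_rate(&sh73a0_extal1_clk, 24000000);	/* e.g. a 24 MHz crystal */
	sh73a0_clock_init();
	shmobile_timer.init();
}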
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index efeef778a875..15932fd2435e 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -30,4 +30,11 @@ extern void sh7372_pinmux_init(void);
extern struct clk sh7372_extal1_clk;
extern struct clk sh7372_extal2_clk;
+extern void sh73a0_add_early_devices(void);
+extern void sh73a0_add_standard_devices(void);
+extern void sh73a0_clock_init(void);
+extern void sh73a0_pinmux_init(void);
+extern struct clk sh73a0_extal1_clk;
+extern struct clk sh73a0_extal2_clk;
+
#endif /* __ARCH_MACH_COMMON_H */
diff --git a/arch/arm/mach-shmobile/include/mach/entry-macro-gic.S b/arch/arm/mach-shmobile/include/mach/entry-macro-gic.S
new file mode 100644
index 000000000000..50b1f16d54a2
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/entry-macro-gic.S
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <mach/hardware.h>
+#include <asm/hardware/gic.h>
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ ldr \base, =(0xf0000100)
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ /*
+ * The interrupt numbering scheme is defined in the
+ * interrupt controller spec. To wit:
+ *
+ * Interrupts 0-15 are IPI
+ * 16-28 are reserved
+ * 29-31 are local. We allow 30 to be used for the watchdog.
+ * 32-1020 are global
+ * 1021-1022 are reserved
+ * 1023 is "spurious" (no interrupt)
+ *
+ * For now, we ignore all local interrupts so only return an
+ * interrupt if it's between 30 and 1020. The test_for_ipi
+ * routine below will pick up on IPIs.
+ *
+ * A simple read from the controller will tell us the number of
+ * the highest priority enabled interrupt. We then just need to
+ * check whether it is in the valid range for an IRQ (30-1020
+ * inclusive).
+ */
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+
+ ldr \irqstat, [\base, #GIC_CPU_INTACK]
+ /* bits 12-10 = src CPU, 9-0 = int # */
+
+ ldr \tmp, =1021
+ bic \irqnr, \irqstat, #0x1c00
+ cmp \irqnr, #29
+ cmpcc \irqnr, \irqnr
+ cmpne \irqnr, \tmp
+ cmpcs \irqnr, \irqnr
+
+ .endm
+
+ /*
+ * We assume that irqstat (the raw value of the IRQ acknowledge
+ * register) is preserved from the macro above.
+ * If there is an IPI, we immediately signal end of interrupt on the
+ * controller, since this requires the original irqstat value which
+ * we won't easily be able to recreate later.
+ */
+
+ .macro test_for_ipi, irqnr, irqstat, base, tmp
+ bic \irqnr, \irqstat, #0x1c00
+ cmp \irqnr, #16
+ strcc \irqstat, [\base, #GIC_CPU_EOI]
+ cmpcs \irqnr, \irqnr
+ .endm
+
+ /* As above, this assumes that irqstat and base are preserved.. */
+
+ .macro test_for_ltirq, irqnr, irqstat, base, tmp
+ bic \irqnr, \irqstat, #0x1c00
+ mov \tmp, #0
+ cmp \irqnr, #29
+ moveq \tmp, #1
+ streq \irqstat, [\base, #GIC_CPU_EOI]
+ cmp \tmp, #0
+ .endm
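For readers less fluent in the assembler above, the acknowledge/decode sequence implemented by get_irqnr_and_base and test_for_ipi can be sketched in C as follows (GIC_CPU_INTACK/GIC_CPU_EOI are the register offsets from <asm/hardware/gic.h>, and the 0xf0000100 CPU-interface base comes from get_irqnr_preamble; this is an illustration, not code from the patch):

/* Sketch of the GIC acknowledge path: read INTACK once, keep the low
 * 10 bits as the interrupt number (bits 12-10 carry the source CPU),
 * EOI IPIs immediately, and only report IRQs in the 30-1020 range. */
static int gic_ack_and_decode(void __iomem *cpu_base, u32 *irqstat)
{
	u32 irqnr;

	*irqstat = readl(cpu_base + GIC_CPU_INTACK);
	irqnr = *irqstat & 0x3ff;

	if (irqnr < 16) {
		/* IPI: signal EOI now, while the raw ack value is at hand */
		writel(*irqstat, cpu_base + GIC_CPU_EOI);
		return -1;
	}
	if (irqnr >= 30 && irqnr <= 1020)
		return irqnr;		/* valid IRQ, handled normally */
	return -1;			/* local 29 (ltirq), reserved or spurious */
}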
diff --git a/arch/arm/mach-shmobile/include/mach/entry-macro-intc.S b/arch/arm/mach-shmobile/include/mach/entry-macro-intc.S
new file mode 100644
index 000000000000..a285d13c7416
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/entry-macro-intc.S
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2008 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ ldr \base, =INTFLGA
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+ ldr \irqnr, [\base]
+ cmp \irqnr, #0
+ beq 1000f
+ /* intevt to irq number */
+ lsr \irqnr, \irqnr, #0x5
+ subs \irqnr, \irqnr, #16
+
+1000:
+ .endm
diff --git a/arch/arm/mach-shmobile/include/mach/entry-macro.S b/arch/arm/mach-shmobile/include/mach/entry-macro.S
index a285d13c7416..26e401167605 100644
--- a/arch/arm/mach-shmobile/include/mach/entry-macro.S
+++ b/arch/arm/mach-shmobile/include/mach/entry-macro.S
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Renesas Solutions Corp.
+ * Copyright (C) 2010 Paul Mundt
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,26 +14,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-
- .macro disable_fiq
- .endm
-
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =INTFLGA
- .endm
-
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \irqnr, [\base]
- cmp \irqnr, #0
- beq 1000f
- /* intevt to irq number */
- lsr \irqnr, \irqnr, #0x5
- subs \irqnr, \irqnr, #16
-
-1000:
- .endm
+#if defined(CONFIG_ARM_GIC)
+#include <mach/entry-macro-gic.S>
+#else
+#include <mach/entry-macro-intc.S>
+#endif
diff --git a/arch/arm/mach-shmobile/include/mach/irqs.h b/arch/arm/mach-shmobile/include/mach/irqs.h
index fa15b5f8a001..09e0ab7d343e 100644
--- a/arch/arm/mach-shmobile/include/mach/irqs.h
+++ b/arch/arm/mach-shmobile/include/mach/irqs.h
@@ -3,6 +3,9 @@
#define NR_IRQS 512
+/* GIC */
+#define gic_spi(nr) ((nr) + 32)
+
/* INTCA */
#define evt2irq(evt) (((evt) >> 5) - 16)
#define irq2evt(irq) (((irq) + 16) << 5)
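
gic_spi() maps a GIC shared peripheral interrupt number to its Linux IRQ number by adding the 32 IDs reserved for SGIs and PPIs. A usage sketch with a made-up device (the SPI number here is hypothetical):

    static struct resource example_resources[] = {
            [0] = {
                    .start  = gic_spi(72), /* hypothetical peripheral on SPI 72 -> Linux IRQ 104 */
                    .flags  = IORESOURCE_IRQ,
            },
    };
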
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index e4f9004e7103..5736efcca60c 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -455,6 +455,8 @@ enum {
SHDMA_SLAVE_SDHI1_TX,
SHDMA_SLAVE_SDHI2_RX,
SHDMA_SLAVE_SDHI2_TX,
+ SHDMA_SLAVE_MMCIF_RX,
+ SHDMA_SLAVE_MMCIF_TX,
};
extern struct clk sh7372_extal1_clk;
diff --git a/arch/arm/mach-shmobile/include/mach/sh73a0.h b/arch/arm/mach-shmobile/include/mach/sh73a0.h
new file mode 100644
index 000000000000..8a6593964402
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/sh73a0.h
@@ -0,0 +1,446 @@
+#ifndef __ASM_SH73A0_H__
+#define __ASM_SH73A0_H__
+
+/* Pin Function Controller:
+ * GPIO_FN_xx - GPIO used to select pin function and MSEL switch
+ * GPIO_PORTxx - GPIO mapped to real I/O pin on CPU
+ */
+enum {
+ /* Hardware manual Table 25-1 (GPIO) */
+ GPIO_PORT0, GPIO_PORT1, GPIO_PORT2, GPIO_PORT3, GPIO_PORT4,
+ GPIO_PORT5, GPIO_PORT6, GPIO_PORT7, GPIO_PORT8, GPIO_PORT9,
+
+ GPIO_PORT10, GPIO_PORT11, GPIO_PORT12, GPIO_PORT13, GPIO_PORT14,
+ GPIO_PORT15, GPIO_PORT16, GPIO_PORT17, GPIO_PORT18, GPIO_PORT19,
+
+ GPIO_PORT20, GPIO_PORT21, GPIO_PORT22, GPIO_PORT23, GPIO_PORT24,
+ GPIO_PORT25, GPIO_PORT26, GPIO_PORT27, GPIO_PORT28, GPIO_PORT29,
+
+ GPIO_PORT30, GPIO_PORT31, GPIO_PORT32, GPIO_PORT33, GPIO_PORT34,
+ GPIO_PORT35, GPIO_PORT36, GPIO_PORT37, GPIO_PORT38, GPIO_PORT39,
+
+ GPIO_PORT40, GPIO_PORT41, GPIO_PORT42, GPIO_PORT43, GPIO_PORT44,
+ GPIO_PORT45, GPIO_PORT46, GPIO_PORT47, GPIO_PORT48, GPIO_PORT49,
+
+ GPIO_PORT50, GPIO_PORT51, GPIO_PORT52, GPIO_PORT53, GPIO_PORT54,
+ GPIO_PORT55, GPIO_PORT56, GPIO_PORT57, GPIO_PORT58, GPIO_PORT59,
+
+ GPIO_PORT60, GPIO_PORT61, GPIO_PORT62, GPIO_PORT63, GPIO_PORT64,
+ GPIO_PORT65, GPIO_PORT66, GPIO_PORT67, GPIO_PORT68, GPIO_PORT69,
+
+ GPIO_PORT70, GPIO_PORT71, GPIO_PORT72, GPIO_PORT73, GPIO_PORT74,
+ GPIO_PORT75, GPIO_PORT76, GPIO_PORT77, GPIO_PORT78, GPIO_PORT79,
+
+ GPIO_PORT80, GPIO_PORT81, GPIO_PORT82, GPIO_PORT83, GPIO_PORT84,
+ GPIO_PORT85, GPIO_PORT86, GPIO_PORT87, GPIO_PORT88, GPIO_PORT89,
+
+ GPIO_PORT90, GPIO_PORT91, GPIO_PORT92, GPIO_PORT93, GPIO_PORT94,
+ GPIO_PORT95, GPIO_PORT96, GPIO_PORT97, GPIO_PORT98, GPIO_PORT99,
+
+ GPIO_PORT100, GPIO_PORT101, GPIO_PORT102, GPIO_PORT103, GPIO_PORT104,
+ GPIO_PORT105, GPIO_PORT106, GPIO_PORT107, GPIO_PORT108, GPIO_PORT109,
+
+ GPIO_PORT110, GPIO_PORT111, GPIO_PORT112, GPIO_PORT113, GPIO_PORT114,
+ GPIO_PORT115, GPIO_PORT116, GPIO_PORT117, GPIO_PORT118,
+
+ GPIO_PORT128, GPIO_PORT129,
+
+ GPIO_PORT130, GPIO_PORT131, GPIO_PORT132, GPIO_PORT133, GPIO_PORT134,
+ GPIO_PORT135, GPIO_PORT136, GPIO_PORT137, GPIO_PORT138, GPIO_PORT139,
+
+ GPIO_PORT140, GPIO_PORT141, GPIO_PORT142, GPIO_PORT143, GPIO_PORT144,
+ GPIO_PORT145, GPIO_PORT146, GPIO_PORT147, GPIO_PORT148, GPIO_PORT149,
+
+ GPIO_PORT150, GPIO_PORT151, GPIO_PORT152, GPIO_PORT153, GPIO_PORT154,
+ GPIO_PORT155, GPIO_PORT156, GPIO_PORT157, GPIO_PORT158, GPIO_PORT159,
+
+ GPIO_PORT160, GPIO_PORT161, GPIO_PORT162, GPIO_PORT163, GPIO_PORT164,
+
+ GPIO_PORT192, GPIO_PORT193, GPIO_PORT194,
+ GPIO_PORT195, GPIO_PORT196, GPIO_PORT197, GPIO_PORT198, GPIO_PORT199,
+
+ GPIO_PORT200, GPIO_PORT201, GPIO_PORT202, GPIO_PORT203, GPIO_PORT204,
+ GPIO_PORT205, GPIO_PORT206, GPIO_PORT207, GPIO_PORT208, GPIO_PORT209,
+
+ GPIO_PORT210, GPIO_PORT211, GPIO_PORT212, GPIO_PORT213, GPIO_PORT214,
+ GPIO_PORT215, GPIO_PORT216, GPIO_PORT217, GPIO_PORT218, GPIO_PORT219,
+
+ GPIO_PORT220, GPIO_PORT221, GPIO_PORT222, GPIO_PORT223, GPIO_PORT224,
+ GPIO_PORT225, GPIO_PORT226, GPIO_PORT227, GPIO_PORT228, GPIO_PORT229,
+
+ GPIO_PORT230, GPIO_PORT231, GPIO_PORT232, GPIO_PORT233, GPIO_PORT234,
+ GPIO_PORT235, GPIO_PORT236, GPIO_PORT237, GPIO_PORT238, GPIO_PORT239,
+
+ GPIO_PORT240, GPIO_PORT241, GPIO_PORT242, GPIO_PORT243, GPIO_PORT244,
+ GPIO_PORT245, GPIO_PORT246, GPIO_PORT247, GPIO_PORT248, GPIO_PORT249,
+
+ GPIO_PORT250, GPIO_PORT251, GPIO_PORT252, GPIO_PORT253, GPIO_PORT254,
+ GPIO_PORT255, GPIO_PORT256, GPIO_PORT257, GPIO_PORT258, GPIO_PORT259,
+
+ GPIO_PORT260, GPIO_PORT261, GPIO_PORT262, GPIO_PORT263, GPIO_PORT264,
+ GPIO_PORT265, GPIO_PORT266, GPIO_PORT267, GPIO_PORT268, GPIO_PORT269,
+
+ GPIO_PORT270, GPIO_PORT271, GPIO_PORT272, GPIO_PORT273, GPIO_PORT274,
+ GPIO_PORT275, GPIO_PORT276, GPIO_PORT277, GPIO_PORT278, GPIO_PORT279,
+
+ GPIO_PORT280, GPIO_PORT281, GPIO_PORT282,
+
+ GPIO_PORT288, GPIO_PORT289,
+
+ GPIO_PORT290, GPIO_PORT291, GPIO_PORT292, GPIO_PORT293, GPIO_PORT294,
+ GPIO_PORT295, GPIO_PORT296, GPIO_PORT297, GPIO_PORT298, GPIO_PORT299,
+
+ GPIO_PORT300, GPIO_PORT301, GPIO_PORT302, GPIO_PORT303, GPIO_PORT304,
+ GPIO_PORT305, GPIO_PORT306, GPIO_PORT307, GPIO_PORT308, GPIO_PORT309,
+
+ /* Table 25-1 (Function 0-7) */
+ GPIO_FN_VBUS_0,
+ GPIO_FN_GPI0,
+ GPIO_FN_GPI1,
+ GPIO_FN_GPI2,
+ GPIO_FN_GPI3,
+ GPIO_FN_GPI4,
+ GPIO_FN_GPI5,
+ GPIO_FN_GPI6,
+ GPIO_FN_GPI7,
+ GPIO_FN_SCIFA7_RXD,
+ GPIO_FN_SCIFA7_CTS_,
+ GPIO_FN_GPO7, GPIO_FN_MFG0_OUT2,
+ GPIO_FN_GPO6, GPIO_FN_MFG1_OUT2,
+ GPIO_FN_GPO5, GPIO_FN_SCIFA0_SCK, GPIO_FN_FSICOSLDT3, \
+ GPIO_FN_PORT16_VIO_CKOR,
+ GPIO_FN_SCIFA0_TXD,
+ GPIO_FN_SCIFA7_TXD,
+ GPIO_FN_SCIFA7_RTS_, GPIO_FN_PORT19_VIO_CKO2,
+ GPIO_FN_GPO0,
+ GPIO_FN_GPO1,
+ GPIO_FN_GPO2, GPIO_FN_STATUS0,
+ GPIO_FN_GPO3, GPIO_FN_STATUS1,
+ GPIO_FN_GPO4, GPIO_FN_STATUS2,
+ GPIO_FN_VINT,
+ GPIO_FN_TCKON,
+ GPIO_FN_XDVFS1, GPIO_FN_PORT27_I2C_SCL2, GPIO_FN_PORT27_I2C_SCL3, \
+ GPIO_FN_MFG0_OUT1, GPIO_FN_PORT27_IROUT,
+ GPIO_FN_XDVFS2, GPIO_FN_PORT28_I2C_SDA2, GPIO_FN_PORT28_I2C_SDA3, \
+ GPIO_FN_PORT28_TPU1TO1,
+ GPIO_FN_SIM_RST, GPIO_FN_PORT29_TPU1TO1,
+ GPIO_FN_SIM_CLK, GPIO_FN_PORT30_VIO_CKOR,
+ GPIO_FN_SIM_D, GPIO_FN_PORT31_IROUT,
+ GPIO_FN_SCIFA4_TXD,
+ GPIO_FN_SCIFA4_RXD, GPIO_FN_XWUP,
+ GPIO_FN_SCIFA4_RTS_,
+ GPIO_FN_SCIFA4_CTS_,
+ GPIO_FN_FSIBOBT, GPIO_FN_FSIBIBT,
+ GPIO_FN_FSIBOLR, GPIO_FN_FSIBILR,
+ GPIO_FN_FSIBOSLD,
+ GPIO_FN_FSIBISLD,
+ GPIO_FN_VACK,
+ GPIO_FN_XTAL1L,
+ GPIO_FN_SCIFA0_RTS_, GPIO_FN_FSICOSLDT2,
+ GPIO_FN_SCIFA0_RXD,
+ GPIO_FN_SCIFA0_CTS_, GPIO_FN_FSICOSLDT1,
+ GPIO_FN_FSICOBT, GPIO_FN_FSICIBT, GPIO_FN_FSIDOBT, GPIO_FN_FSIDIBT,
+ GPIO_FN_FSICOLR, GPIO_FN_FSICILR, GPIO_FN_FSIDOLR, GPIO_FN_FSIDILR,
+ GPIO_FN_FSICOSLD, GPIO_FN_PORT47_FSICSPDIF,
+ GPIO_FN_FSICISLD, GPIO_FN_FSIDISLD,
+ GPIO_FN_FSIACK, GPIO_FN_PORT49_IRDA_OUT, GPIO_FN_PORT49_IROUT, \
+ GPIO_FN_FSIAOMC,
+ GPIO_FN_FSIAOLR, GPIO_FN_BBIF2_TSYNC2, GPIO_FN_TPU2TO2, GPIO_FN_FSIAILR,
+
+ GPIO_FN_FSIAOBT, GPIO_FN_BBIF2_TSCK2, GPIO_FN_TPU2TO3, GPIO_FN_FSIAIBT,
+ GPIO_FN_FSIAOSLD, GPIO_FN_BBIF2_TXD2,
+ GPIO_FN_FSIASPDIF, GPIO_FN_PORT53_IRDA_IN, GPIO_FN_TPU3TO3, \
+ GPIO_FN_FSIBSPDIF, GPIO_FN_PORT53_FSICSPDIF,
+ GPIO_FN_FSIBCK, GPIO_FN_PORT54_IRDA_FIRSEL, GPIO_FN_TPU3TO2, \
+ GPIO_FN_FSIBOMC, GPIO_FN_FSICCK, GPIO_FN_FSICOMC,
+ GPIO_FN_FSIAISLD, GPIO_FN_TPU0TO0,
+ GPIO_FN_A0, GPIO_FN_BS_,
+ GPIO_FN_A12, GPIO_FN_PORT58_KEYOUT7, GPIO_FN_TPU4TO2,
+ GPIO_FN_A13, GPIO_FN_PORT59_KEYOUT6, GPIO_FN_TPU0TO1,
+ GPIO_FN_A14, GPIO_FN_KEYOUT5,
+ GPIO_FN_A15, GPIO_FN_KEYOUT4,
+ GPIO_FN_A16, GPIO_FN_KEYOUT3, GPIO_FN_MSIOF0_SS1,
+ GPIO_FN_A17, GPIO_FN_KEYOUT2, GPIO_FN_MSIOF0_TSYNC,
+ GPIO_FN_A18, GPIO_FN_KEYOUT1, GPIO_FN_MSIOF0_TSCK,
+ GPIO_FN_A19, GPIO_FN_KEYOUT0, GPIO_FN_MSIOF0_TXD,
+ GPIO_FN_A20, GPIO_FN_KEYIN0, GPIO_FN_MSIOF0_RSCK,
+ GPIO_FN_A21, GPIO_FN_KEYIN1, GPIO_FN_MSIOF0_RSYNC,
+ GPIO_FN_A22, GPIO_FN_KEYIN2, GPIO_FN_MSIOF0_MCK0,
+ GPIO_FN_A23, GPIO_FN_KEYIN3, GPIO_FN_MSIOF0_MCK1,
+ GPIO_FN_A24, GPIO_FN_KEYIN4, GPIO_FN_MSIOF0_RXD,
+ GPIO_FN_A25, GPIO_FN_KEYIN5, GPIO_FN_MSIOF0_SS2,
+ GPIO_FN_A26, GPIO_FN_KEYIN6,
+ GPIO_FN_KEYIN7,
+ GPIO_FN_D0_NAF0,
+ GPIO_FN_D1_NAF1,
+ GPIO_FN_D2_NAF2,
+ GPIO_FN_D3_NAF3,
+ GPIO_FN_D4_NAF4,
+ GPIO_FN_D5_NAF5,
+ GPIO_FN_D6_NAF6,
+ GPIO_FN_D7_NAF7,
+ GPIO_FN_D8_NAF8,
+ GPIO_FN_D9_NAF9,
+ GPIO_FN_D10_NAF10,
+ GPIO_FN_D11_NAF11,
+ GPIO_FN_D12_NAF12,
+ GPIO_FN_D13_NAF13,
+ GPIO_FN_D14_NAF14,
+ GPIO_FN_D15_NAF15,
+ GPIO_FN_CS4_,
+ GPIO_FN_CS5A_, GPIO_FN_PORT91_RDWR,
+ GPIO_FN_CS5B_, GPIO_FN_FCE1_,
+ GPIO_FN_CS6B_, GPIO_FN_DACK0,
+ GPIO_FN_FCE0_, GPIO_FN_CS6A_,
+ GPIO_FN_WAIT_, GPIO_FN_DREQ0,
+ GPIO_FN_RD__FSC,
+ GPIO_FN_WE0__FWE, GPIO_FN_RDWR_FWE,
+ GPIO_FN_WE1_,
+ GPIO_FN_FRB,
+ GPIO_FN_CKO,
+ GPIO_FN_NBRSTOUT_,
+ GPIO_FN_NBRST_,
+ GPIO_FN_BBIF2_TXD,
+ GPIO_FN_BBIF2_RXD,
+ GPIO_FN_BBIF2_SYNC,
+ GPIO_FN_BBIF2_SCK,
+ GPIO_FN_SCIFA3_CTS_, GPIO_FN_MFG3_IN2,
+ GPIO_FN_SCIFA3_RXD, GPIO_FN_MFG3_IN1,
+ GPIO_FN_BBIF1_SS2, GPIO_FN_SCIFA3_RTS_, GPIO_FN_MFG3_OUT1,
+ GPIO_FN_SCIFA3_TXD,
+ GPIO_FN_HSI_RX_DATA, GPIO_FN_BBIF1_RXD,
+ GPIO_FN_HSI_TX_WAKE, GPIO_FN_BBIF1_TSCK,
+ GPIO_FN_HSI_TX_DATA, GPIO_FN_BBIF1_TSYNC,
+ GPIO_FN_HSI_TX_READY, GPIO_FN_BBIF1_TXD,
+ GPIO_FN_HSI_RX_READY, GPIO_FN_BBIF1_RSCK, GPIO_FN_PORT115_I2C_SCL2, \
+ GPIO_FN_PORT115_I2C_SCL3,
+ GPIO_FN_HSI_RX_WAKE, GPIO_FN_BBIF1_RSYNC, GPIO_FN_PORT116_I2C_SDA2, \
+ GPIO_FN_PORT116_I2C_SDA3,
+ GPIO_FN_HSI_RX_FLAG, GPIO_FN_BBIF1_SS1, GPIO_FN_BBIF1_FLOW,
+ GPIO_FN_HSI_TX_FLAG,
+ GPIO_FN_VIO_VD, GPIO_FN_PORT128_LCD2VSYN, GPIO_FN_VIO2_VD, \
+ GPIO_FN_LCD2D0,
+
+ GPIO_FN_VIO_HD, GPIO_FN_PORT129_LCD2HSYN, GPIO_FN_PORT129_LCD2CS_, \
+ GPIO_FN_VIO2_HD, GPIO_FN_LCD2D1,
+ GPIO_FN_VIO_D0, GPIO_FN_PORT130_MSIOF2_RXD, GPIO_FN_LCD2D10,
+ GPIO_FN_VIO_D1, GPIO_FN_PORT131_KEYOUT6, GPIO_FN_PORT131_MSIOF2_SS1, \
+ GPIO_FN_PORT131_KEYOUT11, GPIO_FN_LCD2D11,
+ GPIO_FN_VIO_D2, GPIO_FN_PORT132_KEYOUT7, GPIO_FN_PORT132_MSIOF2_SS2, \
+ GPIO_FN_PORT132_KEYOUT10, GPIO_FN_LCD2D12,
+ GPIO_FN_VIO_D3, GPIO_FN_MSIOF2_TSYNC, GPIO_FN_LCD2D13,
+ GPIO_FN_VIO_D4, GPIO_FN_MSIOF2_TXD, GPIO_FN_LCD2D14,
+ GPIO_FN_VIO_D5, GPIO_FN_MSIOF2_TSCK, GPIO_FN_LCD2D15,
+ GPIO_FN_VIO_D6, GPIO_FN_PORT136_KEYOUT8, GPIO_FN_LCD2D16,
+ GPIO_FN_VIO_D7, GPIO_FN_PORT137_KEYOUT9, GPIO_FN_LCD2D17,
+ GPIO_FN_VIO_D8, GPIO_FN_PORT138_KEYOUT8, GPIO_FN_VIO2_D0, \
+ GPIO_FN_LCD2D6,
+ GPIO_FN_VIO_D9, GPIO_FN_PORT139_KEYOUT9, GPIO_FN_VIO2_D1, \
+ GPIO_FN_LCD2D7,
+ GPIO_FN_VIO_D10, GPIO_FN_TPU0TO2, GPIO_FN_VIO2_D2, GPIO_FN_LCD2D8,
+ GPIO_FN_VIO_D11, GPIO_FN_TPU0TO3, GPIO_FN_VIO2_D3, GPIO_FN_LCD2D9,
+ GPIO_FN_VIO_D12, GPIO_FN_PORT142_KEYOUT10, GPIO_FN_VIO2_D4, \
+ GPIO_FN_LCD2D2,
+ GPIO_FN_VIO_D13, GPIO_FN_PORT143_KEYOUT11, GPIO_FN_PORT143_KEYOUT6, \
+ GPIO_FN_VIO2_D5, GPIO_FN_LCD2D3,
+ GPIO_FN_VIO_D14, GPIO_FN_PORT144_KEYOUT7, GPIO_FN_VIO2_D6, \
+ GPIO_FN_LCD2D4,
+ GPIO_FN_VIO_D15, GPIO_FN_TPU1TO3, GPIO_FN_PORT145_LCD2DISP, \
+ GPIO_FN_PORT145_LCD2RS, GPIO_FN_VIO2_D7, GPIO_FN_LCD2D5,
+ GPIO_FN_VIO_CLK, GPIO_FN_LCD2DCK, GPIO_FN_PORT146_LCD2WR_, \
+ GPIO_FN_VIO2_CLK, GPIO_FN_LCD2D18,
+ GPIO_FN_VIO_FIELD, GPIO_FN_LCD2RD_, GPIO_FN_VIO2_FIELD, GPIO_FN_LCD2D19,
+ GPIO_FN_VIO_CKO,
+ GPIO_FN_A27, GPIO_FN_PORT149_RDWR, GPIO_FN_MFG0_IN1, \
+ GPIO_FN_PORT149_KEYOUT9,
+ GPIO_FN_MFG0_IN2,
+ GPIO_FN_TS_SPSYNC3, GPIO_FN_MSIOF2_RSCK,
+ GPIO_FN_TS_SDAT3, GPIO_FN_MSIOF2_RSYNC,
+ GPIO_FN_TPU1TO2, GPIO_FN_TS_SDEN3, GPIO_FN_PORT153_MSIOF2_SS1,
+ GPIO_FN_SCIFA2_TXD1, GPIO_FN_MSIOF2_MCK0,
+ GPIO_FN_SCIFA2_RXD1, GPIO_FN_MSIOF2_MCK1,
+ GPIO_FN_SCIFA2_RTS1_, GPIO_FN_PORT156_MSIOF2_SS2,
+ GPIO_FN_SCIFA2_CTS1_, GPIO_FN_PORT157_MSIOF2_RXD,
+ GPIO_FN_DINT_, GPIO_FN_SCIFA2_SCK1, GPIO_FN_TS_SCK3,
+ GPIO_FN_PORT159_SCIFB_SCK, GPIO_FN_PORT159_SCIFA5_SCK, GPIO_FN_NMI,
+ GPIO_FN_PORT160_SCIFB_TXD, GPIO_FN_PORT160_SCIFA5_TXD,
+ GPIO_FN_PORT161_SCIFB_CTS_, GPIO_FN_PORT161_SCIFA5_CTS_,
+ GPIO_FN_PORT162_SCIFB_RXD, GPIO_FN_PORT162_SCIFA5_RXD,
+ GPIO_FN_PORT163_SCIFB_RTS_, GPIO_FN_PORT163_SCIFA5_RTS_, \
+ GPIO_FN_TPU3TO0,
+ GPIO_FN_LCDD0,
+ GPIO_FN_LCDD1, GPIO_FN_PORT193_SCIFA5_CTS_, GPIO_FN_BBIF2_TSYNC1,
+ GPIO_FN_LCDD2, GPIO_FN_PORT194_SCIFA5_RTS_, GPIO_FN_BBIF2_TSCK1,
+ GPIO_FN_LCDD3, GPIO_FN_PORT195_SCIFA5_RXD, GPIO_FN_BBIF2_TXD1,
+ GPIO_FN_LCDD4, GPIO_FN_PORT196_SCIFA5_TXD,
+ GPIO_FN_LCDD5, GPIO_FN_PORT197_SCIFA5_SCK, GPIO_FN_MFG2_OUT2, \
+ GPIO_FN_TPU2TO1,
+ GPIO_FN_LCDD6,
+ GPIO_FN_LCDD7, GPIO_FN_TPU4TO1, GPIO_FN_MFG4_OUT2,
+ GPIO_FN_LCDD8, GPIO_FN_D16,
+ GPIO_FN_LCDD9, GPIO_FN_D17,
+ GPIO_FN_LCDD10, GPIO_FN_D18,
+ GPIO_FN_LCDD11, GPIO_FN_D19,
+ GPIO_FN_LCDD12, GPIO_FN_D20,
+ GPIO_FN_LCDD13, GPIO_FN_D21,
+ GPIO_FN_LCDD14, GPIO_FN_D22,
+ GPIO_FN_LCDD15, GPIO_FN_PORT207_MSIOF0L_SS1, GPIO_FN_D23,
+ GPIO_FN_LCDD16, GPIO_FN_PORT208_MSIOF0L_SS2, GPIO_FN_D24,
+ GPIO_FN_LCDD17, GPIO_FN_D25,
+ GPIO_FN_LCDD18, GPIO_FN_DREQ2, GPIO_FN_PORT210_MSIOF0L_SS1, GPIO_FN_D26,
+ GPIO_FN_LCDD19, GPIO_FN_PORT211_MSIOF0L_SS2, GPIO_FN_D27,
+ GPIO_FN_LCDD20, GPIO_FN_TS_SPSYNC1, GPIO_FN_MSIOF0L_MCK0, GPIO_FN_D28,
+ GPIO_FN_LCDD21, GPIO_FN_TS_SDAT1, GPIO_FN_MSIOF0L_MCK1, GPIO_FN_D29,
+ GPIO_FN_LCDD22, GPIO_FN_TS_SDEN1, GPIO_FN_MSIOF0L_RSCK, GPIO_FN_D30,
+ GPIO_FN_LCDD23, GPIO_FN_TS_SCK1, GPIO_FN_MSIOF0L_RSYNC, GPIO_FN_D31,
+ GPIO_FN_LCDDCK, GPIO_FN_LCDWR_,
+ GPIO_FN_LCDRD_, GPIO_FN_DACK2, GPIO_FN_PORT217_LCD2RS, \
+ GPIO_FN_MSIOF0L_TSYNC, GPIO_FN_VIO2_FIELD3, GPIO_FN_PORT217_LCD2DISP,
+ GPIO_FN_LCDHSYN, GPIO_FN_LCDCS_, GPIO_FN_LCDCS2_, GPIO_FN_DACK3, \
+ GPIO_FN_PORT218_VIO_CKOR,
+ GPIO_FN_LCDDISP, GPIO_FN_LCDRS, GPIO_FN_PORT219_LCD2WR_, \
+ GPIO_FN_DREQ3, GPIO_FN_MSIOF0L_TSCK, GPIO_FN_VIO2_CLK3, \
+ GPIO_FN_LCD2DCK_2,
+ GPIO_FN_LCDVSYN, GPIO_FN_LCDVSYN2,
+ GPIO_FN_LCDLCLK, GPIO_FN_DREQ1, GPIO_FN_PORT221_LCD2CS_, \
+ GPIO_FN_PWEN, GPIO_FN_MSIOF0L_RXD, GPIO_FN_VIO2_HD3, \
+ GPIO_FN_PORT221_LCD2HSYN,
+ GPIO_FN_LCDDON, GPIO_FN_LCDDON2, GPIO_FN_DACK1, GPIO_FN_OVCN, \
+ GPIO_FN_MSIOF0L_TXD, GPIO_FN_VIO2_VD3, GPIO_FN_PORT222_LCD2VSYN,
+
+ GPIO_FN_SCIFA1_TXD, GPIO_FN_OVCN2,
+ GPIO_FN_EXTLP, GPIO_FN_SCIFA1_SCK, GPIO_FN_PORT226_VIO_CKO2,
+ GPIO_FN_SCIFA1_RTS_, GPIO_FN_IDIN,
+ GPIO_FN_SCIFA1_RXD,
+ GPIO_FN_SCIFA1_CTS_, GPIO_FN_MFG1_IN1,
+ GPIO_FN_MSIOF1_TXD, GPIO_FN_SCIFA2_TXD2,
+ GPIO_FN_MSIOF1_TSYNC, GPIO_FN_SCIFA2_CTS2_,
+ GPIO_FN_MSIOF1_TSCK, GPIO_FN_SCIFA2_SCK2,
+ GPIO_FN_MSIOF1_RXD, GPIO_FN_SCIFA2_RXD2,
+ GPIO_FN_MSIOF1_RSCK, GPIO_FN_SCIFA2_RTS2_, GPIO_FN_VIO2_CLK2, \
+ GPIO_FN_LCD2D20,
+ GPIO_FN_MSIOF1_RSYNC, GPIO_FN_MFG1_IN2, GPIO_FN_VIO2_VD2, \
+ GPIO_FN_LCD2D21,
+ GPIO_FN_MSIOF1_MCK0, GPIO_FN_PORT236_I2C_SDA2,
+ GPIO_FN_MSIOF1_MCK1, GPIO_FN_PORT237_I2C_SCL2,
+ GPIO_FN_MSIOF1_SS1, GPIO_FN_VIO2_FIELD2, GPIO_FN_LCD2D22,
+ GPIO_FN_MSIOF1_SS2, GPIO_FN_VIO2_HD2, GPIO_FN_LCD2D23,
+ GPIO_FN_SCIFA6_TXD,
+ GPIO_FN_PORT241_IRDA_OUT, GPIO_FN_PORT241_IROUT, GPIO_FN_MFG4_OUT1, \
+ GPIO_FN_TPU4TO0,
+ GPIO_FN_PORT242_IRDA_IN, GPIO_FN_MFG4_IN2,
+ GPIO_FN_PORT243_IRDA_FIRSEL, GPIO_FN_PORT243_VIO_CKO2,
+ GPIO_FN_PORT244_SCIFA5_CTS_, GPIO_FN_MFG2_IN1, \
+ GPIO_FN_PORT244_SCIFB_CTS_, GPIO_FN_MSIOF2R_RXD,
+ GPIO_FN_PORT245_SCIFA5_RTS_, GPIO_FN_MFG2_IN2, \
+ GPIO_FN_PORT245_SCIFB_RTS_, GPIO_FN_MSIOF2R_TXD,
+ GPIO_FN_PORT246_SCIFA5_RXD, GPIO_FN_MFG1_OUT1, \
+ GPIO_FN_PORT246_SCIFB_RXD, GPIO_FN_TPU1TO0,
+ GPIO_FN_PORT247_SCIFA5_TXD, GPIO_FN_MFG3_OUT2, \
+ GPIO_FN_PORT247_SCIFB_TXD, GPIO_FN_TPU3TO1,
+ GPIO_FN_PORT248_SCIFA5_SCK, GPIO_FN_MFG2_OUT1, \
+ GPIO_FN_PORT248_SCIFB_SCK, GPIO_FN_TPU2TO0, \
+ GPIO_FN_PORT248_I2C_SCL3, GPIO_FN_MSIOF2R_TSCK,
+ GPIO_FN_PORT249_IROUT, GPIO_FN_MFG4_IN1, \
+ GPIO_FN_PORT249_I2C_SDA3, GPIO_FN_MSIOF2R_TSYNC,
+ GPIO_FN_SDHICLK0,
+ GPIO_FN_SDHICD0,
+ GPIO_FN_SDHID0_0,
+ GPIO_FN_SDHID0_1,
+ GPIO_FN_SDHID0_2,
+ GPIO_FN_SDHID0_3,
+ GPIO_FN_SDHICMD0,
+ GPIO_FN_SDHIWP0,
+ GPIO_FN_SDHICLK1,
+ GPIO_FN_SDHID1_0, GPIO_FN_TS_SPSYNC2,
+ GPIO_FN_SDHID1_1, GPIO_FN_TS_SDAT2,
+ GPIO_FN_SDHID1_2, GPIO_FN_TS_SDEN2,
+ GPIO_FN_SDHID1_3, GPIO_FN_TS_SCK2,
+ GPIO_FN_SDHICMD1,
+ GPIO_FN_SDHICLK2,
+ GPIO_FN_SDHID2_0, GPIO_FN_TS_SPSYNC4,
+ GPIO_FN_SDHID2_1, GPIO_FN_TS_SDAT4,
+ GPIO_FN_SDHID2_2, GPIO_FN_TS_SDEN4,
+ GPIO_FN_SDHID2_3, GPIO_FN_TS_SCK4,
+ GPIO_FN_SDHICMD2,
+ GPIO_FN_MMCCLK0,
+ GPIO_FN_MMCD0_0,
+ GPIO_FN_MMCD0_1,
+ GPIO_FN_MMCD0_2,
+ GPIO_FN_MMCD0_3,
+ GPIO_FN_MMCD0_4, GPIO_FN_TS_SPSYNC5,
+ GPIO_FN_MMCD0_5, GPIO_FN_TS_SDAT5,
+ GPIO_FN_MMCD0_6, GPIO_FN_TS_SDEN5,
+ GPIO_FN_MMCD0_7, GPIO_FN_TS_SCK5,
+ GPIO_FN_MMCCMD0,
+ GPIO_FN_RESETOUTS_, GPIO_FN_EXTAL2OUT,
+ GPIO_FN_MCP_WAIT__MCP_FRB,
+ GPIO_FN_MCP_CKO, GPIO_FN_MMCCLK1,
+ GPIO_FN_MCP_D15_MCP_NAF15,
+ GPIO_FN_MCP_D14_MCP_NAF14,
+ GPIO_FN_MCP_D13_MCP_NAF13,
+ GPIO_FN_MCP_D12_MCP_NAF12,
+ GPIO_FN_MCP_D11_MCP_NAF11,
+ GPIO_FN_MCP_D10_MCP_NAF10,
+ GPIO_FN_MCP_D9_MCP_NAF9,
+ GPIO_FN_MCP_D8_MCP_NAF8, GPIO_FN_MMCCMD1,
+ GPIO_FN_MCP_D7_MCP_NAF7, GPIO_FN_MMCD1_7,
+
+ GPIO_FN_MCP_D6_MCP_NAF6, GPIO_FN_MMCD1_6,
+ GPIO_FN_MCP_D5_MCP_NAF5, GPIO_FN_MMCD1_5,
+ GPIO_FN_MCP_D4_MCP_NAF4, GPIO_FN_MMCD1_4,
+ GPIO_FN_MCP_D3_MCP_NAF3, GPIO_FN_MMCD1_3,
+ GPIO_FN_MCP_D2_MCP_NAF2, GPIO_FN_MMCD1_2,
+ GPIO_FN_MCP_D1_MCP_NAF1, GPIO_FN_MMCD1_1,
+ GPIO_FN_MCP_D0_MCP_NAF0, GPIO_FN_MMCD1_0,
+ GPIO_FN_MCP_NBRSTOUT_,
+ GPIO_FN_MCP_WE0__MCP_FWE, GPIO_FN_MCP_RDWR_MCP_FWE,
+
+ /* MSEL2 special cases */
+ GPIO_FN_TSIF2_TS_XX1,
+ GPIO_FN_TSIF2_TS_XX2,
+ GPIO_FN_TSIF2_TS_XX3,
+ GPIO_FN_TSIF2_TS_XX4,
+ GPIO_FN_TSIF2_TS_XX5,
+ GPIO_FN_TSIF1_TS_XX1,
+ GPIO_FN_TSIF1_TS_XX2,
+ GPIO_FN_TSIF1_TS_XX3,
+ GPIO_FN_TSIF1_TS_XX4,
+ GPIO_FN_TSIF1_TS_XX5,
+ GPIO_FN_TSIF0_TS_XX1,
+ GPIO_FN_TSIF0_TS_XX2,
+ GPIO_FN_TSIF0_TS_XX3,
+ GPIO_FN_TSIF0_TS_XX4,
+ GPIO_FN_TSIF0_TS_XX5,
+ GPIO_FN_MST1_TS_XX1,
+ GPIO_FN_MST1_TS_XX2,
+ GPIO_FN_MST1_TS_XX3,
+ GPIO_FN_MST1_TS_XX4,
+ GPIO_FN_MST1_TS_XX5,
+ GPIO_FN_MST0_TS_XX1,
+ GPIO_FN_MST0_TS_XX2,
+ GPIO_FN_MST0_TS_XX3,
+ GPIO_FN_MST0_TS_XX4,
+ GPIO_FN_MST0_TS_XX5,
+
+ /* MSEL3 special cases */
+ GPIO_FN_SDHI0_VCCQ_MC0_ON,
+ GPIO_FN_SDHI0_VCCQ_MC0_OFF,
+ GPIO_FN_DEBUG_MON_VIO,
+ GPIO_FN_DEBUG_MON_LCDD,
+ GPIO_FN_LCDC_LCDC0,
+ GPIO_FN_LCDC_LCDC1,
+
+ /* MSEL4 special cases */
+ GPIO_FN_IRQ9_MEM_INT,
+ GPIO_FN_IRQ9_MCP_INT,
+ GPIO_FN_A11,
+ GPIO_FN_KEYOUT8,
+ GPIO_FN_TPU4TO3,
+ GPIO_FN_RESETA_N_PU_ON,
+ GPIO_FN_RESETA_N_PU_OFF,
+ GPIO_FN_EDBGREQ_PD,
+ GPIO_FN_EDBGREQ_PU,
+};
+
+#endif /* __ASM_SH73A0_H__ */
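
Board code is expected to consume these enum values through the gpio API once the pfc (added below) is registered: GPIO_PORTxx numbers behave as ordinary GPIOs, while requesting a GPIO_FN_xx number selects that pin function. A minimal sketch, assuming a board file along those lines (the specific call sites are illustrative, not part of this patch):

    gpio_request(GPIO_FN_SCIFA0_TXD, NULL); /* route SCIFA0 TX onto its pin */
    gpio_request(GPIO_FN_SCIFA0_RXD, NULL); /* route SCIFA0 RX onto its pin */
    gpio_request(GPIO_PORT118, NULL);       /* plain GPIO usage */
    gpio_direction_input(GPIO_PORT118);
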
diff --git a/arch/arm/mach-shmobile/pfc-sh73a0.c b/arch/arm/mach-shmobile/pfc-sh73a0.c
new file mode 100644
index 000000000000..1681170f9a4f
--- /dev/null
+++ b/arch/arm/mach-shmobile/pfc-sh73a0.c
@@ -0,0 +1,2679 @@
+/*
+ * sh73a0 processor support - PFC hardware block
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ * Copyright (C) 2010 NISHIMOTO Hiroki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <mach/sh73a0.h>
+
+#define _1(fn, pfx, sfx) fn(pfx, sfx)
+
+#define _10(fn, pfx, sfx) \
+ _1(fn, pfx##0, sfx), _1(fn, pfx##1, sfx), \
+ _1(fn, pfx##2, sfx), _1(fn, pfx##3, sfx), \
+ _1(fn, pfx##4, sfx), _1(fn, pfx##5, sfx), \
+ _1(fn, pfx##6, sfx), _1(fn, pfx##7, sfx), \
+ _1(fn, pfx##8, sfx), _1(fn, pfx##9, sfx)
+
+#define _310(fn, pfx, sfx) \
+ _10(fn, pfx, sfx), _10(fn, pfx##1, sfx), \
+ _10(fn, pfx##2, sfx), _10(fn, pfx##3, sfx), \
+ _10(fn, pfx##4, sfx), _10(fn, pfx##5, sfx), \
+ _10(fn, pfx##6, sfx), _10(fn, pfx##7, sfx), \
+ _10(fn, pfx##8, sfx), _10(fn, pfx##9, sfx), \
+ _10(fn, pfx##10, sfx), \
+ _1(fn, pfx##110, sfx), _1(fn, pfx##111, sfx), \
+ _1(fn, pfx##112, sfx), _1(fn, pfx##113, sfx), \
+ _1(fn, pfx##114, sfx), _1(fn, pfx##115, sfx), \
+ _1(fn, pfx##116, sfx), _1(fn, pfx##117, sfx), \
+ _1(fn, pfx##118, sfx), \
+ _1(fn, pfx##128, sfx), _1(fn, pfx##129, sfx), \
+ _10(fn, pfx##13, sfx), _10(fn, pfx##14, sfx), \
+ _10(fn, pfx##15, sfx), \
+ _1(fn, pfx##160, sfx), _1(fn, pfx##161, sfx), \
+ _1(fn, pfx##162, sfx), _1(fn, pfx##163, sfx), \
+ _1(fn, pfx##164, sfx), \
+ _1(fn, pfx##192, sfx), _1(fn, pfx##193, sfx), \
+ _1(fn, pfx##194, sfx), _1(fn, pfx##195, sfx), \
+ _1(fn, pfx##196, sfx), _1(fn, pfx##197, sfx), \
+ _1(fn, pfx##198, sfx), _1(fn, pfx##199, sfx), \
+ _10(fn, pfx##20, sfx), _10(fn, pfx##21, sfx), \
+ _10(fn, pfx##22, sfx), _10(fn, pfx##23, sfx), \
+ _10(fn, pfx##24, sfx), _10(fn, pfx##25, sfx), \
+ _10(fn, pfx##26, sfx), _10(fn, pfx##27, sfx), \
+ _1(fn, pfx##280, sfx), _1(fn, pfx##281, sfx), \
+ _1(fn, pfx##282, sfx), \
+ _1(fn, pfx##288, sfx), _1(fn, pfx##289, sfx), \
+ _10(fn, pfx##29, sfx), _10(fn, pfx##30, sfx)
+
+#define _PORT(pfx, sfx) pfx##_##sfx
+#define PORT_310(str) _310(_PORT, PORT, str)
+
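
The _1/_10/_310 helpers above exist only to keep this file readable: PORT_310(str) expands to one PORT<n>_<str> enumerator per physical port, in the same order and with the same holes as the GPIO_PORT list in sh73a0.h. For example (expansion sketch, not literal file contents):

    /* PORT_310(DATA) ==
     *   PORT0_DATA,   ..., PORT118_DATA,
     *   PORT128_DATA, ..., PORT164_DATA,
     *   PORT192_DATA, ..., PORT282_DATA,
     *   PORT288_DATA, ..., PORT309_DATA
     * (port numbers 119-127, 165-191 and 283-287 do not exist and are skipped)
     */
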
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PORT_310(DATA), /* PORT0_DATA -> PORT309_DATA */
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PORT_310(IN), /* PORT0_IN -> PORT309_IN */
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PORT_310(IN_PU), /* PORT0_IN_PU -> PORT309_IN_PU */
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_INPUT_PULLDOWN_BEGIN,
+ PORT_310(IN_PD), /* PORT0_IN_PD -> PORT309_IN_PD */
+ PINMUX_INPUT_PULLDOWN_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PORT_310(OUT), /* PORT0_OUT -> PORT309_OUT */
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PORT_310(FN_IN), /* PORT0_FN_IN -> PORT309_FN_IN */
+ PORT_310(FN_OUT), /* PORT0_FN_OUT -> PORT309_FN_OUT */
+ PORT_310(FN0), /* PORT0_FN0 -> PORT309_FN0 */
+ PORT_310(FN1), /* PORT0_FN1 -> PORT309_FN1 */
+ PORT_310(FN2), /* PORT0_FN2 -> PORT309_FN2 */
+ PORT_310(FN3), /* PORT0_FN3 -> PORT309_FN3 */
+ PORT_310(FN4), /* PORT0_FN4 -> PORT309_FN4 */
+ PORT_310(FN5), /* PORT0_FN5 -> PORT309_FN5 */
+ PORT_310(FN6), /* PORT0_FN6 -> PORT309_FN6 */
+ PORT_310(FN7), /* PORT0_FN7 -> PORT309_FN7 */
+
+ MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1,
+ MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1,
+ MSEL2CR_MSEL17_0, MSEL2CR_MSEL17_1,
+ MSEL2CR_MSEL16_0, MSEL2CR_MSEL16_1,
+ MSEL2CR_MSEL14_0, MSEL2CR_MSEL14_1,
+ MSEL2CR_MSEL13_0, MSEL2CR_MSEL13_1,
+ MSEL2CR_MSEL12_0, MSEL2CR_MSEL12_1,
+ MSEL2CR_MSEL11_0, MSEL2CR_MSEL11_1,
+ MSEL2CR_MSEL10_0, MSEL2CR_MSEL10_1,
+ MSEL2CR_MSEL9_0, MSEL2CR_MSEL9_1,
+ MSEL2CR_MSEL8_0, MSEL2CR_MSEL8_1,
+ MSEL2CR_MSEL7_0, MSEL2CR_MSEL7_1,
+ MSEL2CR_MSEL6_0, MSEL2CR_MSEL6_1,
+ MSEL2CR_MSEL4_0, MSEL2CR_MSEL4_1,
+ MSEL2CR_MSEL5_0, MSEL2CR_MSEL5_1,
+ MSEL2CR_MSEL3_0, MSEL2CR_MSEL3_1,
+ MSEL2CR_MSEL2_0, MSEL2CR_MSEL2_1,
+ MSEL2CR_MSEL1_0, MSEL2CR_MSEL1_1,
+ MSEL2CR_MSEL0_0, MSEL2CR_MSEL0_1,
+ MSEL3CR_MSEL28_0, MSEL3CR_MSEL28_1,
+ MSEL3CR_MSEL15_0, MSEL3CR_MSEL15_1,
+ MSEL3CR_MSEL11_0, MSEL3CR_MSEL11_1,
+ MSEL3CR_MSEL9_0, MSEL3CR_MSEL9_1,
+ MSEL3CR_MSEL6_0, MSEL3CR_MSEL6_1,
+ MSEL3CR_MSEL2_0, MSEL3CR_MSEL2_1,
+ MSEL4CR_MSEL29_0, MSEL4CR_MSEL29_1,
+ MSEL4CR_MSEL27_0, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0, MSEL4CR_MSEL26_1,
+ MSEL4CR_MSEL22_0, MSEL4CR_MSEL22_1,
+ MSEL4CR_MSEL21_0, MSEL4CR_MSEL21_1,
+ MSEL4CR_MSEL20_0, MSEL4CR_MSEL20_1,
+ MSEL4CR_MSEL19_0, MSEL4CR_MSEL19_1,
+ MSEL4CR_MSEL15_0, MSEL4CR_MSEL15_1,
+ MSEL4CR_MSEL13_0, MSEL4CR_MSEL13_1,
+ MSEL4CR_MSEL12_0, MSEL4CR_MSEL12_1,
+ MSEL4CR_MSEL11_0, MSEL4CR_MSEL11_1,
+ MSEL4CR_MSEL10_0, MSEL4CR_MSEL10_1,
+ MSEL4CR_MSEL9_0, MSEL4CR_MSEL9_1,
+ MSEL4CR_MSEL8_0, MSEL4CR_MSEL8_1,
+ MSEL4CR_MSEL7_0, MSEL4CR_MSEL7_1,
+ MSEL4CR_MSEL4_0, MSEL4CR_MSEL4_1,
+ MSEL4CR_MSEL1_0, MSEL4CR_MSEL1_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ /* Hardware manual Table 25-1 (Function 0-7) */
+ VBUS_0_MARK,
+ GPI0_MARK,
+ GPI1_MARK,
+ GPI2_MARK,
+ GPI3_MARK,
+ GPI4_MARK,
+ GPI5_MARK,
+ GPI6_MARK,
+ GPI7_MARK,
+ SCIFA7_RXD_MARK,
+ SCIFA7_CTS__MARK,
+ GPO7_MARK, MFG0_OUT2_MARK,
+ GPO6_MARK, MFG1_OUT2_MARK,
+ GPO5_MARK, SCIFA0_SCK_MARK, FSICOSLDT3_MARK, PORT16_VIO_CKOR_MARK,
+ SCIFA0_TXD_MARK,
+ SCIFA7_TXD_MARK,
+ SCIFA7_RTS__MARK, PORT19_VIO_CKO2_MARK,
+ GPO0_MARK,
+ GPO1_MARK,
+ GPO2_MARK, STATUS0_MARK,
+ GPO3_MARK, STATUS1_MARK,
+ GPO4_MARK, STATUS2_MARK,
+ VINT_MARK,
+ TCKON_MARK,
+ XDVFS1_MARK, PORT27_I2C_SCL2_MARK, PORT27_I2C_SCL3_MARK, \
+ MFG0_OUT1_MARK, PORT27_IROUT_MARK,
+ XDVFS2_MARK, PORT28_I2C_SDA2_MARK, PORT28_I2C_SDA3_MARK, \
+ PORT28_TPU1TO1_MARK,
+ SIM_RST_MARK, PORT29_TPU1TO1_MARK,
+ SIM_CLK_MARK, PORT30_VIO_CKOR_MARK,
+ SIM_D_MARK, PORT31_IROUT_MARK,
+ SCIFA4_TXD_MARK,
+ SCIFA4_RXD_MARK, XWUP_MARK,
+ SCIFA4_RTS__MARK,
+ SCIFA4_CTS__MARK,
+ FSIBOBT_MARK, FSIBIBT_MARK,
+ FSIBOLR_MARK, FSIBILR_MARK,
+ FSIBOSLD_MARK,
+ FSIBISLD_MARK,
+ VACK_MARK,
+ XTAL1L_MARK,
+ SCIFA0_RTS__MARK, FSICOSLDT2_MARK,
+ SCIFA0_RXD_MARK,
+ SCIFA0_CTS__MARK, FSICOSLDT1_MARK,
+ FSICOBT_MARK, FSICIBT_MARK, FSIDOBT_MARK, FSIDIBT_MARK,
+ FSICOLR_MARK, FSICILR_MARK, FSIDOLR_MARK, FSIDILR_MARK,
+ FSICOSLD_MARK, PORT47_FSICSPDIF_MARK,
+ FSICISLD_MARK, FSIDISLD_MARK,
+ FSIACK_MARK, PORT49_IRDA_OUT_MARK, PORT49_IROUT_MARK, FSIAOMC_MARK,
+ FSIAOLR_MARK, BBIF2_TSYNC2_MARK, TPU2TO2_MARK, FSIAILR_MARK,
+
+ FSIAOBT_MARK, BBIF2_TSCK2_MARK, TPU2TO3_MARK, FSIAIBT_MARK,
+ FSIAOSLD_MARK, BBIF2_TXD2_MARK,
+ FSIASPDIF_MARK, PORT53_IRDA_IN_MARK, TPU3TO3_MARK, FSIBSPDIF_MARK, \
+ PORT53_FSICSPDIF_MARK,
+ FSIBCK_MARK, PORT54_IRDA_FIRSEL_MARK, TPU3TO2_MARK, FSIBOMC_MARK, \
+ FSICCK_MARK, FSICOMC_MARK,
+ FSIAISLD_MARK, TPU0TO0_MARK,
+ A0_MARK, BS__MARK,
+ A12_MARK, PORT58_KEYOUT7_MARK, TPU4TO2_MARK,
+ A13_MARK, PORT59_KEYOUT6_MARK, TPU0TO1_MARK,
+ A14_MARK, KEYOUT5_MARK,
+ A15_MARK, KEYOUT4_MARK,
+ A16_MARK, KEYOUT3_MARK, MSIOF0_SS1_MARK,
+ A17_MARK, KEYOUT2_MARK, MSIOF0_TSYNC_MARK,
+ A18_MARK, KEYOUT1_MARK, MSIOF0_TSCK_MARK,
+ A19_MARK, KEYOUT0_MARK, MSIOF0_TXD_MARK,
+ A20_MARK, KEYIN0_MARK, MSIOF0_RSCK_MARK,
+ A21_MARK, KEYIN1_MARK, MSIOF0_RSYNC_MARK,
+ A22_MARK, KEYIN2_MARK, MSIOF0_MCK0_MARK,
+ A23_MARK, KEYIN3_MARK, MSIOF0_MCK1_MARK,
+ A24_MARK, KEYIN4_MARK, MSIOF0_RXD_MARK,
+ A25_MARK, KEYIN5_MARK, MSIOF0_SS2_MARK,
+ A26_MARK, KEYIN6_MARK,
+ KEYIN7_MARK,
+ D0_NAF0_MARK,
+ D1_NAF1_MARK,
+ D2_NAF2_MARK,
+ D3_NAF3_MARK,
+ D4_NAF4_MARK,
+ D5_NAF5_MARK,
+ D6_NAF6_MARK,
+ D7_NAF7_MARK,
+ D8_NAF8_MARK,
+ D9_NAF9_MARK,
+ D10_NAF10_MARK,
+ D11_NAF11_MARK,
+ D12_NAF12_MARK,
+ D13_NAF13_MARK,
+ D14_NAF14_MARK,
+ D15_NAF15_MARK,
+ CS4__MARK,
+ CS5A__MARK, PORT91_RDWR_MARK,
+ CS5B__MARK, FCE1__MARK,
+ CS6B__MARK, DACK0_MARK,
+ FCE0__MARK, CS6A__MARK,
+ WAIT__MARK, DREQ0_MARK,
+ RD__FSC_MARK,
+ WE0__FWE_MARK, RDWR_FWE_MARK,
+ WE1__MARK,
+ FRB_MARK,
+ CKO_MARK,
+ NBRSTOUT__MARK,
+ NBRST__MARK,
+ BBIF2_TXD_MARK,
+ BBIF2_RXD_MARK,
+ BBIF2_SYNC_MARK,
+ BBIF2_SCK_MARK,
+ SCIFA3_CTS__MARK, MFG3_IN2_MARK,
+ SCIFA3_RXD_MARK, MFG3_IN1_MARK,
+ BBIF1_SS2_MARK, SCIFA3_RTS__MARK, MFG3_OUT1_MARK,
+ SCIFA3_TXD_MARK,
+ HSI_RX_DATA_MARK, BBIF1_RXD_MARK,
+ HSI_TX_WAKE_MARK, BBIF1_TSCK_MARK,
+ HSI_TX_DATA_MARK, BBIF1_TSYNC_MARK,
+ HSI_TX_READY_MARK, BBIF1_TXD_MARK,
+ HSI_RX_READY_MARK, BBIF1_RSCK_MARK, PORT115_I2C_SCL2_MARK, \
+ PORT115_I2C_SCL3_MARK,
+ HSI_RX_WAKE_MARK, BBIF1_RSYNC_MARK, PORT116_I2C_SDA2_MARK, \
+ PORT116_I2C_SDA3_MARK,
+ HSI_RX_FLAG_MARK, BBIF1_SS1_MARK, BBIF1_FLOW_MARK,
+ HSI_TX_FLAG_MARK,
+ VIO_VD_MARK, PORT128_LCD2VSYN_MARK, VIO2_VD_MARK, LCD2D0_MARK,
+
+ VIO_HD_MARK, PORT129_LCD2HSYN_MARK, PORT129_LCD2CS__MARK, \
+ VIO2_HD_MARK, LCD2D1_MARK,
+ VIO_D0_MARK, PORT130_MSIOF2_RXD_MARK, LCD2D10_MARK,
+ VIO_D1_MARK, PORT131_KEYOUT6_MARK, PORT131_MSIOF2_SS1_MARK, \
+ PORT131_KEYOUT11_MARK, LCD2D11_MARK,
+ VIO_D2_MARK, PORT132_KEYOUT7_MARK, PORT132_MSIOF2_SS2_MARK, \
+ PORT132_KEYOUT10_MARK, LCD2D12_MARK,
+ VIO_D3_MARK, MSIOF2_TSYNC_MARK, LCD2D13_MARK,
+ VIO_D4_MARK, MSIOF2_TXD_MARK, LCD2D14_MARK,
+ VIO_D5_MARK, MSIOF2_TSCK_MARK, LCD2D15_MARK,
+ VIO_D6_MARK, PORT136_KEYOUT8_MARK, LCD2D16_MARK,
+ VIO_D7_MARK, PORT137_KEYOUT9_MARK, LCD2D17_MARK,
+ VIO_D8_MARK, PORT138_KEYOUT8_MARK, VIO2_D0_MARK, LCD2D6_MARK,
+ VIO_D9_MARK, PORT139_KEYOUT9_MARK, VIO2_D1_MARK, LCD2D7_MARK,
+ VIO_D10_MARK, TPU0TO2_MARK, VIO2_D2_MARK, LCD2D8_MARK,
+ VIO_D11_MARK, TPU0TO3_MARK, VIO2_D3_MARK, LCD2D9_MARK,
+ VIO_D12_MARK, PORT142_KEYOUT10_MARK, VIO2_D4_MARK, LCD2D2_MARK,
+ VIO_D13_MARK, PORT143_KEYOUT11_MARK, PORT143_KEYOUT6_MARK, \
+ VIO2_D5_MARK, LCD2D3_MARK,
+ VIO_D14_MARK, PORT144_KEYOUT7_MARK, VIO2_D6_MARK, LCD2D4_MARK,
+ VIO_D15_MARK, TPU1TO3_MARK, PORT145_LCD2DISP_MARK, \
+ PORT145_LCD2RS_MARK, VIO2_D7_MARK, LCD2D5_MARK,
+ VIO_CLK_MARK, LCD2DCK_MARK, PORT146_LCD2WR__MARK, VIO2_CLK_MARK, \
+ LCD2D18_MARK,
+ VIO_FIELD_MARK, LCD2RD__MARK, VIO2_FIELD_MARK, LCD2D19_MARK,
+ VIO_CKO_MARK,
+ A27_MARK, PORT149_RDWR_MARK, MFG0_IN1_MARK, PORT149_KEYOUT9_MARK,
+ MFG0_IN2_MARK,
+ TS_SPSYNC3_MARK, MSIOF2_RSCK_MARK,
+ TS_SDAT3_MARK, MSIOF2_RSYNC_MARK,
+ TPU1TO2_MARK, TS_SDEN3_MARK, PORT153_MSIOF2_SS1_MARK,
+ SCIFA2_TXD1_MARK, MSIOF2_MCK0_MARK,
+ SCIFA2_RXD1_MARK, MSIOF2_MCK1_MARK,
+ SCIFA2_RTS1__MARK, PORT156_MSIOF2_SS2_MARK,
+ SCIFA2_CTS1__MARK, PORT157_MSIOF2_RXD_MARK,
+ DINT__MARK, SCIFA2_SCK1_MARK, TS_SCK3_MARK,
+ PORT159_SCIFB_SCK_MARK, PORT159_SCIFA5_SCK_MARK, NMI_MARK,
+ PORT160_SCIFB_TXD_MARK, PORT160_SCIFA5_TXD_MARK,
+ PORT161_SCIFB_CTS__MARK, PORT161_SCIFA5_CTS__MARK,
+ PORT162_SCIFB_RXD_MARK, PORT162_SCIFA5_RXD_MARK,
+ PORT163_SCIFB_RTS__MARK, PORT163_SCIFA5_RTS__MARK, TPU3TO0_MARK,
+ LCDD0_MARK,
+ LCDD1_MARK, PORT193_SCIFA5_CTS__MARK, BBIF2_TSYNC1_MARK,
+ LCDD2_MARK, PORT194_SCIFA5_RTS__MARK, BBIF2_TSCK1_MARK,
+ LCDD3_MARK, PORT195_SCIFA5_RXD_MARK, BBIF2_TXD1_MARK,
+ LCDD4_MARK, PORT196_SCIFA5_TXD_MARK,
+ LCDD5_MARK, PORT197_SCIFA5_SCK_MARK, MFG2_OUT2_MARK, TPU2TO1_MARK,
+ LCDD6_MARK,
+ LCDD7_MARK, TPU4TO1_MARK, MFG4_OUT2_MARK,
+ LCDD8_MARK, D16_MARK,
+ LCDD9_MARK, D17_MARK,
+ LCDD10_MARK, D18_MARK,
+ LCDD11_MARK, D19_MARK,
+ LCDD12_MARK, D20_MARK,
+ LCDD13_MARK, D21_MARK,
+ LCDD14_MARK, D22_MARK,
+ LCDD15_MARK, PORT207_MSIOF0L_SS1_MARK, D23_MARK,
+ LCDD16_MARK, PORT208_MSIOF0L_SS2_MARK, D24_MARK,
+ LCDD17_MARK, D25_MARK,
+ LCDD18_MARK, DREQ2_MARK, PORT210_MSIOF0L_SS1_MARK, D26_MARK,
+ LCDD19_MARK, PORT211_MSIOF0L_SS2_MARK, D27_MARK,
+ LCDD20_MARK, TS_SPSYNC1_MARK, MSIOF0L_MCK0_MARK, D28_MARK,
+ LCDD21_MARK, TS_SDAT1_MARK, MSIOF0L_MCK1_MARK, D29_MARK,
+ LCDD22_MARK, TS_SDEN1_MARK, MSIOF0L_RSCK_MARK, D30_MARK,
+ LCDD23_MARK, TS_SCK1_MARK, MSIOF0L_RSYNC_MARK, D31_MARK,
+ LCDDCK_MARK, LCDWR__MARK,
+ LCDRD__MARK, DACK2_MARK, PORT217_LCD2RS_MARK, MSIOF0L_TSYNC_MARK, \
+ VIO2_FIELD3_MARK, PORT217_LCD2DISP_MARK,
+ LCDHSYN_MARK, LCDCS__MARK, LCDCS2__MARK, DACK3_MARK, \
+ PORT218_VIO_CKOR_MARK,
+ LCDDISP_MARK, LCDRS_MARK, PORT219_LCD2WR__MARK, DREQ3_MARK, \
+ MSIOF0L_TSCK_MARK, VIO2_CLK3_MARK, LCD2DCK_2_MARK,
+ LCDVSYN_MARK, LCDVSYN2_MARK,
+ LCDLCLK_MARK, DREQ1_MARK, PORT221_LCD2CS__MARK, PWEN_MARK, \
+ MSIOF0L_RXD_MARK, VIO2_HD3_MARK, PORT221_LCD2HSYN_MARK,
+ LCDDON_MARK, LCDDON2_MARK, DACK1_MARK, OVCN_MARK, MSIOF0L_TXD_MARK, \
+ VIO2_VD3_MARK, PORT222_LCD2VSYN_MARK,
+
+ SCIFA1_TXD_MARK, OVCN2_MARK,
+ EXTLP_MARK, SCIFA1_SCK_MARK, PORT226_VIO_CKO2_MARK,
+ SCIFA1_RTS__MARK, IDIN_MARK,
+ SCIFA1_RXD_MARK,
+ SCIFA1_CTS__MARK, MFG1_IN1_MARK,
+ MSIOF1_TXD_MARK, SCIFA2_TXD2_MARK,
+ MSIOF1_TSYNC_MARK, SCIFA2_CTS2__MARK,
+ MSIOF1_TSCK_MARK, SCIFA2_SCK2_MARK,
+ MSIOF1_RXD_MARK, SCIFA2_RXD2_MARK,
+ MSIOF1_RSCK_MARK, SCIFA2_RTS2__MARK, VIO2_CLK2_MARK, LCD2D20_MARK,
+ MSIOF1_RSYNC_MARK, MFG1_IN2_MARK, VIO2_VD2_MARK, LCD2D21_MARK,
+ MSIOF1_MCK0_MARK, PORT236_I2C_SDA2_MARK,
+ MSIOF1_MCK1_MARK, PORT237_I2C_SCL2_MARK,
+ MSIOF1_SS1_MARK, VIO2_FIELD2_MARK, LCD2D22_MARK,
+ MSIOF1_SS2_MARK, VIO2_HD2_MARK, LCD2D23_MARK,
+ SCIFA6_TXD_MARK,
+ PORT241_IRDA_OUT_MARK, PORT241_IROUT_MARK, MFG4_OUT1_MARK, TPU4TO0_MARK,
+ PORT242_IRDA_IN_MARK, MFG4_IN2_MARK,
+ PORT243_IRDA_FIRSEL_MARK, PORT243_VIO_CKO2_MARK,
+ PORT244_SCIFA5_CTS__MARK, MFG2_IN1_MARK, PORT244_SCIFB_CTS__MARK, \
+ MSIOF2R_RXD_MARK,
+ PORT245_SCIFA5_RTS__MARK, MFG2_IN2_MARK, PORT245_SCIFB_RTS__MARK, \
+ MSIOF2R_TXD_MARK,
+ PORT246_SCIFA5_RXD_MARK, MFG1_OUT1_MARK, PORT246_SCIFB_RXD_MARK, \
+ TPU1TO0_MARK,
+ PORT247_SCIFA5_TXD_MARK, MFG3_OUT2_MARK, PORT247_SCIFB_TXD_MARK, \
+ TPU3TO1_MARK,
+ PORT248_SCIFA5_SCK_MARK, MFG2_OUT1_MARK, PORT248_SCIFB_SCK_MARK, \
+ TPU2TO0_MARK, PORT248_I2C_SCL3_MARK, MSIOF2R_TSCK_MARK,
+ PORT249_IROUT_MARK, MFG4_IN1_MARK, PORT249_I2C_SDA3_MARK, \
+ MSIOF2R_TSYNC_MARK,
+ SDHICLK0_MARK,
+ SDHICD0_MARK,
+ SDHID0_0_MARK,
+ SDHID0_1_MARK,
+ SDHID0_2_MARK,
+ SDHID0_3_MARK,
+ SDHICMD0_MARK,
+ SDHIWP0_MARK,
+ SDHICLK1_MARK,
+ SDHID1_0_MARK, TS_SPSYNC2_MARK,
+ SDHID1_1_MARK, TS_SDAT2_MARK,
+ SDHID1_2_MARK, TS_SDEN2_MARK,
+ SDHID1_3_MARK, TS_SCK2_MARK,
+ SDHICMD1_MARK,
+ SDHICLK2_MARK,
+ SDHID2_0_MARK, TS_SPSYNC4_MARK,
+ SDHID2_1_MARK, TS_SDAT4_MARK,
+ SDHID2_2_MARK, TS_SDEN4_MARK,
+ SDHID2_3_MARK, TS_SCK4_MARK,
+ SDHICMD2_MARK,
+ MMCCLK0_MARK,
+ MMCD0_0_MARK,
+ MMCD0_1_MARK,
+ MMCD0_2_MARK,
+ MMCD0_3_MARK,
+ MMCD0_4_MARK, TS_SPSYNC5_MARK,
+ MMCD0_5_MARK, TS_SDAT5_MARK,
+ MMCD0_6_MARK, TS_SDEN5_MARK,
+ MMCD0_7_MARK, TS_SCK5_MARK,
+ MMCCMD0_MARK,
+ RESETOUTS__MARK, EXTAL2OUT_MARK,
+ MCP_WAIT__MCP_FRB_MARK,
+ MCP_CKO_MARK, MMCCLK1_MARK,
+ MCP_D15_MCP_NAF15_MARK,
+ MCP_D14_MCP_NAF14_MARK,
+ MCP_D13_MCP_NAF13_MARK,
+ MCP_D12_MCP_NAF12_MARK,
+ MCP_D11_MCP_NAF11_MARK,
+ MCP_D10_MCP_NAF10_MARK,
+ MCP_D9_MCP_NAF9_MARK,
+ MCP_D8_MCP_NAF8_MARK, MMCCMD1_MARK,
+ MCP_D7_MCP_NAF7_MARK, MMCD1_7_MARK,
+
+ MCP_D6_MCP_NAF6_MARK, MMCD1_6_MARK,
+ MCP_D5_MCP_NAF5_MARK, MMCD1_5_MARK,
+ MCP_D4_MCP_NAF4_MARK, MMCD1_4_MARK,
+ MCP_D3_MCP_NAF3_MARK, MMCD1_3_MARK,
+ MCP_D2_MCP_NAF2_MARK, MMCD1_2_MARK,
+ MCP_D1_MCP_NAF1_MARK, MMCD1_1_MARK,
+ MCP_D0_MCP_NAF0_MARK, MMCD1_0_MARK,
+ MCP_NBRSTOUT__MARK,
+ MCP_WE0__MCP_FWE_MARK, MCP_RDWR_MCP_FWE_MARK,
+
+ /* MSEL2 special cases */
+ TSIF2_TS_XX1_MARK,
+ TSIF2_TS_XX2_MARK,
+ TSIF2_TS_XX3_MARK,
+ TSIF2_TS_XX4_MARK,
+ TSIF2_TS_XX5_MARK,
+ TSIF1_TS_XX1_MARK,
+ TSIF1_TS_XX2_MARK,
+ TSIF1_TS_XX3_MARK,
+ TSIF1_TS_XX4_MARK,
+ TSIF1_TS_XX5_MARK,
+ TSIF0_TS_XX1_MARK,
+ TSIF0_TS_XX2_MARK,
+ TSIF0_TS_XX3_MARK,
+ TSIF0_TS_XX4_MARK,
+ TSIF0_TS_XX5_MARK,
+ MST1_TS_XX1_MARK,
+ MST1_TS_XX2_MARK,
+ MST1_TS_XX3_MARK,
+ MST1_TS_XX4_MARK,
+ MST1_TS_XX5_MARK,
+ MST0_TS_XX1_MARK,
+ MST0_TS_XX2_MARK,
+ MST0_TS_XX3_MARK,
+ MST0_TS_XX4_MARK,
+ MST0_TS_XX5_MARK,
+
+ /* MSEL3 special cases */
+ SDHI0_VCCQ_MC0_ON_MARK,
+ SDHI0_VCCQ_MC0_OFF_MARK,
+ DEBUG_MON_VIO_MARK,
+ DEBUG_MON_LCDD_MARK,
+ LCDC_LCDC0_MARK,
+ LCDC_LCDC1_MARK,
+
+ /* MSEL4 special cases */
+ IRQ9_MEM_INT_MARK,
+ IRQ9_MCP_INT_MARK,
+ A11_MARK,
+ KEYOUT8_MARK,
+ TPU4TO3_MARK,
+ RESETA_N_PU_ON_MARK,
+ RESETA_N_PU_OFF_MARK,
+ EDBGREQ_PD_MARK,
+ EDBGREQ_PU_MARK,
+
+ PINMUX_MARK_END,
+};
+
+#define PORT_DATA_I(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_IN)
+
+#define PORT_DATA_I_PD(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
+ PORT##nr##_IN, PORT##nr##_IN_PD)
+
+#define PORT_DATA_I_PU(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
+ PORT##nr##_IN, PORT##nr##_IN_PU)
+
+#define PORT_DATA_I_PU_PD(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
+ PORT##nr##_IN, PORT##nr##_IN_PD, \
+ PORT##nr##_IN_PU)
+
+#define PORT_DATA_O(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
+ PORT##nr##_OUT)
+
+#define PORT_DATA_IO(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
+ PORT##nr##_OUT, PORT##nr##_IN)
+
+#define PORT_DATA_IO_PD(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
+ PORT##nr##_OUT, PORT##nr##_IN, \
+ PORT##nr##_IN_PD)
+
+#define PORT_DATA_IO_PU(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
+ PORT##nr##_OUT, PORT##nr##_IN, \
+ PORT##nr##_IN_PU)
+
+#define PORT_DATA_IO_PU_PD(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
+ PORT##nr##_OUT, PORT##nr##_IN, \
+ PORT##nr##_IN_PD, PORT##nr##_IN_PU)
+
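
Each PORT_DATA_* wrapper records which pin states are legal for a port when it is in GPIO mode (function 0). For instance, an expansion sketch only:

    /* PORT_DATA_IO_PU_PD(12) ==
     *   PINMUX_DATA(PORT12_DATA, PORT12_FN0,
     *               PORT12_OUT, PORT12_IN,
     *               PORT12_IN_PD, PORT12_IN_PU)
     * i.e. port 12 may drive as an output or be read as an input with
     * either the pull-down or the pull-up enabled.
     */
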
+static pinmux_enum_t pinmux_data[] = {
+ /* specify valid pin states for each pin in GPIO mode */
+
+ /* Table 25-1 (I/O and Pull U/D) */
+ PORT_DATA_I_PD(0),
+ PORT_DATA_I_PU(1),
+ PORT_DATA_I_PU(2),
+ PORT_DATA_I_PU(3),
+ PORT_DATA_I_PU(4),
+ PORT_DATA_I_PU(5),
+ PORT_DATA_I_PU(6),
+ PORT_DATA_I_PU(7),
+ PORT_DATA_I_PU(8),
+ PORT_DATA_I_PD(9),
+ PORT_DATA_I_PD(10),
+ PORT_DATA_I_PU_PD(11),
+ PORT_DATA_IO_PU_PD(12),
+ PORT_DATA_IO_PU_PD(13),
+ PORT_DATA_IO_PU_PD(14),
+ PORT_DATA_IO_PU_PD(15),
+ PORT_DATA_IO_PD(16),
+ PORT_DATA_IO_PD(17),
+ PORT_DATA_IO_PU(18),
+ PORT_DATA_IO_PU(19),
+ PORT_DATA_O(20),
+ PORT_DATA_O(21),
+ PORT_DATA_O(22),
+ PORT_DATA_O(23),
+ PORT_DATA_O(24),
+ PORT_DATA_I_PD(25),
+ PORT_DATA_I_PD(26),
+ PORT_DATA_IO_PU(27),
+ PORT_DATA_IO_PU(28),
+ PORT_DATA_IO_PD(29),
+ PORT_DATA_IO_PD(30),
+ PORT_DATA_IO_PU(31),
+ PORT_DATA_IO_PD(32),
+ PORT_DATA_I_PU_PD(33),
+ PORT_DATA_IO_PD(34),
+ PORT_DATA_I_PU_PD(35),
+ PORT_DATA_IO_PD(36),
+ PORT_DATA_IO(37),
+ PORT_DATA_O(38),
+ PORT_DATA_I_PU(39),
+ PORT_DATA_I_PU_PD(40),
+ PORT_DATA_O(41),
+ PORT_DATA_IO_PD(42),
+ PORT_DATA_IO_PU_PD(43),
+ PORT_DATA_IO_PU_PD(44),
+ PORT_DATA_IO_PD(45),
+ PORT_DATA_IO_PD(46),
+ PORT_DATA_IO_PD(47),
+ PORT_DATA_I_PD(48),
+ PORT_DATA_IO_PU_PD(49),
+ PORT_DATA_IO_PD(50),
+
+ PORT_DATA_IO_PD(51),
+ PORT_DATA_O(52),
+ PORT_DATA_IO_PU_PD(53),
+ PORT_DATA_IO_PU_PD(54),
+ PORT_DATA_IO_PD(55),
+ PORT_DATA_I_PU_PD(56),
+ PORT_DATA_IO(57),
+ PORT_DATA_IO(58),
+ PORT_DATA_IO(59),
+ PORT_DATA_IO(60),
+ PORT_DATA_IO(61),
+ PORT_DATA_IO_PD(62),
+ PORT_DATA_IO_PD(63),
+ PORT_DATA_IO_PU_PD(64),
+ PORT_DATA_IO_PD(65),
+ PORT_DATA_IO_PU_PD(66),
+ PORT_DATA_IO_PU_PD(67),
+ PORT_DATA_IO_PU_PD(68),
+ PORT_DATA_IO_PU_PD(69),
+ PORT_DATA_IO_PU_PD(70),
+ PORT_DATA_IO_PU_PD(71),
+ PORT_DATA_IO_PU_PD(72),
+ PORT_DATA_I_PU_PD(73),
+ PORT_DATA_IO_PU(74),
+ PORT_DATA_IO_PU(75),
+ PORT_DATA_IO_PU(76),
+ PORT_DATA_IO_PU(77),
+ PORT_DATA_IO_PU(78),
+ PORT_DATA_IO_PU(79),
+ PORT_DATA_IO_PU(80),
+ PORT_DATA_IO_PU(81),
+ PORT_DATA_IO_PU(82),
+ PORT_DATA_IO_PU(83),
+ PORT_DATA_IO_PU(84),
+ PORT_DATA_IO_PU(85),
+ PORT_DATA_IO_PU(86),
+ PORT_DATA_IO_PU(87),
+ PORT_DATA_IO_PU(88),
+ PORT_DATA_IO_PU(89),
+ PORT_DATA_O(90),
+ PORT_DATA_IO_PU(91),
+ PORT_DATA_O(92),
+ PORT_DATA_IO_PU(93),
+ PORT_DATA_O(94),
+ PORT_DATA_I_PU_PD(95),
+ PORT_DATA_IO(96),
+ PORT_DATA_IO(97),
+ PORT_DATA_IO(98),
+ PORT_DATA_I_PU(99),
+ PORT_DATA_O(100),
+ PORT_DATA_O(101),
+ PORT_DATA_I_PU(102),
+ PORT_DATA_IO_PD(103),
+ PORT_DATA_I_PU_PD(104),
+ PORT_DATA_I_PD(105),
+ PORT_DATA_I_PD(106),
+ PORT_DATA_I_PU_PD(107),
+ PORT_DATA_I_PU_PD(108),
+ PORT_DATA_IO_PD(109),
+ PORT_DATA_IO_PD(110),
+ PORT_DATA_IO_PU_PD(111),
+ PORT_DATA_IO_PU_PD(112),
+ PORT_DATA_IO_PU_PD(113),
+ PORT_DATA_IO_PD(114),
+ PORT_DATA_IO_PU(115),
+ PORT_DATA_IO_PU(116),
+ PORT_DATA_IO_PU_PD(117),
+ PORT_DATA_IO_PU_PD(118),
+ PORT_DATA_IO_PD(128),
+
+ PORT_DATA_IO_PD(129),
+ PORT_DATA_IO_PU_PD(130),
+ PORT_DATA_IO_PD(131),
+ PORT_DATA_IO_PD(132),
+ PORT_DATA_IO_PD(133),
+ PORT_DATA_IO_PU_PD(134),
+ PORT_DATA_IO_PU_PD(135),
+ PORT_DATA_IO_PU_PD(136),
+ PORT_DATA_IO_PU_PD(137),
+ PORT_DATA_IO_PD(138),
+ PORT_DATA_IO_PD(139),
+ PORT_DATA_IO_PD(140),
+ PORT_DATA_IO_PD(141),
+ PORT_DATA_IO_PD(142),
+ PORT_DATA_IO_PD(143),
+ PORT_DATA_IO_PU_PD(144),
+ PORT_DATA_IO_PD(145),
+ PORT_DATA_IO_PU_PD(146),
+ PORT_DATA_IO_PU_PD(147),
+ PORT_DATA_IO_PU_PD(148),
+ PORT_DATA_IO_PU_PD(149),
+ PORT_DATA_I_PU_PD(150),
+ PORT_DATA_IO_PU_PD(151),
+ PORT_DATA_IO_PU_PD(152),
+ PORT_DATA_IO_PD(153),
+ PORT_DATA_IO_PD(154),
+ PORT_DATA_I_PU_PD(155),
+ PORT_DATA_IO_PU_PD(156),
+ PORT_DATA_I_PD(157),
+ PORT_DATA_IO_PD(158),
+ PORT_DATA_IO_PU_PD(159),
+ PORT_DATA_IO_PU_PD(160),
+ PORT_DATA_I_PU_PD(161),
+ PORT_DATA_I_PU_PD(162),
+ PORT_DATA_IO_PU_PD(163),
+ PORT_DATA_I_PU_PD(164),
+ PORT_DATA_IO_PD(192),
+ PORT_DATA_IO_PU_PD(193),
+ PORT_DATA_IO_PD(194),
+ PORT_DATA_IO_PU_PD(195),
+ PORT_DATA_IO_PD(196),
+ PORT_DATA_IO_PD(197),
+ PORT_DATA_IO_PD(198),
+ PORT_DATA_IO_PD(199),
+ PORT_DATA_IO_PU_PD(200),
+ PORT_DATA_IO_PU_PD(201),
+ PORT_DATA_IO_PU_PD(202),
+ PORT_DATA_IO_PU_PD(203),
+ PORT_DATA_IO_PU_PD(204),
+ PORT_DATA_IO_PU_PD(205),
+ PORT_DATA_IO_PU_PD(206),
+ PORT_DATA_IO_PD(207),
+ PORT_DATA_IO_PD(208),
+ PORT_DATA_IO_PD(209),
+ PORT_DATA_IO_PD(210),
+ PORT_DATA_IO_PD(211),
+ PORT_DATA_IO_PD(212),
+ PORT_DATA_IO_PD(213),
+ PORT_DATA_IO_PU_PD(214),
+ PORT_DATA_IO_PU_PD(215),
+ PORT_DATA_IO_PD(216),
+ PORT_DATA_IO_PD(217),
+ PORT_DATA_O(218),
+ PORT_DATA_IO_PD(219),
+ PORT_DATA_IO_PD(220),
+ PORT_DATA_IO_PU_PD(221),
+ PORT_DATA_IO_PU_PD(222),
+ PORT_DATA_I_PU_PD(223),
+ PORT_DATA_I_PU_PD(224),
+
+ PORT_DATA_IO_PU_PD(225),
+ PORT_DATA_O(226),
+ PORT_DATA_IO_PU_PD(227),
+ PORT_DATA_I_PU_PD(228),
+ PORT_DATA_I_PD(229),
+ PORT_DATA_IO(230),
+ PORT_DATA_IO_PU_PD(231),
+ PORT_DATA_IO_PU_PD(232),
+ PORT_DATA_I_PU_PD(233),
+ PORT_DATA_IO_PU_PD(234),
+ PORT_DATA_IO_PU_PD(235),
+ PORT_DATA_IO_PU_PD(236),
+ PORT_DATA_IO_PD(237),
+ PORT_DATA_IO_PU_PD(238),
+ PORT_DATA_IO_PU_PD(239),
+ PORT_DATA_IO_PU_PD(240),
+ PORT_DATA_O(241),
+ PORT_DATA_I_PD(242),
+ PORT_DATA_IO_PU_PD(243),
+ PORT_DATA_IO_PU_PD(244),
+ PORT_DATA_IO_PU_PD(245),
+ PORT_DATA_IO_PU_PD(246),
+ PORT_DATA_IO_PU_PD(247),
+ PORT_DATA_IO_PU_PD(248),
+ PORT_DATA_IO_PU_PD(249),
+ PORT_DATA_IO_PU_PD(250),
+ PORT_DATA_IO_PU_PD(251),
+ PORT_DATA_IO_PU_PD(252),
+ PORT_DATA_IO_PU_PD(253),
+ PORT_DATA_IO_PU_PD(254),
+ PORT_DATA_IO_PU_PD(255),
+ PORT_DATA_IO_PU_PD(256),
+ PORT_DATA_IO_PU_PD(257),
+ PORT_DATA_IO_PU_PD(258),
+ PORT_DATA_IO_PU_PD(259),
+ PORT_DATA_IO_PU_PD(260),
+ PORT_DATA_IO_PU_PD(261),
+ PORT_DATA_IO_PU_PD(262),
+ PORT_DATA_IO_PU_PD(263),
+ PORT_DATA_IO_PU_PD(264),
+ PORT_DATA_IO_PU_PD(265),
+ PORT_DATA_IO_PU_PD(266),
+ PORT_DATA_IO_PU_PD(267),
+ PORT_DATA_IO_PU_PD(268),
+ PORT_DATA_IO_PU_PD(269),
+ PORT_DATA_IO_PU_PD(270),
+ PORT_DATA_IO_PU_PD(271),
+ PORT_DATA_IO_PU_PD(272),
+ PORT_DATA_IO_PU_PD(273),
+ PORT_DATA_IO_PU_PD(274),
+ PORT_DATA_IO_PU_PD(275),
+ PORT_DATA_IO_PU_PD(276),
+ PORT_DATA_IO_PU_PD(277),
+ PORT_DATA_IO_PU_PD(278),
+ PORT_DATA_IO_PU_PD(279),
+ PORT_DATA_IO_PU_PD(280),
+ PORT_DATA_O(281),
+ PORT_DATA_O(282),
+ PORT_DATA_I_PU(288),
+ PORT_DATA_IO_PU_PD(289),
+ PORT_DATA_IO_PU_PD(290),
+ PORT_DATA_IO_PU_PD(291),
+ PORT_DATA_IO_PU_PD(292),
+ PORT_DATA_IO_PU_PD(293),
+ PORT_DATA_IO_PU_PD(294),
+ PORT_DATA_IO_PU_PD(295),
+ PORT_DATA_IO_PU_PD(296),
+ PORT_DATA_IO_PU_PD(297),
+ PORT_DATA_IO_PU_PD(298),
+
+ PORT_DATA_IO_PU_PD(299),
+ PORT_DATA_IO_PU_PD(300),
+ PORT_DATA_IO_PU_PD(301),
+ PORT_DATA_IO_PU_PD(302),
+ PORT_DATA_IO_PU_PD(303),
+ PORT_DATA_IO_PU_PD(304),
+ PORT_DATA_IO_PU_PD(305),
+ PORT_DATA_O(306),
+ PORT_DATA_O(307),
+ PORT_DATA_I_PU(308),
+ PORT_DATA_O(309),
+
+ /* Table 25-1 (Function 0-7) */
+ PINMUX_DATA(VBUS_0_MARK, PORT0_FN1),
+ PINMUX_DATA(GPI0_MARK, PORT1_FN1),
+ PINMUX_DATA(GPI1_MARK, PORT2_FN1),
+ PINMUX_DATA(GPI2_MARK, PORT3_FN1),
+ PINMUX_DATA(GPI3_MARK, PORT4_FN1),
+ PINMUX_DATA(GPI4_MARK, PORT5_FN1),
+ PINMUX_DATA(GPI5_MARK, PORT6_FN1),
+ PINMUX_DATA(GPI6_MARK, PORT7_FN1),
+ PINMUX_DATA(GPI7_MARK, PORT8_FN1),
+ PINMUX_DATA(SCIFA7_RXD_MARK, PORT12_FN2),
+ PINMUX_DATA(SCIFA7_CTS__MARK, PORT13_FN2),
+ PINMUX_DATA(GPO7_MARK, PORT14_FN1), \
+ PINMUX_DATA(MFG0_OUT2_MARK, PORT14_FN4),
+ PINMUX_DATA(GPO6_MARK, PORT15_FN1), \
+ PINMUX_DATA(MFG1_OUT2_MARK, PORT15_FN4),
+ PINMUX_DATA(GPO5_MARK, PORT16_FN1), \
+ PINMUX_DATA(SCIFA0_SCK_MARK, PORT16_FN2), \
+ PINMUX_DATA(FSICOSLDT3_MARK, PORT16_FN3), \
+ PINMUX_DATA(PORT16_VIO_CKOR_MARK, PORT16_FN4),
+ PINMUX_DATA(SCIFA0_TXD_MARK, PORT17_FN2),
+ PINMUX_DATA(SCIFA7_TXD_MARK, PORT18_FN2),
+ PINMUX_DATA(SCIFA7_RTS__MARK, PORT19_FN2), \
+ PINMUX_DATA(PORT19_VIO_CKO2_MARK, PORT19_FN3),
+ PINMUX_DATA(GPO0_MARK, PORT20_FN1),
+ PINMUX_DATA(GPO1_MARK, PORT21_FN1),
+ PINMUX_DATA(GPO2_MARK, PORT22_FN1), \
+ PINMUX_DATA(STATUS0_MARK, PORT22_FN2),
+ PINMUX_DATA(GPO3_MARK, PORT23_FN1), \
+ PINMUX_DATA(STATUS1_MARK, PORT23_FN2),
+ PINMUX_DATA(GPO4_MARK, PORT24_FN1), \
+ PINMUX_DATA(STATUS2_MARK, PORT24_FN2),
+ PINMUX_DATA(VINT_MARK, PORT25_FN1),
+ PINMUX_DATA(TCKON_MARK, PORT26_FN1),
+ PINMUX_DATA(XDVFS1_MARK, PORT27_FN1), \
+ PINMUX_DATA(PORT27_I2C_SCL2_MARK, PORT27_FN2, MSEL2CR_MSEL17_0,
+ MSEL2CR_MSEL16_1), \
+ PINMUX_DATA(PORT27_I2C_SCL3_MARK, PORT27_FN3, MSEL2CR_MSEL19_0,
+ MSEL2CR_MSEL18_0), \
+ PINMUX_DATA(MFG0_OUT1_MARK, PORT27_FN4), \
+ PINMUX_DATA(PORT27_IROUT_MARK, PORT27_FN7),
+ PINMUX_DATA(XDVFS2_MARK, PORT28_FN1), \
+ PINMUX_DATA(PORT28_I2C_SDA2_MARK, PORT28_FN2, MSEL2CR_MSEL17_0,
+ MSEL2CR_MSEL16_1), \
+ PINMUX_DATA(PORT28_I2C_SDA3_MARK, PORT28_FN3, MSEL2CR_MSEL19_0,
+ MSEL2CR_MSEL18_0), \
+ PINMUX_DATA(PORT28_TPU1TO1_MARK, PORT28_FN7),
+ PINMUX_DATA(SIM_RST_MARK, PORT29_FN1), \
+ PINMUX_DATA(PORT29_TPU1TO1_MARK, PORT29_FN4),
+ PINMUX_DATA(SIM_CLK_MARK, PORT30_FN1), \
+ PINMUX_DATA(PORT30_VIO_CKOR_MARK, PORT30_FN4),
+ PINMUX_DATA(SIM_D_MARK, PORT31_FN1), \
+ PINMUX_DATA(PORT31_IROUT_MARK, PORT31_FN4),
+ PINMUX_DATA(SCIFA4_TXD_MARK, PORT32_FN2),
+ PINMUX_DATA(SCIFA4_RXD_MARK, PORT33_FN2), \
+ PINMUX_DATA(XWUP_MARK, PORT33_FN3),
+ PINMUX_DATA(SCIFA4_RTS__MARK, PORT34_FN2),
+ PINMUX_DATA(SCIFA4_CTS__MARK, PORT35_FN2),
+ PINMUX_DATA(FSIBOBT_MARK, PORT36_FN1), \
+ PINMUX_DATA(FSIBIBT_MARK, PORT36_FN2),
+ PINMUX_DATA(FSIBOLR_MARK, PORT37_FN1), \
+ PINMUX_DATA(FSIBILR_MARK, PORT37_FN2),
+ PINMUX_DATA(FSIBOSLD_MARK, PORT38_FN1),
+ PINMUX_DATA(FSIBISLD_MARK, PORT39_FN1),
+ PINMUX_DATA(VACK_MARK, PORT40_FN1),
+ PINMUX_DATA(XTAL1L_MARK, PORT41_FN1),
+ PINMUX_DATA(SCIFA0_RTS__MARK, PORT42_FN2), \
+ PINMUX_DATA(FSICOSLDT2_MARK, PORT42_FN3),
+ PINMUX_DATA(SCIFA0_RXD_MARK, PORT43_FN2),
+ PINMUX_DATA(SCIFA0_CTS__MARK, PORT44_FN2), \
+ PINMUX_DATA(FSICOSLDT1_MARK, PORT44_FN3),
+ PINMUX_DATA(FSICOBT_MARK, PORT45_FN1), \
+ PINMUX_DATA(FSICIBT_MARK, PORT45_FN2), \
+ PINMUX_DATA(FSIDOBT_MARK, PORT45_FN3), \
+ PINMUX_DATA(FSIDIBT_MARK, PORT45_FN4),
+ PINMUX_DATA(FSICOLR_MARK, PORT46_FN1), \
+ PINMUX_DATA(FSICILR_MARK, PORT46_FN2), \
+ PINMUX_DATA(FSIDOLR_MARK, PORT46_FN3), \
+ PINMUX_DATA(FSIDILR_MARK, PORT46_FN4),
+ PINMUX_DATA(FSICOSLD_MARK, PORT47_FN1), \
+ PINMUX_DATA(PORT47_FSICSPDIF_MARK, PORT47_FN2),
+ PINMUX_DATA(FSICISLD_MARK, PORT48_FN1), \
+ PINMUX_DATA(FSIDISLD_MARK, PORT48_FN3),
+ PINMUX_DATA(FSIACK_MARK, PORT49_FN1), \
+ PINMUX_DATA(PORT49_IRDA_OUT_MARK, PORT49_FN2, MSEL4CR_MSEL19_1), \
+ PINMUX_DATA(PORT49_IROUT_MARK, PORT49_FN4), \
+ PINMUX_DATA(FSIAOMC_MARK, PORT49_FN5),
+ PINMUX_DATA(FSIAOLR_MARK, PORT50_FN1), \
+ PINMUX_DATA(BBIF2_TSYNC2_MARK, PORT50_FN2), \
+ PINMUX_DATA(TPU2TO2_MARK, PORT50_FN3), \
+ PINMUX_DATA(FSIAILR_MARK, PORT50_FN5),
+
+ PINMUX_DATA(FSIAOBT_MARK, PORT51_FN1), \
+ PINMUX_DATA(BBIF2_TSCK2_MARK, PORT51_FN2), \
+ PINMUX_DATA(TPU2TO3_MARK, PORT51_FN3), \
+ PINMUX_DATA(FSIAIBT_MARK, PORT51_FN5),
+ PINMUX_DATA(FSIAOSLD_MARK, PORT52_FN1), \
+ PINMUX_DATA(BBIF2_TXD2_MARK, PORT52_FN2),
+ PINMUX_DATA(FSIASPDIF_MARK, PORT53_FN1), \
+ PINMUX_DATA(PORT53_IRDA_IN_MARK, PORT53_FN2, MSEL4CR_MSEL19_1), \
+ PINMUX_DATA(TPU3TO3_MARK, PORT53_FN3), \
+ PINMUX_DATA(FSIBSPDIF_MARK, PORT53_FN5), \
+ PINMUX_DATA(PORT53_FSICSPDIF_MARK, PORT53_FN6),
+ PINMUX_DATA(FSIBCK_MARK, PORT54_FN1), \
+ PINMUX_DATA(PORT54_IRDA_FIRSEL_MARK, PORT54_FN2, MSEL4CR_MSEL19_1), \
+ PINMUX_DATA(TPU3TO2_MARK, PORT54_FN3), \
+ PINMUX_DATA(FSIBOMC_MARK, PORT54_FN5), \
+ PINMUX_DATA(FSICCK_MARK, PORT54_FN6), \
+ PINMUX_DATA(FSICOMC_MARK, PORT54_FN7),
+ PINMUX_DATA(FSIAISLD_MARK, PORT55_FN1), \
+ PINMUX_DATA(TPU0TO0_MARK, PORT55_FN3),
+ PINMUX_DATA(A0_MARK, PORT57_FN1), \
+ PINMUX_DATA(BS__MARK, PORT57_FN2),
+ PINMUX_DATA(A12_MARK, PORT58_FN1), \
+ PINMUX_DATA(PORT58_KEYOUT7_MARK, PORT58_FN2), \
+ PINMUX_DATA(TPU4TO2_MARK, PORT58_FN4),
+ PINMUX_DATA(A13_MARK, PORT59_FN1), \
+ PINMUX_DATA(PORT59_KEYOUT6_MARK, PORT59_FN2), \
+ PINMUX_DATA(TPU0TO1_MARK, PORT59_FN4),
+ PINMUX_DATA(A14_MARK, PORT60_FN1), \
+ PINMUX_DATA(KEYOUT5_MARK, PORT60_FN2),
+ PINMUX_DATA(A15_MARK, PORT61_FN1), \
+ PINMUX_DATA(KEYOUT4_MARK, PORT61_FN2),
+ PINMUX_DATA(A16_MARK, PORT62_FN1), \
+ PINMUX_DATA(KEYOUT3_MARK, PORT62_FN2), \
+ PINMUX_DATA(MSIOF0_SS1_MARK, PORT62_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A17_MARK, PORT63_FN1), \
+ PINMUX_DATA(KEYOUT2_MARK, PORT63_FN2), \
+ PINMUX_DATA(MSIOF0_TSYNC_MARK, PORT63_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A18_MARK, PORT64_FN1), \
+ PINMUX_DATA(KEYOUT1_MARK, PORT64_FN2), \
+ PINMUX_DATA(MSIOF0_TSCK_MARK, PORT64_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A19_MARK, PORT65_FN1), \
+ PINMUX_DATA(KEYOUT0_MARK, PORT65_FN2), \
+ PINMUX_DATA(MSIOF0_TXD_MARK, PORT65_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A20_MARK, PORT66_FN1), \
+ PINMUX_DATA(KEYIN0_MARK, PORT66_FN2, PORT66_IN_PU), \
+ PINMUX_DATA(MSIOF0_RSCK_MARK, PORT66_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A21_MARK, PORT67_FN1), \
+ PINMUX_DATA(KEYIN1_MARK, PORT67_FN2, PORT67_IN_PU), \
+ PINMUX_DATA(MSIOF0_RSYNC_MARK, PORT67_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A22_MARK, PORT68_FN1), \
+ PINMUX_DATA(KEYIN2_MARK, PORT68_FN2, PORT68_IN_PU), \
+ PINMUX_DATA(MSIOF0_MCK0_MARK, PORT68_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A23_MARK, PORT69_FN1), \
+ PINMUX_DATA(KEYIN3_MARK, PORT69_FN2, PORT69_IN_PU), \
+ PINMUX_DATA(MSIOF0_MCK1_MARK, PORT69_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A24_MARK, PORT70_FN1), \
+ PINMUX_DATA(KEYIN4_MARK, PORT70_FN2, PORT70_IN_PU), \
+ PINMUX_DATA(MSIOF0_RXD_MARK, PORT70_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A25_MARK, PORT71_FN1), \
+ PINMUX_DATA(KEYIN5_MARK, PORT71_FN2, PORT71_IN_PU), \
+ PINMUX_DATA(MSIOF0_SS2_MARK, PORT71_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A26_MARK, PORT72_FN1), \
+ PINMUX_DATA(KEYIN6_MARK, PORT72_FN2, PORT72_IN_PU),
+ PINMUX_DATA(KEYIN7_MARK, PORT73_FN2, PORT73_IN_PU),
+ PINMUX_DATA(D0_NAF0_MARK, PORT74_FN1),
+ PINMUX_DATA(D1_NAF1_MARK, PORT75_FN1),
+ PINMUX_DATA(D2_NAF2_MARK, PORT76_FN1),
+ PINMUX_DATA(D3_NAF3_MARK, PORT77_FN1),
+ PINMUX_DATA(D4_NAF4_MARK, PORT78_FN1),
+ PINMUX_DATA(D5_NAF5_MARK, PORT79_FN1),
+ PINMUX_DATA(D6_NAF6_MARK, PORT80_FN1),
+ PINMUX_DATA(D7_NAF7_MARK, PORT81_FN1),
+ PINMUX_DATA(D8_NAF8_MARK, PORT82_FN1),
+ PINMUX_DATA(D9_NAF9_MARK, PORT83_FN1),
+ PINMUX_DATA(D10_NAF10_MARK, PORT84_FN1),
+ PINMUX_DATA(D11_NAF11_MARK, PORT85_FN1),
+ PINMUX_DATA(D12_NAF12_MARK, PORT86_FN1),
+ PINMUX_DATA(D13_NAF13_MARK, PORT87_FN1),
+ PINMUX_DATA(D14_NAF14_MARK, PORT88_FN1),
+ PINMUX_DATA(D15_NAF15_MARK, PORT89_FN1),
+ PINMUX_DATA(CS4__MARK, PORT90_FN1),
+ PINMUX_DATA(CS5A__MARK, PORT91_FN1), \
+ PINMUX_DATA(PORT91_RDWR_MARK, PORT91_FN2),
+ PINMUX_DATA(CS5B__MARK, PORT92_FN1), \
+ PINMUX_DATA(FCE1__MARK, PORT92_FN2),
+ PINMUX_DATA(CS6B__MARK, PORT93_FN1), \
+ PINMUX_DATA(DACK0_MARK, PORT93_FN4),
+ PINMUX_DATA(FCE0__MARK, PORT94_FN1), \
+ PINMUX_DATA(CS6A__MARK, PORT94_FN2),
+ PINMUX_DATA(WAIT__MARK, PORT95_FN1), \
+ PINMUX_DATA(DREQ0_MARK, PORT95_FN2),
+ PINMUX_DATA(RD__FSC_MARK, PORT96_FN1),
+ PINMUX_DATA(WE0__FWE_MARK, PORT97_FN1), \
+ PINMUX_DATA(RDWR_FWE_MARK, PORT97_FN2),
+ PINMUX_DATA(WE1__MARK, PORT98_FN1),
+ PINMUX_DATA(FRB_MARK, PORT99_FN1),
+ PINMUX_DATA(CKO_MARK, PORT100_FN1),
+ PINMUX_DATA(NBRSTOUT__MARK, PORT101_FN1),
+ PINMUX_DATA(NBRST__MARK, PORT102_FN1),
+ PINMUX_DATA(BBIF2_TXD_MARK, PORT103_FN3),
+ PINMUX_DATA(BBIF2_RXD_MARK, PORT104_FN3),
+ PINMUX_DATA(BBIF2_SYNC_MARK, PORT105_FN3),
+ PINMUX_DATA(BBIF2_SCK_MARK, PORT106_FN3),
+ PINMUX_DATA(SCIFA3_CTS__MARK, PORT107_FN3), \
+ PINMUX_DATA(MFG3_IN2_MARK, PORT107_FN4),
+ PINMUX_DATA(SCIFA3_RXD_MARK, PORT108_FN3), \
+ PINMUX_DATA(MFG3_IN1_MARK, PORT108_FN4),
+ PINMUX_DATA(BBIF1_SS2_MARK, PORT109_FN2), \
+ PINMUX_DATA(SCIFA3_RTS__MARK, PORT109_FN3), \
+ PINMUX_DATA(MFG3_OUT1_MARK, PORT109_FN4),
+ PINMUX_DATA(SCIFA3_TXD_MARK, PORT110_FN3),
+ PINMUX_DATA(HSI_RX_DATA_MARK, PORT111_FN1), \
+ PINMUX_DATA(BBIF1_RXD_MARK, PORT111_FN3),
+ PINMUX_DATA(HSI_TX_WAKE_MARK, PORT112_FN1), \
+ PINMUX_DATA(BBIF1_TSCK_MARK, PORT112_FN3),
+ PINMUX_DATA(HSI_TX_DATA_MARK, PORT113_FN1), \
+ PINMUX_DATA(BBIF1_TSYNC_MARK, PORT113_FN3),
+ PINMUX_DATA(HSI_TX_READY_MARK, PORT114_FN1), \
+ PINMUX_DATA(BBIF1_TXD_MARK, PORT114_FN3),
+ PINMUX_DATA(HSI_RX_READY_MARK, PORT115_FN1), \
+ PINMUX_DATA(BBIF1_RSCK_MARK, PORT115_FN3), \
+ PINMUX_DATA(PORT115_I2C_SCL2_MARK, PORT115_FN5, MSEL2CR_MSEL17_1), \
+ PINMUX_DATA(PORT115_I2C_SCL3_MARK, PORT115_FN6, MSEL2CR_MSEL19_1),
+ PINMUX_DATA(HSI_RX_WAKE_MARK, PORT116_FN1), \
+ PINMUX_DATA(BBIF1_RSYNC_MARK, PORT116_FN3), \
+ PINMUX_DATA(PORT116_I2C_SDA2_MARK, PORT116_FN5, MSEL2CR_MSEL17_1), \
+ PINMUX_DATA(PORT116_I2C_SDA3_MARK, PORT116_FN6, MSEL2CR_MSEL19_1),
+ PINMUX_DATA(HSI_RX_FLAG_MARK, PORT117_FN1), \
+ PINMUX_DATA(BBIF1_SS1_MARK, PORT117_FN2), \
+ PINMUX_DATA(BBIF1_FLOW_MARK, PORT117_FN3),
+ PINMUX_DATA(HSI_TX_FLAG_MARK, PORT118_FN1),
+ PINMUX_DATA(VIO_VD_MARK, PORT128_FN1), \
+ PINMUX_DATA(PORT128_LCD2VSYN_MARK, PORT128_FN4, MSEL3CR_MSEL2_0), \
+ PINMUX_DATA(VIO2_VD_MARK, PORT128_FN6, MSEL4CR_MSEL27_0), \
+ PINMUX_DATA(LCD2D0_MARK, PORT128_FN7),
+
+ PINMUX_DATA(VIO_HD_MARK, PORT129_FN1), \
+ PINMUX_DATA(PORT129_LCD2HSYN_MARK, PORT129_FN4), \
+ PINMUX_DATA(PORT129_LCD2CS__MARK, PORT129_FN5), \
+ PINMUX_DATA(VIO2_HD_MARK, PORT129_FN6, MSEL4CR_MSEL27_0), \
+ PINMUX_DATA(LCD2D1_MARK, PORT129_FN7),
+ PINMUX_DATA(VIO_D0_MARK, PORT130_FN1), \
+ PINMUX_DATA(PORT130_MSIOF2_RXD_MARK, PORT130_FN3, MSEL4CR_MSEL11_0,
+ MSEL4CR_MSEL10_1), \
+ PINMUX_DATA(LCD2D10_MARK, PORT130_FN7),
+ PINMUX_DATA(VIO_D1_MARK, PORT131_FN1), \
+ PINMUX_DATA(PORT131_KEYOUT6_MARK, PORT131_FN2), \
+ PINMUX_DATA(PORT131_MSIOF2_SS1_MARK, PORT131_FN3), \
+ PINMUX_DATA(PORT131_KEYOUT11_MARK, PORT131_FN4), \
+ PINMUX_DATA(LCD2D11_MARK, PORT131_FN7),
+ PINMUX_DATA(VIO_D2_MARK, PORT132_FN1), \
+ PINMUX_DATA(PORT132_KEYOUT7_MARK, PORT132_FN2), \
+ PINMUX_DATA(PORT132_MSIOF2_SS2_MARK, PORT132_FN3), \
+ PINMUX_DATA(PORT132_KEYOUT10_MARK, PORT132_FN4), \
+ PINMUX_DATA(LCD2D12_MARK, PORT132_FN7),
+ PINMUX_DATA(VIO_D3_MARK, PORT133_FN1), \
+ PINMUX_DATA(MSIOF2_TSYNC_MARK, PORT133_FN3, MSEL4CR_MSEL11_0), \
+ PINMUX_DATA(LCD2D13_MARK, PORT133_FN7),
+ PINMUX_DATA(VIO_D4_MARK, PORT134_FN1), \
+ PINMUX_DATA(MSIOF2_TXD_MARK, PORT134_FN3, MSEL4CR_MSEL11_0), \
+ PINMUX_DATA(LCD2D14_MARK, PORT134_FN7),
+ PINMUX_DATA(VIO_D5_MARK, PORT135_FN1), \
+ PINMUX_DATA(MSIOF2_TSCK_MARK, PORT135_FN3, MSEL4CR_MSEL11_0), \
+ PINMUX_DATA(LCD2D15_MARK, PORT135_FN7),
+ PINMUX_DATA(VIO_D6_MARK, PORT136_FN1), \
+ PINMUX_DATA(PORT136_KEYOUT8_MARK, PORT136_FN2), \
+ PINMUX_DATA(LCD2D16_MARK, PORT136_FN7),
+ PINMUX_DATA(VIO_D7_MARK, PORT137_FN1), \
+ PINMUX_DATA(PORT137_KEYOUT9_MARK, PORT137_FN2), \
+ PINMUX_DATA(LCD2D17_MARK, PORT137_FN7),
+ PINMUX_DATA(VIO_D8_MARK, PORT138_FN1), \
+ PINMUX_DATA(PORT138_KEYOUT8_MARK, PORT138_FN2), \
+ PINMUX_DATA(VIO2_D0_MARK, PORT138_FN6), \
+ PINMUX_DATA(LCD2D6_MARK, PORT138_FN7),
+ PINMUX_DATA(VIO_D9_MARK, PORT139_FN1), \
+ PINMUX_DATA(PORT139_KEYOUT9_MARK, PORT139_FN2), \
+ PINMUX_DATA(VIO2_D1_MARK, PORT139_FN6), \
+ PINMUX_DATA(LCD2D7_MARK, PORT139_FN7),
+ PINMUX_DATA(VIO_D10_MARK, PORT140_FN1), \
+ PINMUX_DATA(TPU0TO2_MARK, PORT140_FN4), \
+ PINMUX_DATA(VIO2_D2_MARK, PORT140_FN6), \
+ PINMUX_DATA(LCD2D8_MARK, PORT140_FN7),
+ PINMUX_DATA(VIO_D11_MARK, PORT141_FN1), \
+ PINMUX_DATA(TPU0TO3_MARK, PORT141_FN4), \
+ PINMUX_DATA(VIO2_D3_MARK, PORT141_FN6), \
+ PINMUX_DATA(LCD2D9_MARK, PORT141_FN7),
+ PINMUX_DATA(VIO_D12_MARK, PORT142_FN1), \
+ PINMUX_DATA(PORT142_KEYOUT10_MARK, PORT142_FN2), \
+ PINMUX_DATA(VIO2_D4_MARK, PORT142_FN6), \
+ PINMUX_DATA(LCD2D2_MARK, PORT142_FN7),
+ PINMUX_DATA(VIO_D13_MARK, PORT143_FN1), \
+ PINMUX_DATA(PORT143_KEYOUT11_MARK, PORT143_FN2), \
+ PINMUX_DATA(PORT143_KEYOUT6_MARK, PORT143_FN3), \
+ PINMUX_DATA(VIO2_D5_MARK, PORT143_FN6), \
+ PINMUX_DATA(LCD2D3_MARK, PORT143_FN7),
+ PINMUX_DATA(VIO_D14_MARK, PORT144_FN1), \
+ PINMUX_DATA(PORT144_KEYOUT7_MARK, PORT144_FN2), \
+ PINMUX_DATA(VIO2_D6_MARK, PORT144_FN6), \
+ PINMUX_DATA(LCD2D4_MARK, PORT144_FN7),
+ PINMUX_DATA(VIO_D15_MARK, PORT145_FN1), \
+ PINMUX_DATA(TPU1TO3_MARK, PORT145_FN3), \
+ PINMUX_DATA(PORT145_LCD2DISP_MARK, PORT145_FN4), \
+ PINMUX_DATA(PORT145_LCD2RS_MARK, PORT145_FN5), \
+ PINMUX_DATA(VIO2_D7_MARK, PORT145_FN6), \
+ PINMUX_DATA(LCD2D5_MARK, PORT145_FN7),
+ PINMUX_DATA(VIO_CLK_MARK, PORT146_FN1), \
+ PINMUX_DATA(LCD2DCK_MARK, PORT146_FN4), \
+ PINMUX_DATA(PORT146_LCD2WR__MARK, PORT146_FN5), \
+ PINMUX_DATA(VIO2_CLK_MARK, PORT146_FN6, MSEL4CR_MSEL27_0), \
+ PINMUX_DATA(LCD2D18_MARK, PORT146_FN7),
+ PINMUX_DATA(VIO_FIELD_MARK, PORT147_FN1), \
+ PINMUX_DATA(LCD2RD__MARK, PORT147_FN4), \
+ PINMUX_DATA(VIO2_FIELD_MARK, PORT147_FN6, MSEL4CR_MSEL27_0), \
+ PINMUX_DATA(LCD2D19_MARK, PORT147_FN7),
+ PINMUX_DATA(VIO_CKO_MARK, PORT148_FN1),
+ PINMUX_DATA(A27_MARK, PORT149_FN1), \
+ PINMUX_DATA(PORT149_RDWR_MARK, PORT149_FN2), \
+ PINMUX_DATA(MFG0_IN1_MARK, PORT149_FN3), \
+ PINMUX_DATA(PORT149_KEYOUT9_MARK, PORT149_FN4),
+ PINMUX_DATA(MFG0_IN2_MARK, PORT150_FN3),
+ PINMUX_DATA(TS_SPSYNC3_MARK, PORT151_FN4), \
+ PINMUX_DATA(MSIOF2_RSCK_MARK, PORT151_FN5),
+ PINMUX_DATA(TS_SDAT3_MARK, PORT152_FN4), \
+ PINMUX_DATA(MSIOF2_RSYNC_MARK, PORT152_FN5),
+ PINMUX_DATA(TPU1TO2_MARK, PORT153_FN3), \
+ PINMUX_DATA(TS_SDEN3_MARK, PORT153_FN4), \
+ PINMUX_DATA(PORT153_MSIOF2_SS1_MARK, PORT153_FN5),
+ PINMUX_DATA(SCIFA2_TXD1_MARK, PORT154_FN2, MSEL3CR_MSEL9_0), \
+ PINMUX_DATA(MSIOF2_MCK0_MARK, PORT154_FN5),
+ PINMUX_DATA(SCIFA2_RXD1_MARK, PORT155_FN2, MSEL3CR_MSEL9_0), \
+ PINMUX_DATA(MSIOF2_MCK1_MARK, PORT155_FN5),
+ PINMUX_DATA(SCIFA2_RTS1__MARK, PORT156_FN2, MSEL3CR_MSEL9_0), \
+ PINMUX_DATA(PORT156_MSIOF2_SS2_MARK, PORT156_FN5),
+ PINMUX_DATA(SCIFA2_CTS1__MARK, PORT157_FN2, MSEL3CR_MSEL9_0), \
+ PINMUX_DATA(PORT157_MSIOF2_RXD_MARK, PORT157_FN5, MSEL4CR_MSEL11_0,
+ MSEL4CR_MSEL10_0),
+ PINMUX_DATA(DINT__MARK, PORT158_FN1), \
+ PINMUX_DATA(SCIFA2_SCK1_MARK, PORT158_FN2, MSEL3CR_MSEL9_0), \
+ PINMUX_DATA(TS_SCK3_MARK, PORT158_FN4),
+ PINMUX_DATA(PORT159_SCIFB_SCK_MARK, PORT159_FN1, MSEL4CR_MSEL22_0), \
+ PINMUX_DATA(PORT159_SCIFA5_SCK_MARK, PORT159_FN2, MSEL4CR_MSEL21_1), \
+ PINMUX_DATA(NMI_MARK, PORT159_FN3),
+ PINMUX_DATA(PORT160_SCIFB_TXD_MARK, PORT160_FN1, MSEL4CR_MSEL22_0), \
+ PINMUX_DATA(PORT160_SCIFA5_TXD_MARK, PORT160_FN2, MSEL4CR_MSEL21_1),
+ PINMUX_DATA(PORT161_SCIFB_CTS__MARK, PORT161_FN1, MSEL4CR_MSEL22_0), \
+ PINMUX_DATA(PORT161_SCIFA5_CTS__MARK, PORT161_FN2, MSEL4CR_MSEL21_1),
+ PINMUX_DATA(PORT162_SCIFB_RXD_MARK, PORT162_FN1, MSEL4CR_MSEL22_0), \
+ PINMUX_DATA(PORT162_SCIFA5_RXD_MARK, PORT162_FN2, MSEL4CR_MSEL21_1),
+ PINMUX_DATA(PORT163_SCIFB_RTS__MARK, PORT163_FN1, MSEL4CR_MSEL22_0), \
+ PINMUX_DATA(PORT163_SCIFA5_RTS__MARK, PORT163_FN2, MSEL4CR_MSEL21_1), \
+ PINMUX_DATA(TPU3TO0_MARK, PORT163_FN5),
+ PINMUX_DATA(LCDD0_MARK, PORT192_FN1),
+ PINMUX_DATA(LCDD1_MARK, PORT193_FN1), \
+ PINMUX_DATA(PORT193_SCIFA5_CTS__MARK, PORT193_FN3, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_1), \
+ PINMUX_DATA(BBIF2_TSYNC1_MARK, PORT193_FN5),
+ PINMUX_DATA(LCDD2_MARK, PORT194_FN1), \
+ PINMUX_DATA(PORT194_SCIFA5_RTS__MARK, PORT194_FN3, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_1), \
+ PINMUX_DATA(BBIF2_TSCK1_MARK, PORT194_FN5),
+ PINMUX_DATA(LCDD3_MARK, PORT195_FN1), \
+ PINMUX_DATA(PORT195_SCIFA5_RXD_MARK, PORT195_FN3, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_1), \
+ PINMUX_DATA(BBIF2_TXD1_MARK, PORT195_FN5),
+ PINMUX_DATA(LCDD4_MARK, PORT196_FN1), \
+ PINMUX_DATA(PORT196_SCIFA5_TXD_MARK, PORT196_FN3, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_1),
+ PINMUX_DATA(LCDD5_MARK, PORT197_FN1), \
+ PINMUX_DATA(PORT197_SCIFA5_SCK_MARK, PORT197_FN3, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_1), \
+ PINMUX_DATA(MFG2_OUT2_MARK, PORT197_FN5), \
+ PINMUX_DATA(TPU2TO1_MARK, PORT197_FN7),
+ PINMUX_DATA(LCDD6_MARK, PORT198_FN1),
+ PINMUX_DATA(LCDD7_MARK, PORT199_FN1), \
+ PINMUX_DATA(TPU4TO1_MARK, PORT199_FN2), \
+ PINMUX_DATA(MFG4_OUT2_MARK, PORT199_FN5),
+ PINMUX_DATA(LCDD8_MARK, PORT200_FN1), \
+ PINMUX_DATA(D16_MARK, PORT200_FN6),
+ PINMUX_DATA(LCDD9_MARK, PORT201_FN1), \
+ PINMUX_DATA(D17_MARK, PORT201_FN6),
+ PINMUX_DATA(LCDD10_MARK, PORT202_FN1), \
+ PINMUX_DATA(D18_MARK, PORT202_FN6),
+ PINMUX_DATA(LCDD11_MARK, PORT203_FN1), \
+ PINMUX_DATA(D19_MARK, PORT203_FN6),
+ PINMUX_DATA(LCDD12_MARK, PORT204_FN1), \
+ PINMUX_DATA(D20_MARK, PORT204_FN6),
+ PINMUX_DATA(LCDD13_MARK, PORT205_FN1), \
+ PINMUX_DATA(D21_MARK, PORT205_FN6),
+ PINMUX_DATA(LCDD14_MARK, PORT206_FN1), \
+ PINMUX_DATA(D22_MARK, PORT206_FN6),
+ PINMUX_DATA(LCDD15_MARK, PORT207_FN1), \
+ PINMUX_DATA(PORT207_MSIOF0L_SS1_MARK, PORT207_FN2, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D23_MARK, PORT207_FN6),
+ PINMUX_DATA(LCDD16_MARK, PORT208_FN1), \
+ PINMUX_DATA(PORT208_MSIOF0L_SS2_MARK, PORT208_FN2, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D24_MARK, PORT208_FN6),
+ PINMUX_DATA(LCDD17_MARK, PORT209_FN1), \
+ PINMUX_DATA(D25_MARK, PORT209_FN6),
+ PINMUX_DATA(LCDD18_MARK, PORT210_FN1), \
+ PINMUX_DATA(DREQ2_MARK, PORT210_FN2), \
+ PINMUX_DATA(PORT210_MSIOF0L_SS1_MARK, PORT210_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D26_MARK, PORT210_FN6),
+ PINMUX_DATA(LCDD19_MARK, PORT211_FN1), \
+ PINMUX_DATA(PORT211_MSIOF0L_SS2_MARK, PORT211_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D27_MARK, PORT211_FN6),
+ PINMUX_DATA(LCDD20_MARK, PORT212_FN1), \
+ PINMUX_DATA(TS_SPSYNC1_MARK, PORT212_FN2), \
+ PINMUX_DATA(MSIOF0L_MCK0_MARK, PORT212_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D28_MARK, PORT212_FN6),
+ PINMUX_DATA(LCDD21_MARK, PORT213_FN1), \
+ PINMUX_DATA(TS_SDAT1_MARK, PORT213_FN2), \
+ PINMUX_DATA(MSIOF0L_MCK1_MARK, PORT213_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D29_MARK, PORT213_FN6),
+ PINMUX_DATA(LCDD22_MARK, PORT214_FN1), \
+ PINMUX_DATA(TS_SDEN1_MARK, PORT214_FN2), \
+ PINMUX_DATA(MSIOF0L_RSCK_MARK, PORT214_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D30_MARK, PORT214_FN6),
+ PINMUX_DATA(LCDD23_MARK, PORT215_FN1), \
+ PINMUX_DATA(TS_SCK1_MARK, PORT215_FN2), \
+ PINMUX_DATA(MSIOF0L_RSYNC_MARK, PORT215_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D31_MARK, PORT215_FN6),
+ PINMUX_DATA(LCDDCK_MARK, PORT216_FN1), \
+ PINMUX_DATA(LCDWR__MARK, PORT216_FN2),
+ PINMUX_DATA(LCDRD__MARK, PORT217_FN1), \
+ PINMUX_DATA(DACK2_MARK, PORT217_FN2), \
+ PINMUX_DATA(PORT217_LCD2RS_MARK, PORT217_FN3), \
+ PINMUX_DATA(MSIOF0L_TSYNC_MARK, PORT217_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(VIO2_FIELD3_MARK, PORT217_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_1), \
+ PINMUX_DATA(PORT217_LCD2DISP_MARK, PORT217_FN7),
+ PINMUX_DATA(LCDHSYN_MARK, PORT218_FN1), \
+ PINMUX_DATA(LCDCS__MARK, PORT218_FN2), \
+ PINMUX_DATA(LCDCS2__MARK, PORT218_FN3), \
+ PINMUX_DATA(DACK3_MARK, PORT218_FN4), \
+ PINMUX_DATA(PORT218_VIO_CKOR_MARK, PORT218_FN5),
+ PINMUX_DATA(LCDDISP_MARK, PORT219_FN1), \
+ PINMUX_DATA(LCDRS_MARK, PORT219_FN2), \
+ PINMUX_DATA(PORT219_LCD2WR__MARK, PORT219_FN3), \
+ PINMUX_DATA(DREQ3_MARK, PORT219_FN4), \
+ PINMUX_DATA(MSIOF0L_TSCK_MARK, PORT219_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(VIO2_CLK3_MARK, PORT219_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_1), \
+ PINMUX_DATA(LCD2DCK_2_MARK, PORT219_FN7),
+ PINMUX_DATA(LCDVSYN_MARK, PORT220_FN1), \
+ PINMUX_DATA(LCDVSYN2_MARK, PORT220_FN2),
+ PINMUX_DATA(LCDLCLK_MARK, PORT221_FN1), \
+ PINMUX_DATA(DREQ1_MARK, PORT221_FN2), \
+ PINMUX_DATA(PORT221_LCD2CS__MARK, PORT221_FN3), \
+ PINMUX_DATA(PWEN_MARK, PORT221_FN4), \
+ PINMUX_DATA(MSIOF0L_RXD_MARK, PORT221_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(VIO2_HD3_MARK, PORT221_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_1), \
+ PINMUX_DATA(PORT221_LCD2HSYN_MARK, PORT221_FN7),
+ PINMUX_DATA(LCDDON_MARK, PORT222_FN1), \
+ PINMUX_DATA(LCDDON2_MARK, PORT222_FN2), \
+ PINMUX_DATA(DACK1_MARK, PORT222_FN3), \
+ PINMUX_DATA(OVCN_MARK, PORT222_FN4), \
+ PINMUX_DATA(MSIOF0L_TXD_MARK, PORT222_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(VIO2_VD3_MARK, PORT222_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_1), \
+ PINMUX_DATA(PORT222_LCD2VSYN_MARK, PORT222_FN7, MSEL3CR_MSEL2_1),
+
+ PINMUX_DATA(SCIFA1_TXD_MARK, PORT225_FN2), \
+ PINMUX_DATA(OVCN2_MARK, PORT225_FN4),
+ PINMUX_DATA(EXTLP_MARK, PORT226_FN1), \
+ PINMUX_DATA(SCIFA1_SCK_MARK, PORT226_FN2), \
+ PINMUX_DATA(PORT226_VIO_CKO2_MARK, PORT226_FN5),
+ PINMUX_DATA(SCIFA1_RTS__MARK, PORT227_FN2), \
+ PINMUX_DATA(IDIN_MARK, PORT227_FN4),
+ PINMUX_DATA(SCIFA1_RXD_MARK, PORT228_FN2),
+ PINMUX_DATA(SCIFA1_CTS__MARK, PORT229_FN2), \
+ PINMUX_DATA(MFG1_IN1_MARK, PORT229_FN3),
+ PINMUX_DATA(MSIOF1_TXD_MARK, PORT230_FN1), \
+ PINMUX_DATA(SCIFA2_TXD2_MARK, PORT230_FN2, MSEL3CR_MSEL9_1),
+ PINMUX_DATA(MSIOF1_TSYNC_MARK, PORT231_FN1), \
+ PINMUX_DATA(SCIFA2_CTS2__MARK, PORT231_FN2, MSEL3CR_MSEL9_1),
+ PINMUX_DATA(MSIOF1_TSCK_MARK, PORT232_FN1), \
+ PINMUX_DATA(SCIFA2_SCK2_MARK, PORT232_FN2, MSEL3CR_MSEL9_1),
+ PINMUX_DATA(MSIOF1_RXD_MARK, PORT233_FN1), \
+ PINMUX_DATA(SCIFA2_RXD2_MARK, PORT233_FN2, MSEL3CR_MSEL9_1),
+ PINMUX_DATA(MSIOF1_RSCK_MARK, PORT234_FN1), \
+ PINMUX_DATA(SCIFA2_RTS2__MARK, PORT234_FN2, MSEL3CR_MSEL9_1), \
+ PINMUX_DATA(VIO2_CLK2_MARK, PORT234_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0), \
+ PINMUX_DATA(LCD2D20_MARK, PORT234_FN7),
+ PINMUX_DATA(MSIOF1_RSYNC_MARK, PORT235_FN1), \
+ PINMUX_DATA(MFG1_IN2_MARK, PORT235_FN3), \
+ PINMUX_DATA(VIO2_VD2_MARK, PORT235_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0), \
+ PINMUX_DATA(LCD2D21_MARK, PORT235_FN7),
+ PINMUX_DATA(MSIOF1_MCK0_MARK, PORT236_FN1), \
+ PINMUX_DATA(PORT236_I2C_SDA2_MARK, PORT236_FN2, MSEL2CR_MSEL17_0,
+ MSEL2CR_MSEL16_0),
+ PINMUX_DATA(MSIOF1_MCK1_MARK, PORT237_FN1), \
+ PINMUX_DATA(PORT237_I2C_SCL2_MARK, PORT237_FN2, MSEL2CR_MSEL17_0,
+ MSEL2CR_MSEL16_0),
+ PINMUX_DATA(MSIOF1_SS1_MARK, PORT238_FN1), \
+ PINMUX_DATA(VIO2_FIELD2_MARK, PORT238_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0), \
+ PINMUX_DATA(LCD2D22_MARK, PORT238_FN7),
+ PINMUX_DATA(MSIOF1_SS2_MARK, PORT239_FN1), \
+ PINMUX_DATA(VIO2_HD2_MARK, PORT239_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0), \
+ PINMUX_DATA(LCD2D23_MARK, PORT239_FN7),
+ PINMUX_DATA(SCIFA6_TXD_MARK, PORT240_FN1),
+ PINMUX_DATA(PORT241_IRDA_OUT_MARK, PORT241_FN1, MSEL4CR_MSEL19_0), \
+ PINMUX_DATA(PORT241_IROUT_MARK, PORT241_FN2), \
+ PINMUX_DATA(MFG4_OUT1_MARK, PORT241_FN3), \
+ PINMUX_DATA(TPU4TO0_MARK, PORT241_FN4),
+ PINMUX_DATA(PORT242_IRDA_IN_MARK, PORT242_FN1, MSEL4CR_MSEL19_0), \
+ PINMUX_DATA(MFG4_IN2_MARK, PORT242_FN3),
+ PINMUX_DATA(PORT243_IRDA_FIRSEL_MARK, PORT243_FN1, MSEL4CR_MSEL19_0), \
+ PINMUX_DATA(PORT243_VIO_CKO2_MARK, PORT243_FN2),
+ PINMUX_DATA(PORT244_SCIFA5_CTS__MARK, PORT244_FN1, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_0), \
+ PINMUX_DATA(MFG2_IN1_MARK, PORT244_FN2), \
+ PINMUX_DATA(PORT244_SCIFB_CTS__MARK, PORT244_FN3, MSEL4CR_MSEL22_1), \
+ PINMUX_DATA(MSIOF2R_RXD_MARK, PORT244_FN7, MSEL4CR_MSEL11_1),
+ PINMUX_DATA(PORT245_SCIFA5_RTS__MARK, PORT245_FN1, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_0), \
+ PINMUX_DATA(MFG2_IN2_MARK, PORT245_FN2), \
+ PINMUX_DATA(PORT245_SCIFB_RTS__MARK, PORT245_FN3, MSEL4CR_MSEL22_1), \
+ PINMUX_DATA(MSIOF2R_TXD_MARK, PORT245_FN7, MSEL4CR_MSEL11_1),
+ PINMUX_DATA(PORT246_SCIFA5_RXD_MARK, PORT246_FN1, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_0), \
+ PINMUX_DATA(MFG1_OUT1_MARK, PORT246_FN2), \
+ PINMUX_DATA(PORT246_SCIFB_RXD_MARK, PORT246_FN3, MSEL4CR_MSEL22_1), \
+ PINMUX_DATA(TPU1TO0_MARK, PORT246_FN4),
+ PINMUX_DATA(PORT247_SCIFA5_TXD_MARK, PORT247_FN1, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_0), \
+ PINMUX_DATA(MFG3_OUT2_MARK, PORT247_FN2), \
+ PINMUX_DATA(PORT247_SCIFB_TXD_MARK, PORT247_FN3, MSEL4CR_MSEL22_1), \
+ PINMUX_DATA(TPU3TO1_MARK, PORT247_FN4),
+ PINMUX_DATA(PORT248_SCIFA5_SCK_MARK, PORT248_FN1, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_0), \
+ PINMUX_DATA(MFG2_OUT1_MARK, PORT248_FN2), \
+ PINMUX_DATA(PORT248_SCIFB_SCK_MARK, PORT248_FN3, MSEL4CR_MSEL22_1), \
+ PINMUX_DATA(TPU2TO0_MARK, PORT248_FN4), \
+ PINMUX_DATA(PORT248_I2C_SCL3_MARK, PORT248_FN5, MSEL2CR_MSEL19_0,
+ MSEL2CR_MSEL18_0), \
+ PINMUX_DATA(MSIOF2R_TSCK_MARK, PORT248_FN7, MSEL4CR_MSEL11_1),
+ PINMUX_DATA(PORT249_IROUT_MARK, PORT249_FN1), \
+ PINMUX_DATA(MFG4_IN1_MARK, PORT249_FN2), \
+ PINMUX_DATA(PORT249_I2C_SDA3_MARK, PORT249_FN5, MSEL2CR_MSEL19_0,
+ MSEL2CR_MSEL18_0), \
+ PINMUX_DATA(MSIOF2R_TSYNC_MARK, PORT249_FN7, MSEL4CR_MSEL11_1),
+ PINMUX_DATA(SDHICLK0_MARK, PORT250_FN1),
+ PINMUX_DATA(SDHICD0_MARK, PORT251_FN1),
+ PINMUX_DATA(SDHID0_0_MARK, PORT252_FN1),
+ PINMUX_DATA(SDHID0_1_MARK, PORT253_FN1),
+ PINMUX_DATA(SDHID0_2_MARK, PORT254_FN1),
+ PINMUX_DATA(SDHID0_3_MARK, PORT255_FN1),
+ PINMUX_DATA(SDHICMD0_MARK, PORT256_FN1),
+ PINMUX_DATA(SDHIWP0_MARK, PORT257_FN1),
+ PINMUX_DATA(SDHICLK1_MARK, PORT258_FN1),
+ PINMUX_DATA(SDHID1_0_MARK, PORT259_FN1), \
+ PINMUX_DATA(TS_SPSYNC2_MARK, PORT259_FN3),
+ PINMUX_DATA(SDHID1_1_MARK, PORT260_FN1), \
+ PINMUX_DATA(TS_SDAT2_MARK, PORT260_FN3),
+ PINMUX_DATA(SDHID1_2_MARK, PORT261_FN1), \
+ PINMUX_DATA(TS_SDEN2_MARK, PORT261_FN3),
+ PINMUX_DATA(SDHID1_3_MARK, PORT262_FN1), \
+ PINMUX_DATA(TS_SCK2_MARK, PORT262_FN3),
+ PINMUX_DATA(SDHICMD1_MARK, PORT263_FN1),
+ PINMUX_DATA(SDHICLK2_MARK, PORT264_FN1),
+ PINMUX_DATA(SDHID2_0_MARK, PORT265_FN1), \
+ PINMUX_DATA(TS_SPSYNC4_MARK, PORT265_FN3),
+ PINMUX_DATA(SDHID2_1_MARK, PORT266_FN1), \
+ PINMUX_DATA(TS_SDAT4_MARK, PORT266_FN3),
+ PINMUX_DATA(SDHID2_2_MARK, PORT267_FN1), \
+ PINMUX_DATA(TS_SDEN4_MARK, PORT267_FN3),
+ PINMUX_DATA(SDHID2_3_MARK, PORT268_FN1), \
+ PINMUX_DATA(TS_SCK4_MARK, PORT268_FN3),
+ PINMUX_DATA(SDHICMD2_MARK, PORT269_FN1),
+ PINMUX_DATA(MMCCLK0_MARK, PORT270_FN1, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_0_MARK, PORT271_FN1, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_1_MARK, PORT272_FN1, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_2_MARK, PORT273_FN1, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_3_MARK, PORT274_FN1, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_4_MARK, PORT275_FN1, MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(TS_SPSYNC5_MARK, PORT275_FN3),
+ PINMUX_DATA(MMCD0_5_MARK, PORT276_FN1, MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(TS_SDAT5_MARK, PORT276_FN3),
+ PINMUX_DATA(MMCD0_6_MARK, PORT277_FN1, MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(TS_SDEN5_MARK, PORT277_FN3),
+ PINMUX_DATA(MMCD0_7_MARK, PORT278_FN1, MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(TS_SCK5_MARK, PORT278_FN3),
+ PINMUX_DATA(MMCCMD0_MARK, PORT279_FN1, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(RESETOUTS__MARK, PORT281_FN1), \
+ PINMUX_DATA(EXTAL2OUT_MARK, PORT281_FN2),
+ PINMUX_DATA(MCP_WAIT__MCP_FRB_MARK, PORT288_FN1),
+ PINMUX_DATA(MCP_CKO_MARK, PORT289_FN1), \
+ PINMUX_DATA(MMCCLK1_MARK, PORT289_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D15_MCP_NAF15_MARK, PORT290_FN1),
+ PINMUX_DATA(MCP_D14_MCP_NAF14_MARK, PORT291_FN1),
+ PINMUX_DATA(MCP_D13_MCP_NAF13_MARK, PORT292_FN1),
+ PINMUX_DATA(MCP_D12_MCP_NAF12_MARK, PORT293_FN1),
+ PINMUX_DATA(MCP_D11_MCP_NAF11_MARK, PORT294_FN1),
+ PINMUX_DATA(MCP_D10_MCP_NAF10_MARK, PORT295_FN1),
+ PINMUX_DATA(MCP_D9_MCP_NAF9_MARK, PORT296_FN1),
+ PINMUX_DATA(MCP_D8_MCP_NAF8_MARK, PORT297_FN1), \
+ PINMUX_DATA(MMCCMD1_MARK, PORT297_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D7_MCP_NAF7_MARK, PORT298_FN1), \
+ PINMUX_DATA(MMCD1_7_MARK, PORT298_FN2, MSEL4CR_MSEL15_1),
+
+ PINMUX_DATA(MCP_D6_MCP_NAF6_MARK, PORT299_FN1), \
+ PINMUX_DATA(MMCD1_6_MARK, PORT299_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D5_MCP_NAF5_MARK, PORT300_FN1), \
+ PINMUX_DATA(MMCD1_5_MARK, PORT300_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D4_MCP_NAF4_MARK, PORT301_FN1), \
+ PINMUX_DATA(MMCD1_4_MARK, PORT301_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D3_MCP_NAF3_MARK, PORT302_FN1), \
+ PINMUX_DATA(MMCD1_3_MARK, PORT302_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D2_MCP_NAF2_MARK, PORT303_FN1), \
+ PINMUX_DATA(MMCD1_2_MARK, PORT303_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D1_MCP_NAF1_MARK, PORT304_FN1), \
+ PINMUX_DATA(MMCD1_1_MARK, PORT304_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D0_MCP_NAF0_MARK, PORT305_FN1), \
+ PINMUX_DATA(MMCD1_0_MARK, PORT305_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_NBRSTOUT__MARK, PORT306_FN1),
+ PINMUX_DATA(MCP_WE0__MCP_FWE_MARK, PORT309_FN1), \
+ PINMUX_DATA(MCP_RDWR_MCP_FWE_MARK, PORT309_FN2),
+
+ /* MSEL2 special cases */
+ PINMUX_DATA(TSIF2_TS_XX1_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_0,
+ MSEL2CR_MSEL12_0),
+ PINMUX_DATA(TSIF2_TS_XX2_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_0,
+ MSEL2CR_MSEL12_1),
+ PINMUX_DATA(TSIF2_TS_XX3_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_1,
+ MSEL2CR_MSEL12_0),
+ PINMUX_DATA(TSIF2_TS_XX4_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_1,
+ MSEL2CR_MSEL12_1),
+ PINMUX_DATA(TSIF2_TS_XX5_MARK, MSEL2CR_MSEL14_1, MSEL2CR_MSEL13_0,
+ MSEL2CR_MSEL12_0),
+ PINMUX_DATA(TSIF1_TS_XX1_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_0,
+ MSEL2CR_MSEL9_0),
+ PINMUX_DATA(TSIF1_TS_XX2_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_0,
+ MSEL2CR_MSEL9_1),
+ PINMUX_DATA(TSIF1_TS_XX3_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_1,
+ MSEL2CR_MSEL9_0),
+ PINMUX_DATA(TSIF1_TS_XX4_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_1,
+ MSEL2CR_MSEL9_1),
+ PINMUX_DATA(TSIF1_TS_XX5_MARK, MSEL2CR_MSEL11_1, MSEL2CR_MSEL10_0,
+ MSEL2CR_MSEL9_0),
+ PINMUX_DATA(TSIF0_TS_XX1_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_0,
+ MSEL2CR_MSEL6_0),
+ PINMUX_DATA(TSIF0_TS_XX2_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_0,
+ MSEL2CR_MSEL6_1),
+ PINMUX_DATA(TSIF0_TS_XX3_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_1,
+ MSEL2CR_MSEL6_0),
+ PINMUX_DATA(TSIF0_TS_XX4_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_1,
+ MSEL2CR_MSEL6_1),
+ PINMUX_DATA(TSIF0_TS_XX5_MARK, MSEL2CR_MSEL8_1, MSEL2CR_MSEL7_0,
+ MSEL2CR_MSEL6_0),
+ PINMUX_DATA(MST1_TS_XX1_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_0,
+ MSEL2CR_MSEL3_0),
+ PINMUX_DATA(MST1_TS_XX2_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_0,
+ MSEL2CR_MSEL3_1),
+ PINMUX_DATA(MST1_TS_XX3_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_1,
+ MSEL2CR_MSEL3_0),
+ PINMUX_DATA(MST1_TS_XX4_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_1,
+ MSEL2CR_MSEL3_1),
+ PINMUX_DATA(MST1_TS_XX5_MARK, MSEL2CR_MSEL5_1, MSEL2CR_MSEL4_0,
+ MSEL2CR_MSEL3_0),
+ PINMUX_DATA(MST0_TS_XX1_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_0,
+ MSEL2CR_MSEL0_0),
+ PINMUX_DATA(MST0_TS_XX2_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_0,
+ MSEL2CR_MSEL0_1),
+ PINMUX_DATA(MST0_TS_XX3_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_1,
+ MSEL2CR_MSEL0_0),
+ PINMUX_DATA(MST0_TS_XX4_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_1,
+ MSEL2CR_MSEL0_1),
+ PINMUX_DATA(MST0_TS_XX5_MARK, MSEL2CR_MSEL2_1, MSEL2CR_MSEL1_0,
+ MSEL2CR_MSEL0_0),
+
+ /* MSEL3 special cases */
+ PINMUX_DATA(SDHI0_VCCQ_MC0_ON_MARK, MSEL3CR_MSEL28_1),
+ PINMUX_DATA(SDHI0_VCCQ_MC0_OFF_MARK, MSEL3CR_MSEL28_0),
+ PINMUX_DATA(DEBUG_MON_VIO_MARK, MSEL3CR_MSEL15_0),
+ PINMUX_DATA(DEBUG_MON_LCDD_MARK, MSEL3CR_MSEL15_1),
+ PINMUX_DATA(LCDC_LCDC0_MARK, MSEL3CR_MSEL6_0),
+ PINMUX_DATA(LCDC_LCDC1_MARK, MSEL3CR_MSEL6_1),
+
+ /* MSEL4 special cases */
+ PINMUX_DATA(IRQ9_MEM_INT_MARK, MSEL4CR_MSEL29_0),
+ PINMUX_DATA(IRQ9_MCP_INT_MARK, MSEL4CR_MSEL29_1),
+ PINMUX_DATA(A11_MARK, MSEL4CR_MSEL13_0, MSEL4CR_MSEL12_0),
+ PINMUX_DATA(KEYOUT8_MARK, MSEL4CR_MSEL13_0, MSEL4CR_MSEL12_1),
+ PINMUX_DATA(TPU4TO3_MARK, MSEL4CR_MSEL13_1, MSEL4CR_MSEL12_0),
+ PINMUX_DATA(RESETA_N_PU_ON_MARK, MSEL4CR_MSEL4_0),
+ PINMUX_DATA(RESETA_N_PU_OFF_MARK, MSEL4CR_MSEL4_1),
+ PINMUX_DATA(EDBGREQ_PD_MARK, MSEL4CR_MSEL1_0),
+ PINMUX_DATA(EDBGREQ_PU_MARK, MSEL4CR_MSEL1_1),
+};
+
+#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
+#define GPIO_PORT_310() _310(_GPIO_PORT, , unused)
+#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK)
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ GPIO_PORT_310(),
+
+ /* Table 25-1 (Functions 0-7) */
+ GPIO_FN(VBUS_0),
+ GPIO_FN(GPI0),
+ GPIO_FN(GPI1),
+ GPIO_FN(GPI2),
+ GPIO_FN(GPI3),
+ GPIO_FN(GPI4),
+ GPIO_FN(GPI5),
+ GPIO_FN(GPI6),
+ GPIO_FN(GPI7),
+ GPIO_FN(SCIFA7_RXD),
+ GPIO_FN(SCIFA7_CTS_),
+ GPIO_FN(GPO7), \
+ GPIO_FN(MFG0_OUT2),
+ GPIO_FN(GPO6), \
+ GPIO_FN(MFG1_OUT2),
+ GPIO_FN(GPO5), \
+ GPIO_FN(SCIFA0_SCK), \
+ GPIO_FN(FSICOSLDT3), \
+ GPIO_FN(PORT16_VIO_CKOR),
+ GPIO_FN(SCIFA0_TXD),
+ GPIO_FN(SCIFA7_TXD),
+ GPIO_FN(SCIFA7_RTS_), \
+ GPIO_FN(PORT19_VIO_CKO2),
+ GPIO_FN(GPO0),
+ GPIO_FN(GPO1),
+ GPIO_FN(GPO2), \
+ GPIO_FN(STATUS0),
+ GPIO_FN(GPO3), \
+ GPIO_FN(STATUS1),
+ GPIO_FN(GPO4), \
+ GPIO_FN(STATUS2),
+ GPIO_FN(VINT),
+ GPIO_FN(TCKON),
+ GPIO_FN(XDVFS1), \
+ GPIO_FN(PORT27_I2C_SCL2), \
+ GPIO_FN(PORT27_I2C_SCL3), \
+ GPIO_FN(MFG0_OUT1), \
+ GPIO_FN(PORT27_IROUT),
+ GPIO_FN(XDVFS2), \
+ GPIO_FN(PORT28_I2C_SDA2), \
+ GPIO_FN(PORT28_I2C_SDA3), \
+ GPIO_FN(PORT28_TPU1TO1),
+ GPIO_FN(SIM_RST), \
+ GPIO_FN(PORT29_TPU1TO1),
+ GPIO_FN(SIM_CLK), \
+ GPIO_FN(PORT30_VIO_CKOR),
+ GPIO_FN(SIM_D), \
+ GPIO_FN(PORT31_IROUT),
+ GPIO_FN(SCIFA4_TXD),
+ GPIO_FN(SCIFA4_RXD), \
+ GPIO_FN(XWUP),
+ GPIO_FN(SCIFA4_RTS_),
+ GPIO_FN(SCIFA4_CTS_),
+ GPIO_FN(FSIBOBT), \
+ GPIO_FN(FSIBIBT),
+ GPIO_FN(FSIBOLR), \
+ GPIO_FN(FSIBILR),
+ GPIO_FN(FSIBOSLD),
+ GPIO_FN(FSIBISLD),
+ GPIO_FN(VACK),
+ GPIO_FN(XTAL1L),
+ GPIO_FN(SCIFA0_RTS_), \
+ GPIO_FN(FSICOSLDT2),
+ GPIO_FN(SCIFA0_RXD),
+ GPIO_FN(SCIFA0_CTS_), \
+ GPIO_FN(FSICOSLDT1),
+ GPIO_FN(FSICOBT), \
+ GPIO_FN(FSICIBT), \
+ GPIO_FN(FSIDOBT), \
+ GPIO_FN(FSIDIBT),
+ GPIO_FN(FSICOLR), \
+ GPIO_FN(FSICILR), \
+ GPIO_FN(FSIDOLR), \
+ GPIO_FN(FSIDILR),
+ GPIO_FN(FSICOSLD), \
+ GPIO_FN(PORT47_FSICSPDIF),
+ GPIO_FN(FSICISLD), \
+ GPIO_FN(FSIDISLD),
+ GPIO_FN(FSIACK), \
+ GPIO_FN(PORT49_IRDA_OUT), \
+ GPIO_FN(PORT49_IROUT), \
+ GPIO_FN(FSIAOMC),
+ GPIO_FN(FSIAOLR), \
+ GPIO_FN(BBIF2_TSYNC2), \
+ GPIO_FN(TPU2TO2), \
+ GPIO_FN(FSIAILR),
+
+ GPIO_FN(FSIAOBT), \
+ GPIO_FN(BBIF2_TSCK2), \
+ GPIO_FN(TPU2TO3), \
+ GPIO_FN(FSIAIBT),
+ GPIO_FN(FSIAOSLD), \
+ GPIO_FN(BBIF2_TXD2),
+ GPIO_FN(FSIASPDIF), \
+ GPIO_FN(PORT53_IRDA_IN), \
+ GPIO_FN(TPU3TO3), \
+ GPIO_FN(FSIBSPDIF), \
+ GPIO_FN(PORT53_FSICSPDIF),
+ GPIO_FN(FSIBCK), \
+ GPIO_FN(PORT54_IRDA_FIRSEL), \
+ GPIO_FN(TPU3TO2), \
+ GPIO_FN(FSIBOMC), \
+ GPIO_FN(FSICCK), \
+ GPIO_FN(FSICOMC),
+ GPIO_FN(FSIAISLD), \
+ GPIO_FN(TPU0TO0),
+ GPIO_FN(A0), \
+ GPIO_FN(BS_),
+ GPIO_FN(A12), \
+ GPIO_FN(PORT58_KEYOUT7), \
+ GPIO_FN(TPU4TO2),
+ GPIO_FN(A13), \
+ GPIO_FN(PORT59_KEYOUT6), \
+ GPIO_FN(TPU0TO1),
+ GPIO_FN(A14), \
+ GPIO_FN(KEYOUT5),
+ GPIO_FN(A15), \
+ GPIO_FN(KEYOUT4),
+ GPIO_FN(A16), \
+ GPIO_FN(KEYOUT3), \
+ GPIO_FN(MSIOF0_SS1),
+ GPIO_FN(A17), \
+ GPIO_FN(KEYOUT2), \
+ GPIO_FN(MSIOF0_TSYNC),
+ GPIO_FN(A18), \
+ GPIO_FN(KEYOUT1), \
+ GPIO_FN(MSIOF0_TSCK),
+ GPIO_FN(A19), \
+ GPIO_FN(KEYOUT0), \
+ GPIO_FN(MSIOF0_TXD),
+ GPIO_FN(A20), \
+ GPIO_FN(KEYIN0), \
+ GPIO_FN(MSIOF0_RSCK),
+ GPIO_FN(A21), \
+ GPIO_FN(KEYIN1), \
+ GPIO_FN(MSIOF0_RSYNC),
+ GPIO_FN(A22), \
+ GPIO_FN(KEYIN2), \
+ GPIO_FN(MSIOF0_MCK0),
+ GPIO_FN(A23), \
+ GPIO_FN(KEYIN3), \
+ GPIO_FN(MSIOF0_MCK1),
+ GPIO_FN(A24), \
+ GPIO_FN(KEYIN4), \
+ GPIO_FN(MSIOF0_RXD),
+ GPIO_FN(A25), \
+ GPIO_FN(KEYIN5), \
+ GPIO_FN(MSIOF0_SS2),
+ GPIO_FN(A26), \
+ GPIO_FN(KEYIN6),
+ GPIO_FN(KEYIN7),
+ GPIO_FN(D0_NAF0),
+ GPIO_FN(D1_NAF1),
+ GPIO_FN(D2_NAF2),
+ GPIO_FN(D3_NAF3),
+ GPIO_FN(D4_NAF4),
+ GPIO_FN(D5_NAF5),
+ GPIO_FN(D6_NAF6),
+ GPIO_FN(D7_NAF7),
+ GPIO_FN(D8_NAF8),
+ GPIO_FN(D9_NAF9),
+ GPIO_FN(D10_NAF10),
+ GPIO_FN(D11_NAF11),
+ GPIO_FN(D12_NAF12),
+ GPIO_FN(D13_NAF13),
+ GPIO_FN(D14_NAF14),
+ GPIO_FN(D15_NAF15),
+ GPIO_FN(CS4_),
+ GPIO_FN(CS5A_), \
+ GPIO_FN(PORT91_RDWR),
+ GPIO_FN(CS5B_), \
+ GPIO_FN(FCE1_),
+ GPIO_FN(CS6B_), \
+ GPIO_FN(DACK0),
+ GPIO_FN(FCE0_), \
+ GPIO_FN(CS6A_),
+ GPIO_FN(WAIT_), \
+ GPIO_FN(DREQ0),
+ GPIO_FN(RD__FSC),
+ GPIO_FN(WE0__FWE), \
+ GPIO_FN(RDWR_FWE),
+ GPIO_FN(WE1_),
+ GPIO_FN(FRB),
+ GPIO_FN(CKO),
+ GPIO_FN(NBRSTOUT_),
+ GPIO_FN(NBRST_),
+ GPIO_FN(BBIF2_TXD),
+ GPIO_FN(BBIF2_RXD),
+ GPIO_FN(BBIF2_SYNC),
+ GPIO_FN(BBIF2_SCK),
+ GPIO_FN(SCIFA3_CTS_), \
+ GPIO_FN(MFG3_IN2),
+ GPIO_FN(SCIFA3_RXD), \
+ GPIO_FN(MFG3_IN1),
+ GPIO_FN(BBIF1_SS2), \
+ GPIO_FN(SCIFA3_RTS_), \
+ GPIO_FN(MFG3_OUT1),
+ GPIO_FN(SCIFA3_TXD),
+ GPIO_FN(HSI_RX_DATA), \
+ GPIO_FN(BBIF1_RXD),
+ GPIO_FN(HSI_TX_WAKE), \
+ GPIO_FN(BBIF1_TSCK),
+ GPIO_FN(HSI_TX_DATA), \
+ GPIO_FN(BBIF1_TSYNC),
+ GPIO_FN(HSI_TX_READY), \
+ GPIO_FN(BBIF1_TXD),
+ GPIO_FN(HSI_RX_READY), \
+ GPIO_FN(BBIF1_RSCK), \
+ GPIO_FN(PORT115_I2C_SCL2), \
+ GPIO_FN(PORT115_I2C_SCL3),
+ GPIO_FN(HSI_RX_WAKE), \
+ GPIO_FN(BBIF1_RSYNC), \
+ GPIO_FN(PORT116_I2C_SDA2), \
+ GPIO_FN(PORT116_I2C_SDA3),
+ GPIO_FN(HSI_RX_FLAG), \
+ GPIO_FN(BBIF1_SS1), \
+ GPIO_FN(BBIF1_FLOW),
+ GPIO_FN(HSI_TX_FLAG),
+ GPIO_FN(VIO_VD), \
+ GPIO_FN(PORT128_LCD2VSYN), \
+ GPIO_FN(VIO2_VD), \
+ GPIO_FN(LCD2D0),
+
+ GPIO_FN(VIO_HD), \
+ GPIO_FN(PORT129_LCD2HSYN), \
+ GPIO_FN(PORT129_LCD2CS_), \
+ GPIO_FN(VIO2_HD), \
+ GPIO_FN(LCD2D1),
+ GPIO_FN(VIO_D0), \
+ GPIO_FN(PORT130_MSIOF2_RXD), \
+ GPIO_FN(LCD2D10),
+ GPIO_FN(VIO_D1), \
+ GPIO_FN(PORT131_KEYOUT6), \
+ GPIO_FN(PORT131_MSIOF2_SS1), \
+ GPIO_FN(PORT131_KEYOUT11), \
+ GPIO_FN(LCD2D11),
+ GPIO_FN(VIO_D2), \
+ GPIO_FN(PORT132_KEYOUT7), \
+ GPIO_FN(PORT132_MSIOF2_SS2), \
+ GPIO_FN(PORT132_KEYOUT10), \
+ GPIO_FN(LCD2D12),
+ GPIO_FN(VIO_D3), \
+ GPIO_FN(MSIOF2_TSYNC), \
+ GPIO_FN(LCD2D13),
+ GPIO_FN(VIO_D4), \
+ GPIO_FN(MSIOF2_TXD), \
+ GPIO_FN(LCD2D14),
+ GPIO_FN(VIO_D5), \
+ GPIO_FN(MSIOF2_TSCK), \
+ GPIO_FN(LCD2D15),
+ GPIO_FN(VIO_D6), \
+ GPIO_FN(PORT136_KEYOUT8), \
+ GPIO_FN(LCD2D16),
+ GPIO_FN(VIO_D7), \
+ GPIO_FN(PORT137_KEYOUT9), \
+ GPIO_FN(LCD2D17),
+ GPIO_FN(VIO_D8), \
+ GPIO_FN(PORT138_KEYOUT8), \
+ GPIO_FN(VIO2_D0), \
+ GPIO_FN(LCD2D6),
+ GPIO_FN(VIO_D9), \
+ GPIO_FN(PORT139_KEYOUT9), \
+ GPIO_FN(VIO2_D1), \
+ GPIO_FN(LCD2D7),
+ GPIO_FN(VIO_D10), \
+ GPIO_FN(TPU0TO2), \
+ GPIO_FN(VIO2_D2), \
+ GPIO_FN(LCD2D8),
+ GPIO_FN(VIO_D11), \
+ GPIO_FN(TPU0TO3), \
+ GPIO_FN(VIO2_D3), \
+ GPIO_FN(LCD2D9),
+ GPIO_FN(VIO_D12), \
+ GPIO_FN(PORT142_KEYOUT10), \
+ GPIO_FN(VIO2_D4), \
+ GPIO_FN(LCD2D2),
+ GPIO_FN(VIO_D13), \
+ GPIO_FN(PORT143_KEYOUT11), \
+ GPIO_FN(PORT143_KEYOUT6), \
+ GPIO_FN(VIO2_D5), \
+ GPIO_FN(LCD2D3),
+ GPIO_FN(VIO_D14), \
+ GPIO_FN(PORT144_KEYOUT7), \
+ GPIO_FN(VIO2_D6), \
+ GPIO_FN(LCD2D4),
+ GPIO_FN(VIO_D15), \
+ GPIO_FN(TPU1TO3), \
+ GPIO_FN(PORT145_LCD2DISP), \
+ GPIO_FN(PORT145_LCD2RS), \
+ GPIO_FN(VIO2_D7), \
+ GPIO_FN(LCD2D5),
+ GPIO_FN(VIO_CLK), \
+ GPIO_FN(LCD2DCK), \
+ GPIO_FN(PORT146_LCD2WR_), \
+ GPIO_FN(VIO2_CLK), \
+ GPIO_FN(LCD2D18),
+ GPIO_FN(VIO_FIELD), \
+ GPIO_FN(LCD2RD_), \
+ GPIO_FN(VIO2_FIELD), \
+ GPIO_FN(LCD2D19),
+ GPIO_FN(VIO_CKO),
+ GPIO_FN(A27), \
+ GPIO_FN(PORT149_RDWR), \
+ GPIO_FN(MFG0_IN1), \
+ GPIO_FN(PORT149_KEYOUT9),
+ GPIO_FN(MFG0_IN2),
+ GPIO_FN(TS_SPSYNC3), \
+ GPIO_FN(MSIOF2_RSCK),
+ GPIO_FN(TS_SDAT3), \
+ GPIO_FN(MSIOF2_RSYNC),
+ GPIO_FN(TPU1TO2), \
+ GPIO_FN(TS_SDEN3), \
+ GPIO_FN(PORT153_MSIOF2_SS1),
+ GPIO_FN(SCIFA2_TXD1), \
+ GPIO_FN(MSIOF2_MCK0),
+ GPIO_FN(SCIFA2_RXD1), \
+ GPIO_FN(MSIOF2_MCK1),
+ GPIO_FN(SCIFA2_RTS1_), \
+ GPIO_FN(PORT156_MSIOF2_SS2),
+ GPIO_FN(SCIFA2_CTS1_), \
+ GPIO_FN(PORT157_MSIOF2_RXD),
+ GPIO_FN(DINT_), \
+ GPIO_FN(SCIFA2_SCK1), \
+ GPIO_FN(TS_SCK3),
+ GPIO_FN(PORT159_SCIFB_SCK), \
+ GPIO_FN(PORT159_SCIFA5_SCK), \
+ GPIO_FN(NMI),
+ GPIO_FN(PORT160_SCIFB_TXD), \
+ GPIO_FN(PORT160_SCIFA5_TXD),
+ GPIO_FN(PORT161_SCIFB_CTS_), \
+ GPIO_FN(PORT161_SCIFA5_CTS_),
+ GPIO_FN(PORT162_SCIFB_RXD), \
+ GPIO_FN(PORT162_SCIFA5_RXD),
+ GPIO_FN(PORT163_SCIFB_RTS_), \
+ GPIO_FN(PORT163_SCIFA5_RTS_), \
+ GPIO_FN(TPU3TO0),
+ GPIO_FN(LCDD0),
+ GPIO_FN(LCDD1), \
+ GPIO_FN(PORT193_SCIFA5_CTS_), \
+ GPIO_FN(BBIF2_TSYNC1),
+ GPIO_FN(LCDD2), \
+ GPIO_FN(PORT194_SCIFA5_RTS_), \
+ GPIO_FN(BBIF2_TSCK1),
+ GPIO_FN(LCDD3), \
+ GPIO_FN(PORT195_SCIFA5_RXD), \
+ GPIO_FN(BBIF2_TXD1),
+ GPIO_FN(LCDD4), \
+ GPIO_FN(PORT196_SCIFA5_TXD),
+ GPIO_FN(LCDD5), \
+ GPIO_FN(PORT197_SCIFA5_SCK), \
+ GPIO_FN(MFG2_OUT2), \
+ GPIO_FN(TPU2TO1),
+ GPIO_FN(LCDD6),
+ GPIO_FN(LCDD7), \
+ GPIO_FN(TPU4TO1), \
+ GPIO_FN(MFG4_OUT2),
+ GPIO_FN(LCDD8), \
+ GPIO_FN(D16),
+ GPIO_FN(LCDD9), \
+ GPIO_FN(D17),
+ GPIO_FN(LCDD10), \
+ GPIO_FN(D18),
+ GPIO_FN(LCDD11), \
+ GPIO_FN(D19),
+ GPIO_FN(LCDD12), \
+ GPIO_FN(D20),
+ GPIO_FN(LCDD13), \
+ GPIO_FN(D21),
+ GPIO_FN(LCDD14), \
+ GPIO_FN(D22),
+ GPIO_FN(LCDD15), \
+ GPIO_FN(PORT207_MSIOF0L_SS1), \
+ GPIO_FN(D23),
+ GPIO_FN(LCDD16), \
+ GPIO_FN(PORT208_MSIOF0L_SS2), \
+ GPIO_FN(D24),
+ GPIO_FN(LCDD17), \
+ GPIO_FN(D25),
+ GPIO_FN(LCDD18), \
+ GPIO_FN(DREQ2), \
+ GPIO_FN(PORT210_MSIOF0L_SS1), \
+ GPIO_FN(D26),
+ GPIO_FN(LCDD19), \
+ GPIO_FN(PORT211_MSIOF0L_SS2), \
+ GPIO_FN(D27),
+ GPIO_FN(LCDD20), \
+ GPIO_FN(TS_SPSYNC1), \
+ GPIO_FN(MSIOF0L_MCK0), \
+ GPIO_FN(D28),
+ GPIO_FN(LCDD21), \
+ GPIO_FN(TS_SDAT1), \
+ GPIO_FN(MSIOF0L_MCK1), \
+ GPIO_FN(D29),
+ GPIO_FN(LCDD22), \
+ GPIO_FN(TS_SDEN1), \
+ GPIO_FN(MSIOF0L_RSCK), \
+ GPIO_FN(D30),
+ GPIO_FN(LCDD23), \
+ GPIO_FN(TS_SCK1), \
+ GPIO_FN(MSIOF0L_RSYNC), \
+ GPIO_FN(D31),
+ GPIO_FN(LCDDCK), \
+ GPIO_FN(LCDWR_),
+ GPIO_FN(LCDRD_), \
+ GPIO_FN(DACK2), \
+ GPIO_FN(PORT217_LCD2RS), \
+ GPIO_FN(MSIOF0L_TSYNC), \
+ GPIO_FN(VIO2_FIELD3), \
+ GPIO_FN(PORT217_LCD2DISP),
+ GPIO_FN(LCDHSYN), \
+ GPIO_FN(LCDCS_), \
+ GPIO_FN(LCDCS2_), \
+ GPIO_FN(DACK3), \
+ GPIO_FN(PORT218_VIO_CKOR),
+ GPIO_FN(LCDDISP), \
+ GPIO_FN(LCDRS), \
+ GPIO_FN(PORT219_LCD2WR_), \
+ GPIO_FN(DREQ3), \
+ GPIO_FN(MSIOF0L_TSCK), \
+ GPIO_FN(VIO2_CLK3), \
+ GPIO_FN(LCD2DCK_2),
+ GPIO_FN(LCDVSYN), \
+ GPIO_FN(LCDVSYN2),
+ GPIO_FN(LCDLCLK), \
+ GPIO_FN(DREQ1), \
+ GPIO_FN(PORT221_LCD2CS_), \
+ GPIO_FN(PWEN), \
+ GPIO_FN(MSIOF0L_RXD), \
+ GPIO_FN(VIO2_HD3), \
+ GPIO_FN(PORT221_LCD2HSYN),
+ GPIO_FN(LCDDON), \
+ GPIO_FN(LCDDON2), \
+ GPIO_FN(DACK1), \
+ GPIO_FN(OVCN), \
+ GPIO_FN(MSIOF0L_TXD), \
+ GPIO_FN(VIO2_VD3), \
+ GPIO_FN(PORT222_LCD2VSYN),
+
+ GPIO_FN(SCIFA1_TXD), \
+ GPIO_FN(OVCN2),
+ GPIO_FN(EXTLP), \
+ GPIO_FN(SCIFA1_SCK), \
+ GPIO_FN(PORT226_VIO_CKO2),
+ GPIO_FN(SCIFA1_RTS_), \
+ GPIO_FN(IDIN),
+ GPIO_FN(SCIFA1_RXD),
+ GPIO_FN(SCIFA1_CTS_), \
+ GPIO_FN(MFG1_IN1),
+ GPIO_FN(MSIOF1_TXD), \
+ GPIO_FN(SCIFA2_TXD2),
+ GPIO_FN(MSIOF1_TSYNC), \
+ GPIO_FN(SCIFA2_CTS2_),
+ GPIO_FN(MSIOF1_TSCK), \
+ GPIO_FN(SCIFA2_SCK2),
+ GPIO_FN(MSIOF1_RXD), \
+ GPIO_FN(SCIFA2_RXD2),
+ GPIO_FN(MSIOF1_RSCK), \
+ GPIO_FN(SCIFA2_RTS2_), \
+ GPIO_FN(VIO2_CLK2), \
+ GPIO_FN(LCD2D20),
+ GPIO_FN(MSIOF1_RSYNC), \
+ GPIO_FN(MFG1_IN2), \
+ GPIO_FN(VIO2_VD2), \
+ GPIO_FN(LCD2D21),
+ GPIO_FN(MSIOF1_MCK0), \
+ GPIO_FN(PORT236_I2C_SDA2),
+ GPIO_FN(MSIOF1_MCK1), \
+ GPIO_FN(PORT237_I2C_SCL2),
+ GPIO_FN(MSIOF1_SS1), \
+ GPIO_FN(VIO2_FIELD2), \
+ GPIO_FN(LCD2D22),
+ GPIO_FN(MSIOF1_SS2), \
+ GPIO_FN(VIO2_HD2), \
+ GPIO_FN(LCD2D23),
+ GPIO_FN(SCIFA6_TXD),
+ GPIO_FN(PORT241_IRDA_OUT), \
+ GPIO_FN(PORT241_IROUT), \
+ GPIO_FN(MFG4_OUT1), \
+ GPIO_FN(TPU4TO0),
+ GPIO_FN(PORT242_IRDA_IN), \
+ GPIO_FN(MFG4_IN2),
+ GPIO_FN(PORT243_IRDA_FIRSEL), \
+ GPIO_FN(PORT243_VIO_CKO2),
+ GPIO_FN(PORT244_SCIFA5_CTS_), \
+ GPIO_FN(MFG2_IN1), \
+ GPIO_FN(PORT244_SCIFB_CTS_), \
+ GPIO_FN(MSIOF2R_RXD),
+ GPIO_FN(PORT245_SCIFA5_RTS_), \
+ GPIO_FN(MFG2_IN2), \
+ GPIO_FN(PORT245_SCIFB_RTS_), \
+ GPIO_FN(MSIOF2R_TXD),
+ GPIO_FN(PORT246_SCIFA5_RXD), \
+ GPIO_FN(MFG1_OUT1), \
+ GPIO_FN(PORT246_SCIFB_RXD), \
+ GPIO_FN(TPU1TO0),
+ GPIO_FN(PORT247_SCIFA5_TXD), \
+ GPIO_FN(MFG3_OUT2), \
+ GPIO_FN(PORT247_SCIFB_TXD), \
+ GPIO_FN(TPU3TO1),
+ GPIO_FN(PORT248_SCIFA5_SCK), \
+ GPIO_FN(MFG2_OUT1), \
+ GPIO_FN(PORT248_SCIFB_SCK), \
+ GPIO_FN(TPU2TO0), \
+ GPIO_FN(PORT248_I2C_SCL3), \
+ GPIO_FN(MSIOF2R_TSCK),
+ GPIO_FN(PORT249_IROUT), \
+ GPIO_FN(MFG4_IN1), \
+ GPIO_FN(PORT249_I2C_SDA3), \
+ GPIO_FN(MSIOF2R_TSYNC),
+ GPIO_FN(SDHICLK0),
+ GPIO_FN(SDHICD0),
+ GPIO_FN(SDHID0_0),
+ GPIO_FN(SDHID0_1),
+ GPIO_FN(SDHID0_2),
+ GPIO_FN(SDHID0_3),
+ GPIO_FN(SDHICMD0),
+ GPIO_FN(SDHIWP0),
+ GPIO_FN(SDHICLK1),
+ GPIO_FN(SDHID1_0), \
+ GPIO_FN(TS_SPSYNC2),
+ GPIO_FN(SDHID1_1), \
+ GPIO_FN(TS_SDAT2),
+ GPIO_FN(SDHID1_2), \
+ GPIO_FN(TS_SDEN2),
+ GPIO_FN(SDHID1_3), \
+ GPIO_FN(TS_SCK2),
+ GPIO_FN(SDHICMD1),
+ GPIO_FN(SDHICLK2),
+ GPIO_FN(SDHID2_0), \
+ GPIO_FN(TS_SPSYNC4),
+ GPIO_FN(SDHID2_1), \
+ GPIO_FN(TS_SDAT4),
+ GPIO_FN(SDHID2_2), \
+ GPIO_FN(TS_SDEN4),
+ GPIO_FN(SDHID2_3), \
+ GPIO_FN(TS_SCK4),
+ GPIO_FN(SDHICMD2),
+ GPIO_FN(MMCCLK0),
+ GPIO_FN(MMCD0_0),
+ GPIO_FN(MMCD0_1),
+ GPIO_FN(MMCD0_2),
+ GPIO_FN(MMCD0_3),
+ GPIO_FN(MMCD0_4), \
+ GPIO_FN(TS_SPSYNC5),
+ GPIO_FN(MMCD0_5), \
+ GPIO_FN(TS_SDAT5),
+ GPIO_FN(MMCD0_6), \
+ GPIO_FN(TS_SDEN5),
+ GPIO_FN(MMCD0_7), \
+ GPIO_FN(TS_SCK5),
+ GPIO_FN(MMCCMD0),
+ GPIO_FN(RESETOUTS_), \
+ GPIO_FN(EXTAL2OUT),
+ GPIO_FN(MCP_WAIT__MCP_FRB),
+ GPIO_FN(MCP_CKO), \
+ GPIO_FN(MMCCLK1),
+ GPIO_FN(MCP_D15_MCP_NAF15),
+ GPIO_FN(MCP_D14_MCP_NAF14),
+ GPIO_FN(MCP_D13_MCP_NAF13),
+ GPIO_FN(MCP_D12_MCP_NAF12),
+ GPIO_FN(MCP_D11_MCP_NAF11),
+ GPIO_FN(MCP_D10_MCP_NAF10),
+ GPIO_FN(MCP_D9_MCP_NAF9),
+ GPIO_FN(MCP_D8_MCP_NAF8), \
+ GPIO_FN(MMCCMD1),
+ GPIO_FN(MCP_D7_MCP_NAF7), \
+ GPIO_FN(MMCD1_7),
+
+ GPIO_FN(MCP_D6_MCP_NAF6), \
+ GPIO_FN(MMCD1_6),
+ GPIO_FN(MCP_D5_MCP_NAF5), \
+ GPIO_FN(MMCD1_5),
+ GPIO_FN(MCP_D4_MCP_NAF4), \
+ GPIO_FN(MMCD1_4),
+ GPIO_FN(MCP_D3_MCP_NAF3), \
+ GPIO_FN(MMCD1_3),
+ GPIO_FN(MCP_D2_MCP_NAF2), \
+ GPIO_FN(MMCD1_2),
+ GPIO_FN(MCP_D1_MCP_NAF1), \
+ GPIO_FN(MMCD1_1),
+ GPIO_FN(MCP_D0_MCP_NAF0), \
+ GPIO_FN(MMCD1_0),
+ GPIO_FN(MCP_NBRSTOUT_),
+ GPIO_FN(MCP_WE0__MCP_FWE), \
+ GPIO_FN(MCP_RDWR_MCP_FWE),
+
+ /* MSEL2 special cases */
+ GPIO_FN(TSIF2_TS_XX1),
+ GPIO_FN(TSIF2_TS_XX2),
+ GPIO_FN(TSIF2_TS_XX3),
+ GPIO_FN(TSIF2_TS_XX4),
+ GPIO_FN(TSIF2_TS_XX5),
+ GPIO_FN(TSIF1_TS_XX1),
+ GPIO_FN(TSIF1_TS_XX2),
+ GPIO_FN(TSIF1_TS_XX3),
+ GPIO_FN(TSIF1_TS_XX4),
+ GPIO_FN(TSIF1_TS_XX5),
+ GPIO_FN(TSIF0_TS_XX1),
+ GPIO_FN(TSIF0_TS_XX2),
+ GPIO_FN(TSIF0_TS_XX3),
+ GPIO_FN(TSIF0_TS_XX4),
+ GPIO_FN(TSIF0_TS_XX5),
+ GPIO_FN(MST1_TS_XX1),
+ GPIO_FN(MST1_TS_XX2),
+ GPIO_FN(MST1_TS_XX3),
+ GPIO_FN(MST1_TS_XX4),
+ GPIO_FN(MST1_TS_XX5),
+ GPIO_FN(MST0_TS_XX1),
+ GPIO_FN(MST0_TS_XX2),
+ GPIO_FN(MST0_TS_XX3),
+ GPIO_FN(MST0_TS_XX4),
+ GPIO_FN(MST0_TS_XX5),
+
+ /* MSEL3 special cases */
+ GPIO_FN(SDHI0_VCCQ_MC0_ON),
+ GPIO_FN(SDHI0_VCCQ_MC0_OFF),
+ GPIO_FN(DEBUG_MON_VIO),
+ GPIO_FN(DEBUG_MON_LCDD),
+ GPIO_FN(LCDC_LCDC0),
+ GPIO_FN(LCDC_LCDC1),
+
+ /* MSEL4 special cases */
+ GPIO_FN(IRQ9_MEM_INT),
+ GPIO_FN(IRQ9_MCP_INT),
+ GPIO_FN(A11),
+ GPIO_FN(KEYOUT8),
+ GPIO_FN(TPU4TO3),
+ GPIO_FN(RESETA_N_PU_ON),
+ GPIO_FN(RESETA_N_PU_OFF),
+ GPIO_FN(EDBGREQ_PD),
+ GPIO_FN(EDBGREQ_PU),
+};
+
+#define PORTCR(nr, reg) \
+ { PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
+ 0, \
+ /*0001*/ PORT##nr##_OUT , \
+ /*0010*/ PORT##nr##_IN , 0, 0, 0, 0, 0, 0, 0, \
+ /*1010*/ PORT##nr##_IN_PD, 0, 0, 0, \
+ /*1110*/ PORT##nr##_IN_PU, 0, \
+ PORT##nr##_FN0, PORT##nr##_FN1, PORT##nr##_FN2, \
+ PORT##nr##_FN3, PORT##nr##_FN4, PORT##nr##_FN5, \
+ PORT##nr##_FN6, PORT##nr##_FN7, 0, 0, 0, 0, 0, 0, 0, 0 } \
+ }
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ PORTCR(0, 0xe6050000), /* PORT0CR */
+ PORTCR(1, 0xe6050001), /* PORT1CR */
+ PORTCR(2, 0xe6050002), /* PORT2CR */
+ PORTCR(3, 0xe6050003), /* PORT3CR */
+ PORTCR(4, 0xe6050004), /* PORT4CR */
+ PORTCR(5, 0xe6050005), /* PORT5CR */
+ PORTCR(6, 0xe6050006), /* PORT6CR */
+ PORTCR(7, 0xe6050007), /* PORT7CR */
+ PORTCR(8, 0xe6050008), /* PORT8CR */
+ PORTCR(9, 0xe6050009), /* PORT9CR */
+
+ PORTCR(10, 0xe605000a), /* PORT10CR */
+ PORTCR(11, 0xe605000b), /* PORT11CR */
+ PORTCR(12, 0xe605000c), /* PORT12CR */
+ PORTCR(13, 0xe605000d), /* PORT13CR */
+ PORTCR(14, 0xe605000e), /* PORT14CR */
+ PORTCR(15, 0xe605000f), /* PORT15CR */
+ PORTCR(16, 0xe6050010), /* PORT16CR */
+ PORTCR(17, 0xe6050011), /* PORT17CR */
+ PORTCR(18, 0xe6050012), /* PORT18CR */
+ PORTCR(19, 0xe6050013), /* PORT19CR */
+
+ PORTCR(20, 0xe6050014), /* PORT20CR */
+ PORTCR(21, 0xe6050015), /* PORT21CR */
+ PORTCR(22, 0xe6050016), /* PORT22CR */
+ PORTCR(23, 0xe6050017), /* PORT23CR */
+ PORTCR(24, 0xe6050018), /* PORT24CR */
+ PORTCR(25, 0xe6050019), /* PORT25CR */
+ PORTCR(26, 0xe605001a), /* PORT26CR */
+ PORTCR(27, 0xe605001b), /* PORT27CR */
+ PORTCR(28, 0xe605001c), /* PORT28CR */
+ PORTCR(29, 0xe605001d), /* PORT29CR */
+
+ PORTCR(30, 0xe605001e), /* PORT30CR */
+ PORTCR(31, 0xe605001f), /* PORT31CR */
+ PORTCR(32, 0xe6051020), /* PORT32CR */
+ PORTCR(33, 0xe6051021), /* PORT33CR */
+ PORTCR(34, 0xe6051022), /* PORT34CR */
+ PORTCR(35, 0xe6051023), /* PORT35CR */
+ PORTCR(36, 0xe6051024), /* PORT36CR */
+ PORTCR(37, 0xe6051025), /* PORT37CR */
+ PORTCR(38, 0xe6051026), /* PORT38CR */
+ PORTCR(39, 0xe6051027), /* PORT39CR */
+
+ PORTCR(40, 0xe6051028), /* PORT40CR */
+ PORTCR(41, 0xe6051029), /* PORT41CR */
+ PORTCR(42, 0xe605102a), /* PORT42CR */
+ PORTCR(43, 0xe605102b), /* PORT43CR */
+ PORTCR(44, 0xe605102c), /* PORT44CR */
+ PORTCR(45, 0xe605102d), /* PORT45CR */
+ PORTCR(46, 0xe605102e), /* PORT46CR */
+ PORTCR(47, 0xe605102f), /* PORT47CR */
+ PORTCR(48, 0xe6051030), /* PORT48CR */
+ PORTCR(49, 0xe6051031), /* PORT49CR */
+
+ PORTCR(50, 0xe6051032), /* PORT50CR */
+ PORTCR(51, 0xe6051033), /* PORT51CR */
+ PORTCR(52, 0xe6051034), /* PORT52CR */
+ PORTCR(53, 0xe6051035), /* PORT53CR */
+ PORTCR(54, 0xe6051036), /* PORT54CR */
+ PORTCR(55, 0xe6051037), /* PORT55CR */
+ PORTCR(56, 0xe6051038), /* PORT56CR */
+ PORTCR(57, 0xe6051039), /* PORT57CR */
+ PORTCR(58, 0xe605103a), /* PORT58CR */
+ PORTCR(59, 0xe605103b), /* PORT59CR */
+
+ PORTCR(60, 0xe605103c), /* PORT60CR */
+ PORTCR(61, 0xe605103d), /* PORT61CR */
+ PORTCR(62, 0xe605103e), /* PORT62CR */
+ PORTCR(63, 0xe605103f), /* PORT63CR */
+ PORTCR(64, 0xe6051040), /* PORT64CR */
+ PORTCR(65, 0xe6051041), /* PORT65CR */
+ PORTCR(66, 0xe6051042), /* PORT66CR */
+ PORTCR(67, 0xe6051043), /* PORT67CR */
+ PORTCR(68, 0xe6051044), /* PORT68CR */
+ PORTCR(69, 0xe6051045), /* PORT69CR */
+
+ PORTCR(70, 0xe6051046), /* PORT70CR */
+ PORTCR(71, 0xe6051047), /* PORT71CR */
+ PORTCR(72, 0xe6051048), /* PORT72CR */
+ PORTCR(73, 0xe6051049), /* PORT73CR */
+ PORTCR(74, 0xe605104a), /* PORT74CR */
+ PORTCR(75, 0xe605104b), /* PORT75CR */
+ PORTCR(76, 0xe605104c), /* PORT76CR */
+ PORTCR(77, 0xe605104d), /* PORT77CR */
+ PORTCR(78, 0xe605104e), /* PORT78CR */
+ PORTCR(79, 0xe605104f), /* PORT79CR */
+
+ PORTCR(80, 0xe6051050), /* PORT80CR */
+ PORTCR(81, 0xe6051051), /* PORT81CR */
+ PORTCR(82, 0xe6051052), /* PORT82CR */
+ PORTCR(83, 0xe6051053), /* PORT83CR */
+ PORTCR(84, 0xe6051054), /* PORT84CR */
+ PORTCR(85, 0xe6051055), /* PORT85CR */
+ PORTCR(86, 0xe6051056), /* PORT86CR */
+ PORTCR(87, 0xe6051057), /* PORT87CR */
+ PORTCR(88, 0xe6051058), /* PORT88CR */
+ PORTCR(89, 0xe6051059), /* PORT89CR */
+
+ PORTCR(90, 0xe605105a), /* PORT90CR */
+ PORTCR(91, 0xe605105b), /* PORT91CR */
+ PORTCR(92, 0xe605105c), /* PORT92CR */
+ PORTCR(93, 0xe605105d), /* PORT93CR */
+ PORTCR(94, 0xe605105e), /* PORT94CR */
+ PORTCR(95, 0xe605105f), /* PORT95CR */
+ PORTCR(96, 0xe6052060), /* PORT96CR */
+ PORTCR(97, 0xe6052061), /* PORT97CR */
+ PORTCR(98, 0xe6052062), /* PORT98CR */
+ PORTCR(99, 0xe6052063), /* PORT99CR */
+
+ PORTCR(100, 0xe6052064), /* PORT100CR */
+ PORTCR(101, 0xe6052065), /* PORT101CR */
+ PORTCR(102, 0xe6052066), /* PORT102CR */
+ PORTCR(103, 0xe6052067), /* PORT103CR */
+ PORTCR(104, 0xe6052068), /* PORT104CR */
+ PORTCR(105, 0xe6052069), /* PORT105CR */
+ PORTCR(106, 0xe605206a), /* PORT106CR */
+ PORTCR(107, 0xe605206b), /* PORT107CR */
+ PORTCR(108, 0xe605206c), /* PORT108CR */
+ PORTCR(109, 0xe605206d), /* PORT109CR */
+
+ PORTCR(110, 0xe605206e), /* PORT110CR */
+ PORTCR(111, 0xe605206f), /* PORT111CR */
+ PORTCR(112, 0xe6052070), /* PORT112CR */
+ PORTCR(113, 0xe6052071), /* PORT113CR */
+ PORTCR(114, 0xe6052072), /* PORT114CR */
+ PORTCR(115, 0xe6052073), /* PORT115CR */
+ PORTCR(116, 0xe6052074), /* PORT116CR */
+ PORTCR(117, 0xe6052075), /* PORT117CR */
+ PORTCR(118, 0xe6052076), /* PORT118CR */
+
+ PORTCR(128, 0xe6052080), /* PORT128CR */
+ PORTCR(129, 0xe6052081), /* PORT129CR */
+
+ PORTCR(130, 0xe6052082), /* PORT130CR */
+ PORTCR(131, 0xe6052083), /* PORT131CR */
+ PORTCR(132, 0xe6052084), /* PORT132CR */
+ PORTCR(133, 0xe6052085), /* PORT133CR */
+ PORTCR(134, 0xe6052086), /* PORT134CR */
+ PORTCR(135, 0xe6052087), /* PORT135CR */
+ PORTCR(136, 0xe6052088), /* PORT136CR */
+ PORTCR(137, 0xe6052089), /* PORT137CR */
+ PORTCR(138, 0xe605208a), /* PORT138CR */
+ PORTCR(139, 0xe605208b), /* PORT139CR */
+
+ PORTCR(140, 0xe605208c), /* PORT140CR */
+ PORTCR(141, 0xe605208d), /* PORT141CR */
+ PORTCR(142, 0xe605208e), /* PORT142CR */
+ PORTCR(143, 0xe605208f), /* PORT143CR */
+ PORTCR(144, 0xe6052090), /* PORT144CR */
+ PORTCR(145, 0xe6052091), /* PORT145CR */
+ PORTCR(146, 0xe6052092), /* PORT146CR */
+ PORTCR(147, 0xe6052093), /* PORT147CR */
+ PORTCR(148, 0xe6052094), /* PORT148CR */
+ PORTCR(149, 0xe6052095), /* PORT149CR */
+
+ PORTCR(150, 0xe6052096), /* PORT150CR */
+ PORTCR(151, 0xe6052097), /* PORT151CR */
+ PORTCR(152, 0xe6052098), /* PORT152CR */
+ PORTCR(153, 0xe6052099), /* PORT153CR */
+ PORTCR(154, 0xe605209a), /* PORT154CR */
+ PORTCR(155, 0xe605209b), /* PORT155CR */
+ PORTCR(156, 0xe605209c), /* PORT156CR */
+ PORTCR(157, 0xe605209d), /* PORT157CR */
+ PORTCR(158, 0xe605209e), /* PORT158CR */
+ PORTCR(159, 0xe605209f), /* PORT159CR */
+
+ PORTCR(160, 0xe60520a0), /* PORT160CR */
+ PORTCR(161, 0xe60520a1), /* PORT161CR */
+ PORTCR(162, 0xe60520a2), /* PORT162CR */
+ PORTCR(163, 0xe60520a3), /* PORT163CR */
+ PORTCR(164, 0xe60520a4), /* PORT164CR */
+
+ PORTCR(192, 0xe60520c0), /* PORT192CR */
+ PORTCR(193, 0xe60520c1), /* PORT193CR */
+ PORTCR(194, 0xe60520c2), /* PORT194CR */
+ PORTCR(195, 0xe60520c3), /* PORT195CR */
+ PORTCR(196, 0xe60520c4), /* PORT196CR */
+ PORTCR(197, 0xe60520c5), /* PORT197CR */
+ PORTCR(198, 0xe60520c6), /* PORT198CR */
+ PORTCR(199, 0xe60520c7), /* PORT199CR */
+
+ PORTCR(200, 0xe60520c8), /* PORT200CR */
+ PORTCR(201, 0xe60520c9), /* PORT201CR */
+ PORTCR(202, 0xe60520ca), /* PORT202CR */
+ PORTCR(203, 0xe60520cb), /* PORT203CR */
+ PORTCR(204, 0xe60520cc), /* PORT204CR */
+ PORTCR(205, 0xe60520cd), /* PORT205CR */
+ PORTCR(206, 0xe60520ce), /* PORT206CR */
+ PORTCR(207, 0xe60520cf), /* PORT207CR */
+ PORTCR(208, 0xe60520d0), /* PORT208CR */
+ PORTCR(209, 0xe60520d1), /* PORT209CR */
+
+ PORTCR(210, 0xe60520d2), /* PORT210CR */
+ PORTCR(211, 0xe60520d3), /* PORT211CR */
+ PORTCR(212, 0xe60520d4), /* PORT212CR */
+ PORTCR(213, 0xe60520d5), /* PORT213CR */
+ PORTCR(214, 0xe60520d6), /* PORT214CR */
+ PORTCR(215, 0xe60520d7), /* PORT215CR */
+ PORTCR(216, 0xe60520d8), /* PORT216CR */
+ PORTCR(217, 0xe60520d9), /* PORT217CR */
+ PORTCR(218, 0xe60520da), /* PORT218CR */
+ PORTCR(219, 0xe60520db), /* PORT219CR */
+
+ PORTCR(220, 0xe60520dc), /* PORT220CR */
+ PORTCR(221, 0xe60520dd), /* PORT221CR */
+ PORTCR(222, 0xe60520de), /* PORT222CR */
+ PORTCR(223, 0xe60520df), /* PORT223CR */
+ PORTCR(224, 0xe60530e0), /* PORT224CR */
+ PORTCR(225, 0xe60530e1), /* PORT225CR */
+ PORTCR(226, 0xe60530e2), /* PORT226CR */
+ PORTCR(227, 0xe60530e3), /* PORT227CR */
+ PORTCR(228, 0xe60530e4), /* PORT228CR */
+ PORTCR(229, 0xe60530e5), /* PORT229CR */
+
+ PORTCR(230, 0xe60530e6), /* PORT230CR */
+ PORTCR(231, 0xe60530e7), /* PORT231CR */
+ PORTCR(232, 0xe60530e8), /* PORT232CR */
+ PORTCR(233, 0xe60530e9), /* PORT233CR */
+ PORTCR(234, 0xe60530ea), /* PORT234CR */
+ PORTCR(235, 0xe60530eb), /* PORT235CR */
+ PORTCR(236, 0xe60530ec), /* PORT236CR */
+ PORTCR(237, 0xe60530ed), /* PORT237CR */
+ PORTCR(238, 0xe60530ee), /* PORT238CR */
+ PORTCR(239, 0xe60530ef), /* PORT239CR */
+
+ PORTCR(240, 0xe60530f0), /* PORT240CR */
+ PORTCR(241, 0xe60530f1), /* PORT241CR */
+ PORTCR(242, 0xe60530f2), /* PORT242CR */
+ PORTCR(243, 0xe60530f3), /* PORT243CR */
+ PORTCR(244, 0xe60530f4), /* PORT244CR */
+ PORTCR(245, 0xe60530f5), /* PORT245CR */
+ PORTCR(246, 0xe60530f6), /* PORT246CR */
+ PORTCR(247, 0xe60530f7), /* PORT247CR */
+ PORTCR(248, 0xe60530f8), /* PORT248CR */
+ PORTCR(249, 0xe60530f9), /* PORT249CR */
+
+ PORTCR(250, 0xe60530fa), /* PORT250CR */
+ PORTCR(251, 0xe60530fb), /* PORT251CR */
+ PORTCR(252, 0xe60530fc), /* PORT252CR */
+ PORTCR(253, 0xe60530fd), /* PORT253CR */
+ PORTCR(254, 0xe60530fe), /* PORT254CR */
+ PORTCR(255, 0xe60530ff), /* PORT255CR */
+ PORTCR(256, 0xe6053100), /* PORT256CR */
+ PORTCR(257, 0xe6053101), /* PORT257CR */
+ PORTCR(258, 0xe6053102), /* PORT258CR */
+ PORTCR(259, 0xe6053103), /* PORT259CR */
+
+ PORTCR(260, 0xe6053104), /* PORT260CR */
+ PORTCR(261, 0xe6053105), /* PORT261CR */
+ PORTCR(262, 0xe6053106), /* PORT262CR */
+ PORTCR(263, 0xe6053107), /* PORT263CR */
+ PORTCR(264, 0xe6053108), /* PORT264CR */
+ PORTCR(265, 0xe6053109), /* PORT265CR */
+ PORTCR(266, 0xe605310a), /* PORT266CR */
+ PORTCR(267, 0xe605310b), /* PORT267CR */
+ PORTCR(268, 0xe605310c), /* PORT268CR */
+ PORTCR(269, 0xe605310d), /* PORT269CR */
+
+ PORTCR(270, 0xe605310e), /* PORT270CR */
+ PORTCR(271, 0xe605310f), /* PORT271CR */
+ PORTCR(272, 0xe6053110), /* PORT272CR */
+ PORTCR(273, 0xe6053111), /* PORT273CR */
+ PORTCR(274, 0xe6053112), /* PORT274CR */
+ PORTCR(275, 0xe6053113), /* PORT275CR */
+ PORTCR(276, 0xe6053114), /* PORT276CR */
+ PORTCR(277, 0xe6053115), /* PORT277CR */
+ PORTCR(278, 0xe6053116), /* PORT278CR */
+ PORTCR(279, 0xe6053117), /* PORT279CR */
+
+ PORTCR(280, 0xe6053118), /* PORT280CR */
+ PORTCR(281, 0xe6053119), /* PORT281CR */
+ PORTCR(282, 0xe605311a), /* PORT282CR */
+
+ PORTCR(288, 0xe6052120), /* PORT288CR */
+ PORTCR(289, 0xe6052121), /* PORT289CR */
+
+ PORTCR(290, 0xe6052122), /* PORT290CR */
+ PORTCR(291, 0xe6052123), /* PORT291CR */
+ PORTCR(292, 0xe6052124), /* PORT292CR */
+ PORTCR(293, 0xe6052125), /* PORT293CR */
+ PORTCR(294, 0xe6052126), /* PORT294CR */
+ PORTCR(295, 0xe6052127), /* PORT295CR */
+ PORTCR(296, 0xe6052128), /* PORT296CR */
+ PORTCR(297, 0xe6052129), /* PORT297CR */
+ PORTCR(298, 0xe605212a), /* PORT298CR */
+ PORTCR(299, 0xe605212b), /* PORT299CR */
+
+ PORTCR(300, 0xe605212c), /* PORT300CR */
+ PORTCR(301, 0xe605212d), /* PORT301CR */
+ PORTCR(302, 0xe605212e), /* PORT302CR */
+ PORTCR(303, 0xe605212f), /* PORT303CR */
+ PORTCR(304, 0xe6052130), /* PORT304CR */
+ PORTCR(305, 0xe6052131), /* PORT305CR */
+ PORTCR(306, 0xe6052132), /* PORT306CR */
+ PORTCR(307, 0xe6052133), /* PORT307CR */
+ PORTCR(308, 0xe6052134), /* PORT308CR */
+ PORTCR(309, 0xe6052135), /* PORT309CR */
+
+ { PINMUX_CFG_REG("MSEL2CR", 0xe605801c, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1,
+ MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1,
+ MSEL2CR_MSEL17_0, MSEL2CR_MSEL17_1,
+ MSEL2CR_MSEL16_0, MSEL2CR_MSEL16_1,
+ 0, 0,
+ MSEL2CR_MSEL14_0, MSEL2CR_MSEL14_1,
+ MSEL2CR_MSEL13_0, MSEL2CR_MSEL13_1,
+ MSEL2CR_MSEL12_0, MSEL2CR_MSEL12_1,
+ MSEL2CR_MSEL11_0, MSEL2CR_MSEL11_1,
+ MSEL2CR_MSEL10_0, MSEL2CR_MSEL10_1,
+ MSEL2CR_MSEL9_0, MSEL2CR_MSEL9_1,
+ MSEL2CR_MSEL8_0, MSEL2CR_MSEL8_1,
+ MSEL2CR_MSEL7_0, MSEL2CR_MSEL7_1,
+ MSEL2CR_MSEL6_0, MSEL2CR_MSEL6_1,
+ MSEL2CR_MSEL5_0, MSEL2CR_MSEL5_1,
+ MSEL2CR_MSEL4_0, MSEL2CR_MSEL4_1,
+ MSEL2CR_MSEL3_0, MSEL2CR_MSEL3_1,
+ MSEL2CR_MSEL2_0, MSEL2CR_MSEL2_1,
+ MSEL2CR_MSEL1_0, MSEL2CR_MSEL1_1,
+ MSEL2CR_MSEL0_0, MSEL2CR_MSEL0_1,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_MSEL28_0, MSEL3CR_MSEL28_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_MSEL15_0, MSEL3CR_MSEL15_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_MSEL11_0, MSEL3CR_MSEL11_1,
+ 0, 0,
+ MSEL3CR_MSEL9_0, MSEL3CR_MSEL9_1,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_MSEL6_0, MSEL3CR_MSEL6_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_MSEL2_0, MSEL3CR_MSEL2_1,
+ 0, 0,
+ 0, 0,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL4CR", 0xe6058024, 32, 1) {
+ 0, 0,
+ 0, 0,
+ MSEL4CR_MSEL29_0, MSEL4CR_MSEL29_1,
+ 0, 0,
+ MSEL4CR_MSEL27_0, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0, MSEL4CR_MSEL26_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL4CR_MSEL22_0, MSEL4CR_MSEL22_1,
+ MSEL4CR_MSEL21_0, MSEL4CR_MSEL21_1,
+ MSEL4CR_MSEL20_0, MSEL4CR_MSEL20_1,
+ MSEL4CR_MSEL19_0, MSEL4CR_MSEL19_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL4CR_MSEL15_0, MSEL4CR_MSEL15_1,
+ 0, 0,
+ MSEL4CR_MSEL13_0, MSEL4CR_MSEL13_1,
+ MSEL4CR_MSEL12_0, MSEL4CR_MSEL12_1,
+ MSEL4CR_MSEL11_0, MSEL4CR_MSEL11_1,
+ MSEL4CR_MSEL10_0, MSEL4CR_MSEL10_1,
+ MSEL4CR_MSEL9_0, MSEL4CR_MSEL9_1,
+ MSEL4CR_MSEL8_0, MSEL4CR_MSEL8_1,
+ MSEL4CR_MSEL7_0, MSEL4CR_MSEL7_1,
+ 0, 0,
+ 0, 0,
+ MSEL4CR_MSEL4_0, MSEL4CR_MSEL4_1,
+ 0, 0,
+ 0, 0,
+ MSEL4CR_MSEL1_0, MSEL4CR_MSEL1_1,
+ 0, 0,
+ }
+ },
+ { },
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PORTL031_000DR", 0xe6054000, 32) {
+ PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA,
+ PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
+ PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA,
+ PORT19_DATA, PORT18_DATA, PORT17_DATA, PORT16_DATA,
+ PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA,
+ PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA,
+ PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA,
+ PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA }
+ },
+ { PINMUX_DATA_REG("PORTD063_032DR", 0xe6055000, 32) {
+ PORT63_DATA, PORT62_DATA, PORT61_DATA, PORT60_DATA,
+ PORT59_DATA, PORT58_DATA, PORT57_DATA, PORT56_DATA,
+ PORT55_DATA, PORT54_DATA, PORT53_DATA, PORT52_DATA,
+ PORT51_DATA, PORT50_DATA, PORT49_DATA, PORT48_DATA,
+ PORT47_DATA, PORT46_DATA, PORT45_DATA, PORT44_DATA,
+ PORT43_DATA, PORT42_DATA, PORT41_DATA, PORT40_DATA,
+ PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA,
+ PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA }
+ },
+ { PINMUX_DATA_REG("PORTD095_064DR", 0xe6055004, 32) {
+ PORT95_DATA, PORT94_DATA, PORT93_DATA, PORT92_DATA,
+ PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA,
+ PORT87_DATA, PORT86_DATA, PORT85_DATA, PORT84_DATA,
+ PORT83_DATA, PORT82_DATA, PORT81_DATA, PORT80_DATA,
+ PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA,
+ PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA,
+ PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA,
+ PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR127_096DR", 0xe6056000, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, PORT118_DATA, PORT117_DATA, PORT116_DATA,
+ PORT115_DATA, PORT114_DATA, PORT113_DATA, PORT112_DATA,
+ PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA,
+ PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA,
+ PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA,
+ PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR159_128DR", 0xe6056004, 32) {
+ PORT159_DATA, PORT158_DATA, PORT157_DATA, PORT156_DATA,
+ PORT155_DATA, PORT154_DATA, PORT153_DATA, PORT152_DATA,
+ PORT151_DATA, PORT150_DATA, PORT149_DATA, PORT148_DATA,
+ PORT147_DATA, PORT146_DATA, PORT145_DATA, PORT144_DATA,
+ PORT143_DATA, PORT142_DATA, PORT141_DATA, PORT140_DATA,
+ PORT139_DATA, PORT138_DATA, PORT137_DATA, PORT136_DATA,
+ PORT135_DATA, PORT134_DATA, PORT133_DATA, PORT132_DATA,
+ PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR191_160DR", 0xe6056008, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, PORT164_DATA,
+ PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR223_192DR", 0xe605600C, 32) {
+ PORT223_DATA, PORT222_DATA, PORT221_DATA, PORT220_DATA,
+ PORT219_DATA, PORT218_DATA, PORT217_DATA, PORT216_DATA,
+ PORT215_DATA, PORT214_DATA, PORT213_DATA, PORT212_DATA,
+ PORT211_DATA, PORT210_DATA, PORT209_DATA, PORT208_DATA,
+ PORT207_DATA, PORT206_DATA, PORT205_DATA, PORT204_DATA,
+ PORT203_DATA, PORT202_DATA, PORT201_DATA, PORT200_DATA,
+ PORT199_DATA, PORT198_DATA, PORT197_DATA, PORT196_DATA,
+ PORT195_DATA, PORT194_DATA, PORT193_DATA, PORT192_DATA }
+ },
+ { PINMUX_DATA_REG("PORTU255_224DR", 0xe6057000, 32) {
+ PORT255_DATA, PORT254_DATA, PORT253_DATA, PORT252_DATA,
+ PORT251_DATA, PORT250_DATA, PORT249_DATA, PORT248_DATA,
+ PORT247_DATA, PORT246_DATA, PORT245_DATA, PORT244_DATA,
+ PORT243_DATA, PORT242_DATA, PORT241_DATA, PORT240_DATA,
+ PORT239_DATA, PORT238_DATA, PORT237_DATA, PORT236_DATA,
+ PORT235_DATA, PORT234_DATA, PORT233_DATA, PORT232_DATA,
+ PORT231_DATA, PORT230_DATA, PORT229_DATA, PORT228_DATA,
+ PORT227_DATA, PORT226_DATA, PORT225_DATA, PORT224_DATA }
+ },
+ { PINMUX_DATA_REG("PORTU287_256DR", 0xe6057004, 32) {
+ 0, 0, 0, 0,
+ 0, PORT282_DATA, PORT281_DATA, PORT280_DATA,
+ PORT279_DATA, PORT278_DATA, PORT277_DATA, PORT276_DATA,
+ PORT275_DATA, PORT274_DATA, PORT273_DATA, PORT272_DATA,
+ PORT271_DATA, PORT270_DATA, PORT269_DATA, PORT268_DATA,
+ PORT267_DATA, PORT266_DATA, PORT265_DATA, PORT264_DATA,
+ PORT263_DATA, PORT262_DATA, PORT261_DATA, PORT260_DATA,
+ PORT259_DATA, PORT258_DATA, PORT257_DATA, PORT256_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR319_288DR", 0xe6056010, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, PORT309_DATA, PORT308_DATA,
+ PORT307_DATA, PORT306_DATA, PORT305_DATA, PORT304_DATA,
+ PORT303_DATA, PORT302_DATA, PORT301_DATA, PORT300_DATA,
+ PORT299_DATA, PORT298_DATA, PORT297_DATA, PORT296_DATA,
+ PORT295_DATA, PORT294_DATA, PORT293_DATA, PORT292_DATA,
+ PORT291_DATA, PORT290_DATA, PORT289_DATA, PORT288_DATA }
+ },
+ { },
+};
+
+static struct pinmux_info sh73a0_pinmux_info = {
+ .name = "sh73a0_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PORT0,
+ .last_gpio = GPIO_FN_EDBGREQ_PU,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
+
+void sh73a0_pinmux_init(void)
+{
+ register_pinmux(&sh73a0_pinmux_info);
+}
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 564a6d0be473..2e3e11ee7c43 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -416,6 +416,16 @@ static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = {
.addr = 0xe6870030,
.chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xce,
+ }, {
+ .slave_id = SHDMA_SLAVE_MMCIF_TX,
+ .addr = 0xe6bd0034,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xd1,
+ }, {
+ .slave_id = SHDMA_SLAVE_MMCIF_RX,
+ .addr = 0xe6bd0034,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xd2,
},
};
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
new file mode 100644
index 000000000000..53f1ea66efbc
--- /dev/null
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -0,0 +1,349 @@
+/*
+ * sh73a0 processor support
+ *
+ * Copyright (C) 2010 Takashi Yoshii
+ * Copyright (C) 2010 Magnus Damm
+ * Copyright (C) 2008 Yoshihiro Shimoda
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_intc.h>
+#include <linux/sh_timer.h>
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xe6c40000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { gic_spi(72), gic_spi(72),
+ gic_spi(72), gic_spi(72) },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xe6c50000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { gic_spi(73), gic_spi(73),
+ gic_spi(73), gic_spi(73) },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xe6c60000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { gic_spi(74), gic_spi(74),
+ gic_spi(74), gic_spi(74) },
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xe6c70000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { gic_spi(75), gic_spi(75),
+ gic_spi(75), gic_spi(75) },
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .mapbase = 0xe6c80000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { gic_spi(78), gic_spi(78),
+ gic_spi(78), gic_spi(78) },
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .mapbase = 0xe6cb0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { gic_spi(79), gic_spi(79),
+ gic_spi(79), gic_spi(79) },
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+static struct plat_sci_port scif6_platform_data = {
+ .mapbase = 0xe6cc0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { gic_spi(156), gic_spi(156),
+ gic_spi(156), gic_spi(156) },
+};
+
+static struct platform_device scif6_device = {
+ .name = "sh-sci",
+ .id = 6,
+ .dev = {
+ .platform_data = &scif6_platform_data,
+ },
+};
+
+static struct plat_sci_port scif7_platform_data = {
+ .mapbase = 0xe6cd0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { gic_spi(143), gic_spi(143),
+ gic_spi(143), gic_spi(143) },
+};
+
+static struct platform_device scif7_device = {
+ .name = "sh-sci",
+ .id = 7,
+ .dev = {
+ .platform_data = &scif7_platform_data,
+ },
+};
+
+static struct plat_sci_port scif8_platform_data = {
+ .mapbase = 0xe6c30000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFB,
+ .irqs = { gic_spi(80), gic_spi(80),
+ gic_spi(80), gic_spi(80) },
+};
+
+static struct platform_device scif8_device = {
+ .name = "sh-sci",
+ .id = 8,
+ .dev = {
+ .platform_data = &scif8_platform_data,
+ },
+};
+
+static struct sh_timer_config cmt10_platform_data = {
+ .name = "CMT10",
+ .channel_offset = 0x10,
+ .timer_bit = 0,
+ .clockevent_rating = 125,
+ .clocksource_rating = 125,
+};
+
+static struct resource cmt10_resources[] = {
+ [0] = {
+ .name = "CMT10",
+ .start = 0xe6138010,
+ .end = 0xe613801b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = gic_spi(65),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt10_device = {
+ .name = "sh_cmt",
+ .id = 10,
+ .dev = {
+ .platform_data = &cmt10_platform_data,
+ },
+ .resource = cmt10_resources,
+ .num_resources = ARRAY_SIZE(cmt10_resources),
+};
+
+static struct resource i2c0_resources[] = {
+ [0] = {
+ .name = "IIC0",
+ .start = 0xe6820000,
+ .end = 0xe6820425 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = gic_spi(167),
+ .end = gic_spi(170),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource i2c1_resources[] = {
+ [0] = {
+ .name = "IIC1",
+ .start = 0xe6822000,
+ .end = 0xe6822425 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = gic_spi(51),
+ .end = gic_spi(54),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource i2c2_resources[] = {
+ [0] = {
+ .name = "IIC2",
+ .start = 0xe6824000,
+ .end = 0xe6824425 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = gic_spi(171),
+ .end = gic_spi(174),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource i2c3_resources[] = {
+ [0] = {
+ .name = "IIC3",
+ .start = 0xe6826000,
+ .end = 0xe6826425 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = gic_spi(183),
+ .end = gic_spi(186),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource i2c4_resources[] = {
+ [0] = {
+ .name = "IIC4",
+ .start = 0xe6828000,
+ .end = 0xe6828425 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = gic_spi(187),
+ .end = gic_spi(190),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device i2c0_device = {
+ .name = "i2c-sh_mobile",
+ .id = 0,
+ .resource = i2c0_resources,
+ .num_resources = ARRAY_SIZE(i2c0_resources),
+};
+
+static struct platform_device i2c1_device = {
+ .name = "i2c-sh_mobile",
+ .id = 1,
+ .resource = i2c1_resources,
+ .num_resources = ARRAY_SIZE(i2c1_resources),
+};
+
+static struct platform_device i2c2_device = {
+ .name = "i2c-sh_mobile",
+ .id = 2,
+ .resource = i2c2_resources,
+ .num_resources = ARRAY_SIZE(i2c2_resources),
+};
+
+static struct platform_device i2c3_device = {
+ .name = "i2c-sh_mobile",
+ .id = 3,
+ .resource = i2c3_resources,
+ .num_resources = ARRAY_SIZE(i2c3_resources),
+};
+
+static struct platform_device i2c4_device = {
+ .name = "i2c-sh_mobile",
+ .id = 4,
+ .resource = i2c4_resources,
+ .num_resources = ARRAY_SIZE(i2c4_resources),
+};
+
+static struct platform_device *sh73a0_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
+ &scif8_device,
+ &cmt10_device,
+};
+
+static struct platform_device *sh73a0_late_devices[] __initdata = {
+ &i2c0_device,
+ &i2c1_device,
+ &i2c2_device,
+ &i2c3_device,
+ &i2c4_device,
+};
+
+void __init sh73a0_add_standard_devices(void)
+{
+ platform_add_devices(sh73a0_early_devices,
+ ARRAY_SIZE(sh73a0_early_devices));
+ platform_add_devices(sh73a0_late_devices,
+ ARRAY_SIZE(sh73a0_late_devices));
+}
+
+void __init sh73a0_add_early_devices(void)
+{
+ early_platform_add_devices(sh73a0_early_devices,
+ ARRAY_SIZE(sh73a0_early_devices));
+}
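
A minimal usage sketch of the two helpers above, assuming a hypothetical board file; the board-side function names are illustrative and not part of the patch:

	/* Early devices (SCIF consoles, CMT timer) are registered from the
	 * board's early init path, the rest from its machine init callback. */
	static void __init example_board_init(void)
	{
		sh73a0_add_standard_devices();
		/* board-specific platform devices would follow here */
	}

	static void __init example_board_early_init(void)
	{
		sh73a0_add_early_devices();
	}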
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index 73fb1a551ec6..608a1372b172 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -75,14 +75,14 @@ void __init ux500_init_irq(void)
static inline void ux500_cache_wait(void __iomem *reg, unsigned long mask)
{
/* wait for the operation to complete */
- while (readl(reg) & mask)
+ while (readl_relaxed(reg) & mask)
;
}
static inline void ux500_cache_sync(void)
{
void __iomem *base = __io_address(UX500_L2CC_BASE);
- writel(0, base + L2X0_CACHE_SYNC);
+ writel_relaxed(0, base + L2X0_CACHE_SYNC);
ux500_cache_wait(base + L2X0_CACHE_SYNC, 1);
}
@@ -107,7 +107,7 @@ static void ux500_l2x0_inv_all(void)
uint32_t l2x0_way_mask = (1<<16) - 1; /* Bitmask of active ways */
/* invalidate all ways */
- writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+ writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
ux500_cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
ux500_cache_sync();
}
diff --git a/arch/arm/mach-versatile/include/mach/vmalloc.h b/arch/arm/mach-versatile/include/mach/vmalloc.h
index ebd8a2543d3b..7d8e069ad51b 100644
--- a/arch/arm/mach-versatile/include/mach/vmalloc.h
+++ b/arch/arm/mach-versatile/include/mach/vmalloc.h
@@ -18,4 +18,4 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define VMALLOC_END 0xd8000000
+#define VMALLOC_END 0xd8000000UL
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 4414a01e1e8a..8206842ddd9e 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -772,7 +772,7 @@ config CACHE_L2X0
depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \
ARCH_NOMADIK || ARCH_OMAP4 || ARCH_S5PV310 || ARCH_TEGRA || \
- ARCH_U8500 || ARCH_VEXPRESS_CA9X4
+ ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE
default y
select OUTER_CACHE
select OUTER_CACHE_SYNC
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 17e7b0b57e49..55c17a6fb22f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -206,8 +206,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
*/
if (pfn_valid(pfn)) {
printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"
- KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
- KERN_WARNING "will fail in the next kernel release. Please fix your driver.\n");
+ "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
+ "will fail in the next kernel release. Please fix your driver.\n");
WARN_ON(1);
}
diff --git a/arch/arm/plat-mxc/devices/platform-imx-dma.c b/arch/arm/plat-mxc/devices/platform-imx-dma.c
index 02d989018059..3a705c7877dd 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-dma.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-dma.c
@@ -12,15 +12,7 @@
#include <mach/hardware.h>
#include <mach/devices-common.h>
-#ifdef SDMA_IS_MERGED
#include <mach/sdma.h>
-#else
-struct sdma_platform_data {
- int sdma_version;
- char *cpu_name;
- int to_version;
-};
-#endif
struct imx_imx_sdma_data {
resource_size_t iobase;
diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c
index e48340ec331e..17f724c9452d 100644
--- a/arch/arm/plat-mxc/devices/platform-spi_imx.c
+++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c
@@ -27,6 +27,7 @@ const struct imx_spi_imx_data imx21_cspi_data[] __initconst = {
imx_spi_imx_data_entry(MX21, CSPI, "imx21-cspi", _id, _hwid, SZ_4K)
imx21_cspi_data_entry(0, 1),
imx21_cspi_data_entry(1, 2),
+};
#endif
#ifdef CONFIG_ARCH_MX25
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c
index aedf9c1d645e..63cdc6025bd7 100644
--- a/arch/arm/plat-nomadik/timer.c
+++ b/arch/arm/plat-nomadik/timer.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2008 STMicroelectronics
* Copyright (C) 2010 Alessandro Rubini
+ * Copyright (C) 2010 Linus Walleij for ST-Ericsson
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2, as
@@ -16,11 +17,13 @@
#include <linux/clk.h>
#include <linux/jiffies.h>
#include <linux/err.h>
+#include <linux/cnt32_to_63.h>
+#include <linux/timer.h>
#include <asm/mach/time.h>
#include <plat/mtu.h>
-void __iomem *mtu_base; /* ssigned by machine code */
+void __iomem *mtu_base; /* Assigned by machine code */
/*
* Kernel assumes that sched_clock can be called early
@@ -48,16 +51,82 @@ static struct clocksource nmdk_clksrc = {
/*
* Override the global weak sched_clock symbol with this
* local implementation which uses the clocksource to get some
- * better resolution when scheduling the kernel. We accept that
- * this wraps around for now, since it is just a relative time
- * stamp. (Inspired by OMAP implementation.)
+ * better resolution when scheduling the kernel.
+ *
+ * Because the hardware timer period may be quite short
+ * (32.3 secs on the 133 MHz MTU timer selection on ux500)
+ * and because cnt32_to_63() needs to be called at least once per
+ * half period to work properly, a kernel keepwarm() timer is set up
+ * to ensure this requirement is always met.
+ *
+ * Also, the sched_clock timer will wrap around at some point;
+ * here we set it to run continuously for a year.
*/
+#define SCHED_CLOCK_MIN_WRAP 3600*24*365
+static struct timer_list cnt32_to_63_keepwarm_timer;
+static u32 sched_mult;
+static u32 sched_shift;
+
unsigned long long notrace sched_clock(void)
{
- return clocksource_cyc2ns(nmdk_clksrc.read(
- &nmdk_clksrc),
- nmdk_clksrc.mult,
- nmdk_clksrc.shift);
+ u64 cycles;
+
+ if (unlikely(!mtu_base))
+ return 0;
+
+ cycles = cnt32_to_63(-readl(mtu_base + MTU_VAL(0)));
+ /*
+ * sched_mult is guaranteed to be even so will
+ * shift out bit 63
+ */
+ return (cycles * sched_mult) >> sched_shift;
+}
+
+/* Just kick sched_clock every so often */
+static void cnt32_to_63_keepwarm(unsigned long data)
+{
+ mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + data));
+ (void) sched_clock();
+}
+
+/*
+ * Set up a timer to keep sched_clock()'s cnt32_to_63 algorithm warm
+ * once in half a 32bit timer wrap interval.
+ */
+static void __init nmdk_sched_clock_init(unsigned long rate)
+{
+ u32 v;
+ unsigned long delta;
+ u64 days;
+
+ /* Find the appropriate mult and shift factors */
+ clocks_calc_mult_shift(&sched_mult, &sched_shift,
+ rate, NSEC_PER_SEC, SCHED_CLOCK_MIN_WRAP);
+ /* We need to multiply by an even number to get rid of bit 63 */
+ if (sched_mult & 1)
+ sched_mult++;
+
+ /* Let's see what we get, take max counter and scale it */
+ days = (0xFFFFFFFFFFFFFFFFLLU * sched_mult) >> sched_shift;
+ do_div(days, NSEC_PER_SEC);
+ do_div(days, (3600*24));
+
+ pr_info("sched_clock: using %d bits @ %lu Hz wrap in %lu days\n",
+ (64 - sched_shift), rate, (unsigned long) days);
+
+ /*
+ * Program a timer to kick us at half 32bit wraparound
+ * Formula: seconds per wrap = (2^32) / f
+ */
+ v = 0xFFFFFFFFUL / rate;
+ /* We want half of the wrap time to keep cnt32_to_63 warm */
+ v /= 2;
+ pr_debug("sched_clock: prescaled timer rate: %lu Hz, "
+ "initialize keepwarm timer every %d seconds\n", rate, v);
+ /* Convert seconds to jiffies */
+ delta = msecs_to_jiffies(v*1000);
+ setup_timer(&cnt32_to_63_keepwarm_timer, cnt32_to_63_keepwarm, delta);
+ mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + delta));
}
/* Clockevent device: use one-shot mode */
@@ -161,13 +230,15 @@ void __init nmdk_timer_init(void)
writel(0, mtu_base + MTU_BGLR(0));
writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));
- /* Now the scheduling clock is ready */
+ /* Now the clock source is ready */
nmdk_clksrc.read = nmdk_read_timer;
if (clocksource_register(&nmdk_clksrc))
pr_err("timer: failed to initialize clock source %s\n",
nmdk_clksrc.name);
+ nmdk_sched_clock_init(rate);
+
/* Timer 1 is used for events */
clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
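
To make the keepwarm interval concrete, a worked example using the 133 MHz MTU rate mentioned in the comment above (illustrative arithmetic only):

	wrap  = 0xFFFFFFFF / 133000000   ->  ~32 s   (full 32-bit wrap, ~32.3 s)
	v     = wrap / 2                 ->  ~16 s   (cnt32_to_63 needs one call per half wrap)
	delta = msecs_to_jiffies(16 * 1000)           (keepwarm timer period in jiffies)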
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index cd0c090ebc54..b407bc8ad918 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -7,7 +7,6 @@
*/
#include <linux/module.h>
-#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 2b63b0191f52..efad12071c2e 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index 97478138e361..933bd388efb2 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 3a078ad3aa44..331de723c676 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -202,7 +202,7 @@ simscsi_readwrite10 (struct scsi_cmnd *sc, int mode)
}
static int
-simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+simscsi_queuecommand_lck (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
unsigned int target_id = sc->device->id;
char fname[MAX_ROOT_LEN+16];
@@ -326,6 +326,8 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(simscsi_queuecommand)
+
static int
simscsi_host_reset (struct scsi_cmnd *sc)
{
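
For context, DEF_SCSI_QCMD() generates the locked wrapper that the SCSI midlayer now calls in place of the old queuecommand entry point; a simplified sketch of what it provides here (see <scsi/scsi_host.h> for the real macro):

	int simscsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
	{
		unsigned long flags;
		int rc;

		spin_lock_irqsave(shost->host_lock, flags);
		rc = simscsi_queuecommand_lck(cmd, cmd->scsi_done);
		spin_unlock_irqrestore(shost->host_lock, flags);
		return rc;
	}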
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 18732ab23292..c2a1fc23dd75 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -18,7 +18,6 @@
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c
index 6d3390590e5b..e2a63af5d517 100644
--- a/arch/m68knommu/kernel/process.c
+++ b/arch/m68knommu/kernel/process.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index 0d0f8049a17b..e1b14a6ed544 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index ba430a03bc7a..30394081d9b6 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -28,7 +28,6 @@
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/vfs.h>
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 9779ece2b070..88a0ad14a9c9 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -20,7 +20,6 @@
#include <linux/times.h>
#include <linux/time.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b6447190e1a2..e625e9e034ae 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -4,6 +4,10 @@ config PPC32
bool
default y if !PPC64
+config 32BIT
+ bool
+ default y if PPC32
+
config 64BIT
bool
default y if PPC64
diff --git a/arch/powerpc/boot/div64.S b/arch/powerpc/boot/div64.S
index 722f360a32a9..d271ab542673 100644
--- a/arch/powerpc/boot/div64.S
+++ b/arch/powerpc/boot/div64.S
@@ -33,9 +33,10 @@ __div64_32:
cntlzw r0,r5 # we are shifting the dividend right
li r10,-1 # to make it < 2^32, and shifting
srw r10,r10,r0 # the divisor right the same amount,
- add r9,r4,r10 # rounding up (so the estimate cannot
+ addc r9,r4,r10 # rounding up (so the estimate cannot
andc r11,r6,r10 # ever be too large, only too small)
andc r9,r9,r10
+ addze r9,r9
or r11,r5,r11
rotlw r9,r9,r0
rotlw r11,r11,r0
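
The addc/addze pair matters when the rounding add carries out of 32 bits; an illustrative case (not taken from the patch):

	# r10 = 0xFFFFFFFF  (divisor has its top bit set, so r0 = 0)
	# r4  = 0x00000002  (high word of the dividend)
	# r4 + r10 = 0x1_00000001: a plain 'add' silently drops the carry;
	# 'addc' latches it and 'addze' folds it back into r9 after the 'andc',
	# so the rounded-up quotient estimate is not truncated.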
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 7a9db64f3f04..42850ee00ada 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -337,7 +337,7 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
/* FP registers 32 -> 63 */
#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
if (current)
- memcpy(mem, current->thread.evr[regno-32],
+ memcpy(mem, &current->thread.evr[regno-32],
dbg_reg_def[regno].size);
#else
/* fp registers not used by kernel, leave zero */
@@ -362,7 +362,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
if (regno >= 32 && regno < 64) {
/* FP registers 32 -> 63 */
#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
- memcpy(current->thread.evr[regno-32], mem,
+ memcpy(&current->thread.evr[regno-32], mem,
dbg_reg_def[regno].size);
#else
/* fp registers not used by kernel, leave zero */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 2a178b0ebcdf..ce6f61c6f871 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -497,9 +497,8 @@ static void __init emergency_stack_init(void)
}
/*
- * Called into from start_kernel, after lock_kernel has been called.
- * Initializes bootmem, which is unsed to manage page allocation until
- * mem_init is called.
+ * Called into from start_kernel, this initializes bootmem, which is used
+ * to manage page allocation until mem_init is called.
*/
void __init setup_arch(char **cmdline_p)
{
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index b1b6043a56c4..4e5bf1edc0f2 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -23,7 +23,6 @@
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 83f534d862db..5e9584405c45 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1123,7 +1123,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
else
#endif /* CONFIG_PPC_HAS_HASH_64K */
rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
- subpage_protection(pgdir, ea));
+ subpage_protection(mm, ea));
/* Dump some info in case of hash insertion failure, they should
* never happen so it is really useful to know if/when they do
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 8b04c54e596f..8526bd9d2aa3 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -138,8 +138,11 @@
cmpldi cr0,r15,0 /* Check for user region */
std r14,EX_TLB_ESR(r12) /* write crazy -1 to frame */
beq normal_tlb_miss
+
+ li r11,_PAGE_PRESENT|_PAGE_BAP_SX /* Base perm */
+ oris r11,r11,_PAGE_ACCESSED@h
/* XXX replace the RMW cycles with immediate loads + writes */
-1: mfspr r10,SPRN_MAS1
+ mfspr r10,SPRN_MAS1
cmpldi cr0,r15,8 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 36c0c449a899..2a030d89bbc6 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -585,6 +585,6 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
/* Finally limit subsequent allocations */
- memblock_set_current_limit(ppc64_memblock_base + ppc64_rma_size);
+ memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index c667f0f02c34..3139814f6439 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -47,6 +47,12 @@ config LPARCFG
config PPC_PSERIES_DEBUG
depends on PPC_PSERIES && PPC_EARLY_DEBUG
bool "Enable extra debug logging in platforms/pseries"
+ help
+ Say Y here if you want the pseries core to produce a bunch of
+ debug messages to the system log. Select this if you are having a
+ problem with the pseries core and want to see more of what is
+ going on. This does not enable debugging in lpar.c, which must
+ be manually done due to its verbosity.
default y
config PPC_SMLPAR
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 34b7dc12e731..17a11c82e6f8 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -21,8 +21,6 @@
* Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com>
*/
-#undef DEBUG
-
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 4b7a062dee15..5fcc92a12d3e 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -25,8 +25,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#undef DEBUG
-
#include <linux/pci.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 45e0c6199f36..05221b13ffb1 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -6,6 +6,18 @@ config TRACE_IRQFLAGS_SUPPORT
source "lib/Kconfig.debug"
+config STRICT_DEVMEM
+ def_bool y
+ prompt "Filter access to /dev/mem"
+ ---help---
+ This option restricts access to /dev/mem. If this option is
+ disabled, you allow userspace access to all memory, including
+ kernel and userspace memory. Accidental memory access is likely
+ to be disastrous.
+ Raw memory access is, however, required by experts who want to debug the kernel.
+
+ If you are unsure, say Y.
+
config DEBUG_STRICT_USER_COPY_CHECKS
bool "Strict user copy size checks"
---help---
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index a8729ea7e9ac..3c987e9ec8d6 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -130,6 +130,11 @@ struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
+static inline int devmem_is_allowed(unsigned long pfn)
+{
+ return 0;
+}
+
#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 1e6449c79ab6..53acaa86dd94 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -25,7 +25,6 @@
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d60fc4398516..2564793ec2b6 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -30,6 +30,7 @@
#include <asm/sections.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/hardirq.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -212,7 +213,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
/* Set the PER control regs, turns on single step for this address */
__ctl_load(kprobe_per_regs, 9, 11);
regs->psw.mask |= PSW_MASK_PER;
- regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+ regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
@@ -239,7 +240,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
__get_cpu_var(current_kprobe) = p;
/* Save the interrupt and per flags */
kcb->kprobe_saved_imask = regs->psw.mask &
- (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+ (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
/* Save the control regs that govern PER */
__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
}
@@ -316,8 +317,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
return 1;
ss_probe:
- if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO))
- local_irq_disable();
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
@@ -350,6 +349,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct hlist_node *node, *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+ kprobe_opcode_t *correct_ret_addr = NULL;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
@@ -372,10 +372,32 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
/* another task is sharing our hash bucket */
continue;
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
+ orig_ret_address = (unsigned long)ri->ret_addr;
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+ correct_ret_addr = ri->ret_addr;
+ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
orig_ret_address = (unsigned long)ri->ret_addr;
+
+ if (ri->rp && ri->rp->handler) {
+ ri->ret_addr = correct_ret_addr;
+ ri->rp->handler(ri, regs);
+ }
+
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address) {
@@ -387,7 +409,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
break;
}
}
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
reset_current_kprobe();
@@ -465,8 +487,6 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
goto out;
}
reset_current_kprobe();
- if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO))
- local_irq_enable();
out:
preempt_enable_no_resched();
@@ -482,7 +502,7 @@ out:
return 1;
}
-int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -508,8 +528,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
restore_previous_kprobe(kcb);
else {
reset_current_kprobe();
- if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO))
- local_irq_enable();
}
preempt_enable_no_resched();
break;
@@ -553,6 +571,18 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
return 0;
}
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ int ret;
+
+ if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+ local_irq_disable();
+ ret = kprobe_trap_handler(regs, trapnr);
+ if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+ local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
+ return ret;
+}
+
/*
 * Wrapper routine for handling exceptions.
*/
@@ -560,8 +590,12 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
+ struct pt_regs *regs = args->regs;
int ret = NOTIFY_DONE;
+ if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+ local_irq_disable();
+
switch (val) {
case DIE_BPT:
if (kprobe_handler(args->regs))
@@ -572,16 +606,17 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
ret = NOTIFY_STOP;
break;
case DIE_TRAP:
- /* kprobe_running() needs smp_processor_id() */
- preempt_disable();
- if (kprobe_running() &&
- kprobe_fault_handler(args->regs, args->trapnr))
+ if (!preemptible() && kprobe_running() &&
+ kprobe_trap_handler(args->regs, args->trapnr))
ret = NOTIFY_STOP;
- preempt_enable();
break;
default:
break;
}
+
+ if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+ local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
+
return ret;
}
@@ -595,6 +630,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
/* setup return addr to the jprobe handler routine */
regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
+ regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
/* r14 is the function return address */
kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 38e641cdd977..45b405ca2567 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -20,18 +20,17 @@
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
- unsigned long mask, result;
+ unsigned long mask;
pte_t *ptep, pte;
struct page *page;
- result = write ? 0 : _PAGE_RO;
- mask = result | _PAGE_INVALID | _PAGE_SPECIAL;
+ mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
do {
pte = *ptep;
barrier();
- if ((pte_val(pte) & mask) != result)
+ if ((pte_val(pte) & mask) != 0)
return 0;
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
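
The simplified check behaves as follows (spelling out the mask logic built just above):

	/*
	 * write == 0: mask = _PAGE_INVALID | _PAGE_SPECIAL
	 *             -> reject the pte if it is invalid or special
	 * write == 1: mask |= _PAGE_RO
	 *             -> additionally reject read-only ptes
	 * (pte_val(pte) & mask) != 0 bails out to the slow path; otherwise
	 * the fast gup path may take a reference on the page.
	 */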
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 46d5179c9f49..e3c73cdd8c90 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -199,10 +199,13 @@ extern unsigned long get_wchan(struct task_struct *p);
#define ARCH_HAS_PREFETCHW
static inline void prefetch(void *x)
{
- __asm__ __volatile__ ("pref @%0\n\t" : : "r" (x) : "memory");
+ __builtin_prefetch(x, 0, 3);
}
-#define prefetchw(x) prefetch(x)
+static inline void prefetchw(void *x)
+{
+ __builtin_prefetch(x, 1, 3);
+}
#endif
#endif /* __KERNEL__ */
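
__builtin_prefetch(addr, rw, locality) takes a read/write hint (0 or 1) and a temporal-locality hint (3 = keep in all cache levels), so the two helpers above map directly onto it; a small illustrative caller (hypothetical):

	static inline void warm_buffer(void *buf)
	{
		prefetch(buf);    /* read hint:  __builtin_prefetch(buf, 0, 3) */
		prefetchw(buf);   /* write hint: __builtin_prefetch(buf, 1, 3) */
	}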
diff --git a/arch/sh/include/mach-common/mach/romimage.h b/arch/sh/include/mach-common/mach/romimage.h
index 08fb42269ecd..3670455faaac 100644
--- a/arch/sh/include/mach-common/mach/romimage.h
+++ b/arch/sh/include/mach-common/mach/romimage.h
@@ -4,7 +4,7 @@
#else /* __ASSEMBLY__ */
-extern inline void mmcif_update_progress(int nr)
+static inline void mmcif_update_progress(int nr)
{
}
diff --git a/arch/sh/include/mach-ecovec24/mach/romimage.h b/arch/sh/include/mach-ecovec24/mach/romimage.h
index 1dcf5e6c8d83..d63ef51ec186 100644
--- a/arch/sh/include/mach-ecovec24/mach/romimage.h
+++ b/arch/sh/include/mach-ecovec24/mach/romimage.h
@@ -35,7 +35,7 @@
#define HIZCRA 0xa4050158
#define PGDR 0xa405012c
-extern inline void mmcif_update_progress(int nr)
+static inline void mmcif_update_progress(int nr)
{
/* disable Hi-Z for LED pins */
__raw_writew(__raw_readw(HIZCRA) & ~(1 << 1), HIZCRA);
diff --git a/arch/sh/include/mach-kfr2r09/mach/romimage.h b/arch/sh/include/mach-kfr2r09/mach/romimage.h
index 976256a323f2..7a883167c846 100644
--- a/arch/sh/include/mach-kfr2r09/mach/romimage.h
+++ b/arch/sh/include/mach-kfr2r09/mach/romimage.h
@@ -23,7 +23,7 @@
#else /* __ASSEMBLY__ */
-extern inline void mmcif_update_progress(int nr)
+static inline void mmcif_update_progress(int nr)
{
}
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
index 4eabc68cd753..b601fa3978d1 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
@@ -110,7 +110,7 @@ static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate)
return 0;
}
-static int shoc_clk_set_rate(struct clk *clk, unsigned long rate, int algo_id)
+static int shoc_clk_set_rate(struct clk *clk, unsigned long rate)
{
unsigned long frqcr3;
unsigned int tmp;
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 81f58371613d..8c6a350df751 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -88,7 +88,7 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
}
if (op & CACHEFLUSH_I)
- flush_cache_all();
+ flush_icache_range(addr, addr+len);
up_read(&current->mm->mmap_sem);
return 0;
diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
index 3b6eb34c43fa..3e70f851cdc6 100644
--- a/arch/sh/kernel/vsyscall/vsyscall-trapa.S
+++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
@@ -8,9 +8,9 @@ __kernel_vsyscall:
* fill out .eh_frame -- PFM. */
.LEND_vsyscall:
.size __kernel_vsyscall,.-.LSTART_vsyscall
- .previous
.section .eh_frame,"a",@progbits
+ .previous
.LCIE:
.ualong .LCIE_end - .LCIE_start
.LCIE_start:
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 7524689b03d2..16582d85368a 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -12,7 +12,6 @@
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index e6375a750d9a..6db18c6927fb 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -17,7 +17,6 @@
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 675c9e11ada5..42b282fa6112 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -19,7 +19,6 @@
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/ipc.h>
#include <asm/uaccess.h>
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index 12b9f352595f..4491f4cb2695 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -16,7 +16,6 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/perf_event.h>
enum direction {
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c
index b351770cbdd6..3107381e576d 100644
--- a/arch/sparc/kernel/windows.c
+++ b/arch/sparc/kernel/windows.c
@@ -9,7 +9,6 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 07ec8a865c1d..e11b5fcb70eb 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -329,6 +329,18 @@ endmenu # Tilera-specific configuration
menu "Bus options"
+config PCI
+ bool "PCI support"
+ default y
+ select PCI_DOMAINS
+ ---help---
+ Enable PCI root complex support, so PCIe endpoint devices can
+ be attached to the Tile chip. Many, but not all, PCI devices
+ are supported under Tilera's root complex driver.
+
+config PCI_DOMAINS
+ bool
+
config NO_IOMEM
def_bool !PCI
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h
index c5741da4eeac..14a3f8556ace 100644
--- a/arch/tile/include/asm/cacheflush.h
+++ b/arch/tile/include/asm/cacheflush.h
@@ -137,4 +137,56 @@ static inline void finv_buffer(void *buffer, size_t size)
mb_incoherent();
}
+/*
+ * Flush & invalidate a VA range that is homed remotely on a single core,
+ * waiting until the memory controller holds the flushed values.
+ */
+static inline void finv_buffer_remote(void *buffer, size_t size)
+{
+ char *p;
+ int i;
+
+ /*
+ * Flush and invalidate the buffer out of the local L1/L2
+ * and request the home cache to flush and invalidate as well.
+ */
+ __finv_buffer(buffer, size);
+
+ /*
+ * Wait for the home cache to acknowledge that it has processed
+ * all the flush-and-invalidate requests. This does not mean
+ * that the flushed data has reached the memory controller yet,
+ * but it does mean the home cache is processing the flushes.
+ */
+ __insn_mf();
+
+ /*
+ * Issue a load to the last cache line, which can't complete
+ * until all the previously-issued flushes to the same memory
+ * controller have also completed. If we weren't striping
+ * memory, that one load would be sufficient, but since we may
+ * be, we also need to back up to the last load issued to
+ * another memory controller, which would be the point where
+ * we crossed an 8KB boundary (the granularity of striping
+ * across memory controllers). Keep backing up and doing this
+ * until we are before the beginning of the buffer, or have
+ * hit all the controllers.
+ */
+ for (i = 0, p = (char *)buffer + size - 1;
+ i < (1 << CHIP_LOG_NUM_MSHIMS()) && p >= (char *)buffer;
+ ++i) {
+ const unsigned long STRIPE_WIDTH = 8192;
+
+ /* Force a load instruction to issue. */
+ *(volatile char *)p;
+
+ /* Jump to end of previous stripe. */
+ p -= STRIPE_WIDTH;
+ p = (char *)((unsigned long)p | (STRIPE_WIDTH - 1));
+ }
+
+ /* Wait for the loads (and thus flushes) to have completed. */
+ __insn_mf();
+}
+
#endif /* _ASM_TILE_CACHEFLUSH_H */
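
The pointer arithmetic in the stripe walk above works out as follows (illustrative addresses, 8 KB stripes):

	/*
	 *   p           = 0x12345   (inside stripe [0x12000, 0x13fff])
	 *   p -= 8192   -> 0x10345
	 *   p |= 0x1fff -> 0x11fff   (last byte of the previous stripe)
	 * so each iteration issues exactly one load per memory-controller
	 * stripe until the loop walks off the front of the buffer.
	 */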
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index ee43328713ab..d3cbb9b14cbe 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -55,9 +55,6 @@ extern void iounmap(volatile void __iomem *addr);
#define ioremap_writethrough(physaddr, size) ioremap(physaddr, size)
#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size)
-void __iomem *ioport_map(unsigned long port, unsigned int len);
-extern inline void ioport_unmap(void __iomem *addr) {}
-
#define mmiowb()
/* Conversion between virtual and physical mappings. */
@@ -189,12 +186,22 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
* we never run, uses them unconditionally.
*/
-static inline int ioport_panic(void)
+static inline long ioport_panic(void)
{
panic("inb/outb and friends do not exist on tile");
return 0;
}
+static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
+{
+ return (void __iomem *) ioport_panic();
+}
+
+static inline void ioport_unmap(void __iomem *addr)
+{
+ ioport_panic();
+}
+
static inline u8 inb(unsigned long addr)
{
return ioport_panic();
diff --git a/arch/tile/include/asm/pci-bridge.h b/arch/tile/include/asm/pci-bridge.h
deleted file mode 100644
index e853b0e2793b..000000000000
--- a/arch/tile/include/asm/pci-bridge.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_PCI_BRIDGE_H
-#define _ASM_TILE_PCI_BRIDGE_H
-
-#include <linux/ioport.h>
-#include <linux/pci.h>
-
-struct device_node;
-struct pci_controller;
-
-/*
- * pci_io_base returns the memory address at which you can access
- * the I/O space for PCI bus number `bus' (or NULL on error).
- */
-extern void __iomem *pci_bus_io_base(unsigned int bus);
-extern unsigned long pci_bus_io_base_phys(unsigned int bus);
-extern unsigned long pci_bus_mem_base_phys(unsigned int bus);
-
-/* Allocate a new PCI host bridge structure */
-extern struct pci_controller *pcibios_alloc_controller(void);
-
-/* Helper function for setting up resources */
-extern void pci_init_resource(struct resource *res, unsigned long start,
- unsigned long end, int flags, char *name);
-
-/* Get the PCI host controller for a bus */
-extern struct pci_controller *pci_bus_to_hose(int bus);
-
-/*
- * Structure of a PCI controller (host bridge)
- */
-struct pci_controller {
- int index; /* PCI domain number */
- struct pci_bus *root_bus;
-
- int first_busno;
- int last_busno;
-
- int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */
- int hv_mem_fd; /* fd to Hypervisor for MMIO operations */
-
- struct pci_ops *ops;
-
- int irq_base; /* Base IRQ from the Hypervisor */
- int plx_gen1; /* flag for PLX Gen 1 configuration */
-
- /* Address ranges that are routed to this controller/bridge. */
- struct resource mem_resources[3];
-};
-
-static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
-{
- return bus->sysdata;
-}
-
-extern void setup_indirect_pci_nomap(struct pci_controller *hose,
- void __iomem *cfg_addr, void __iomem *cfg_data);
-extern void setup_indirect_pci(struct pci_controller *hose,
- u32 cfg_addr, u32 cfg_data);
-extern void setup_grackle(struct pci_controller *hose);
-
-extern unsigned char common_swizzle(struct pci_dev *, unsigned char *);
-
-/*
- * The following code swizzles for exactly one bridge. The routine
- * common_swizzle below handles multiple bridges. But there are a
- * some boards that don't follow the PCI spec's suggestion so we
- * break this piece out separately.
- */
-static inline unsigned char bridge_swizzle(unsigned char pin,
- unsigned char idsel)
-{
- return (((pin-1) + idsel) % 4) + 1;
-}
-
-/*
- * The following macro is used to lookup irqs in a standard table
- * format for those PPC systems that do not already have PCI
- * interrupts properly routed.
- */
-/* FIXME - double check this */
-#define PCI_IRQ_TABLE_LOOKUP ({ \
- long _ctl_ = -1; \
- if (idsel >= min_idsel && idsel <= max_idsel && pin <= irqs_per_slot) \
- _ctl_ = pci_irq_table[idsel - min_idsel][pin-1]; \
- _ctl_; \
-})
-
-/*
- * Scan the buses below a given PCI host bridge and assign suitable
- * resources to all devices found.
- */
-extern int pciauto_bus_scan(struct pci_controller *, int);
-
-#ifdef CONFIG_PCI
-extern unsigned long pci_address_to_pio(phys_addr_t address);
-#else
-static inline unsigned long pci_address_to_pio(phys_addr_t address)
-{
- return (unsigned long)-1;
-}
-#endif
-
-#endif /* _ASM_TILE_PCI_BRIDGE_H */
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h
index b0c15da2d5d5..c3fc458a0d32 100644
--- a/arch/tile/include/asm/pci.h
+++ b/arch/tile/include/asm/pci.h
@@ -15,7 +15,29 @@
#ifndef _ASM_TILE_PCI_H
#define _ASM_TILE_PCI_H
-#include <asm/pci-bridge.h>
+#include <linux/pci.h>
+
+/*
+ * Structure of a PCI controller (host bridge)
+ */
+struct pci_controller {
+ int index; /* PCI domain number */
+ struct pci_bus *root_bus;
+
+ int first_busno;
+ int last_busno;
+
+ int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */
+ int hv_mem_fd; /* fd to Hypervisor for MMIO operations */
+
+ struct pci_ops *ops;
+
+ int irq_base; /* Base IRQ from the Hypervisor */
+ int plx_gen1; /* flag for PLX Gen 1 configuration */
+
+ /* Address ranges that are routed to this controller/bridge. */
+ struct resource mem_resources[3];
+};
/*
* The hypervisor maps the entirety of CPA-space as bus addresses, so
@@ -24,56 +46,12 @@
*/
#define PCI_DMA_BUS_IS_PHYS 1
-struct pci_controller *pci_bus_to_hose(int bus);
-unsigned char __init common_swizzle(struct pci_dev *dev, unsigned char *pinp);
int __init tile_pci_init(void);
-void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-void __devinit pcibios_fixup_bus(struct pci_bus *bus);
-int __devinit _tile_cfg_read(struct pci_controller *hose,
- int bus,
- int slot,
- int function,
- int offset,
- int size,
- u32 *val);
-int __devinit _tile_cfg_write(struct pci_controller *hose,
- int bus,
- int slot,
- int function,
- int offset,
- int size,
- u32 val);
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
-/*
- * These are used to to config reads and writes in the early stages of
- * setup before the driver infrastructure has been set up enough to be
- * able to do config reads and writes.
- */
-#define early_cfg_read(where, size, value) \
- _tile_cfg_read(controller, \
- current_bus, \
- pci_slot, \
- pci_fn, \
- where, \
- size, \
- value)
-
-#define early_cfg_write(where, size, value) \
- _tile_cfg_write(controller, \
- current_bus, \
- pci_slot, \
- pci_fn, \
- where, \
- size, \
- value)
-
-
-
-#define PCICFG_BYTE 1
-#define PCICFG_WORD 2
-#define PCICFG_DWORD 4
+void __devinit pcibios_fixup_bus(struct pci_bus *bus);
#define TILE_NUM_PCIE 2
@@ -88,33 +66,33 @@ static inline int pci_proc_domain(struct pci_bus *bus)
}
/*
- * I/O space is currently not supported.
+ * pcibios_assign_all_busses() tells whether or not the bus numbers
+ * should be reassigned, in case the BIOS didn't do it correctly, or
+ * in case we don't have a BIOS and we want to let Linux do it.
*/
+static inline int pcibios_assign_all_busses(void)
+{
+ return 1;
+}
-#define TILE_PCIE_LOWER_IO 0x0
-#define TILE_PCIE_UPPER_IO 0x10000
-#define TILE_PCIE_PCIE_IO_SIZE 0x0000FFFF
-
-#define _PAGE_NO_CACHE 0
-#define _PAGE_GUARDED 0
-
-
-#define pcibios_assign_all_busses() pci_assign_all_buses
-extern int pci_assign_all_buses;
-
+/*
+ * No special bus mastering setup handling.
+ */
static inline void pcibios_set_master(struct pci_dev *dev)
{
- /* No special bus mastering setup handling */
}
#define PCIBIOS_MIN_MEM 0
-#define PCIBIOS_MIN_IO TILE_PCIE_LOWER_IO
+#define PCIBIOS_MIN_IO 0
/*
* This flag tells if the platform is TILEmpower that needs
* special configuration for the PLX switch chip.
*/
-extern int blade_pci;
+extern int tile_plx_gen1;
+
+/* Use any cpu for PCI. */
+#define cpumask_of_pcibus(bus) cpu_online_mask
/* implement the pci_ DMA API in terms of the generic device dma_ one */
#include <asm-generic/pci-dma-compat.h>
@@ -122,7 +100,4 @@ extern int blade_pci;
/* generic pci stuff */
#include <asm-generic/pci.h>
-/* Use any cpu for PCI. */
-#define cpumask_of_pcibus(bus) cpu_online_mask
-
#endif /* _ASM_TILE_PCI_H */
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 1747ff3946b2..a9e7c8760334 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -292,8 +292,18 @@ extern int kstack_hash;
/* Are we using huge pages in the TLB for kernel data? */
extern int kdata_huge;
+/* Support standard Linux prefetching. */
+#define ARCH_HAS_PREFETCH
+#define prefetch(x) __builtin_prefetch(x)
#define PREFETCH_STRIDE CHIP_L2_LINE_SIZE()
+/* Bring a value into the L1D, faulting the TLB if necessary. */
+#ifdef __tilegx__
+#define prefetch_L1(x) __insn_prefetch_l1_fault((void *)(x))
+#else
+#define prefetch_L1(x) __insn_prefetch_L1((void *)(x))
+#endif
+
#else /* __ASSEMBLY__ */
/* Do some slow action (e.g. read a slow SPR). */
diff --git a/arch/tile/include/hv/drv_xgbe_impl.h b/arch/tile/include/hv/drv_xgbe_impl.h
new file mode 100644
index 000000000000..3a73b2b44913
--- /dev/null
+++ b/arch/tile/include/hv/drv_xgbe_impl.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * @file drivers/xgbe/impl.h
+ * Implementation details for the NetIO library.
+ */
+
+#ifndef __DRV_XGBE_IMPL_H__
+#define __DRV_XGBE_IMPL_H__
+
+#include <hv/netio_errors.h>
+#include <hv/netio_intf.h>
+#include <hv/drv_xgbe_intf.h>
+
+
+/** How many groups we have (log2). */
+#define LOG2_NUM_GROUPS (12)
+/** How many groups we have. */
+#define NUM_GROUPS (1 << LOG2_NUM_GROUPS)
+
+/** Number of output requests we'll buffer per tile. */
+#define EPP_REQS_PER_TILE (32)
+
+/** Words used in an eDMA command without checksum acceleration. */
+#define EDMA_WDS_NO_CSUM 8
+/** Words used in an eDMA command with checksum acceleration. */
+#define EDMA_WDS_CSUM 10
+/** Total available words in the eDMA command FIFO. */
+#define EDMA_WDS_TOTAL 128
+
+
+/*
+ * FIXME: These definitions are internal and should have underscores!
+ * NOTE: The actual numeric values here are intentional and allow us to
+ * optimize the concept "if small ... else if large ... else ...", by
+ * checking for the low bit being set, and then for non-zero.
+ * These are used as array indices, so they must have the values (0, 1, 2)
+ * in some order.
+ */
+#define SIZE_SMALL (1) /**< Small packet queue. */
+#define SIZE_LARGE (2) /**< Large packet queue. */
+#define SIZE_JUMBO (0) /**< Jumbo packet queue. */
+
+/** The number of "SIZE_xxx" values. */
+#define NETIO_NUM_SIZES 3
+
+
+/*
+ * Default numbers of packets for IPP drivers. These values are chosen
+ * such that CIPP1 will not overflow its L2 cache.
+ */
+
+/** The default number of small packets. */
+#define NETIO_DEFAULT_SMALL_PACKETS 2750
+/** The default number of large packets. */
+#define NETIO_DEFAULT_LARGE_PACKETS 2500
+/** The default number of jumbo packets. */
+#define NETIO_DEFAULT_JUMBO_PACKETS 250
+
+
+/** Log2 of the size of a memory arena. */
+#define NETIO_ARENA_SHIFT 24 /* 16 MB */
+/** Size of a memory arena. */
+#define NETIO_ARENA_SIZE (1 << NETIO_ARENA_SHIFT)
+
+
+/** A queue of packets.
+ *
+ * This structure partially defines a queue of packets waiting to be
+ * processed. The queue as a whole is written to by an interrupt handler and
+ * read by non-interrupt code; this data structure is what's touched by the
+ * interrupt handler. The other part of the queue state, the read offset, is
+ * kept in user space, not in hypervisor space, so it is in a separate data
+ * structure.
+ *
+ * The read offset (__packet_receive_read in the user part of the queue
+ * structure) points to the next packet to be read. When the read offset is
+ * equal to the write offset, the queue is empty; therefore the queue must
+ * contain one more slot than the required maximum queue size.
+ *
+ * Here's an example of all 3 state variables and what they mean. All
+ * pointers move left to right.
+ *
+ * @code
+ * I I V V V V I I I I
+ * 0 1 2 3 4 5 6 7 8 9 10
+ * ^ ^ ^ ^
+ * | | |
+ * | | __last_packet_plus_one
+ * | __buffer_write
+ * __packet_receive_read
+ * @endcode
+ *
+ * This queue has 10 slots, and thus can hold 9 packets (_last_packet_plus_one
+ * = 10). The read pointer is at 2, and the write pointer is at 6; thus,
+ * there are valid, unread packets in slots 2, 3, 4, and 5. The remaining
+ * slots are invalid (do not contain a packet).
+ */
+typedef struct {
+ /** Byte offset of the next notify packet to be written: zero for the first
+ * packet on the queue, sizeof (netio_pkt_t) for the second packet on the
+ * queue, etc. */
+ volatile uint32_t __packet_write;
+
+ /** Offset of the packet after the last valid packet (i.e., when any
+ * pointer is incremented to this value, it wraps back to zero). */
+ uint32_t __last_packet_plus_one;
+}
+__netio_packet_queue_t;
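
The occupancy in the example above follows the usual ring-buffer arithmetic; with 10 slots, read offset 2 and write offset 6 (illustrative, expressed in slot units rather than byte offsets):

	/* valid packets = (write - read + N) % N = (6 - 2 + 10) % 10 = 4,
	 * i.e. slots 2, 3, 4 and 5; the queue is empty when read == write,
	 * which is why it can hold at most N - 1 = 9 packets. */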
+
+
+/** A queue of buffers.
+ *
+ * This structure partially defines a queue of empty buffers which have been
+ * obtained via requests to the IPP. (The elements of the queue are packet
+ * handles, which are transformed into a full netio_pkt_t when the buffer is
+ * retrieved.) The queue as a whole is written to by an interrupt handler and
+ * read by non-interrupt code; this data structure is what's touched by the
+ * interrupt handler. The other parts of the queue state, the read offset and
+ * requested write offset, are kept in user space, not in hypervisor space, so
+ * they are in a separate data structure.
+ *
+ * The read offset (__buffer_read in the user part of the queue structure)
+ * points to the next buffer to be read. When the read offset is equal to the
+ * write offset, the queue is empty; therefore the queue must contain one more
+ * slot than the required maximum queue size.
+ *
+ * The requested write offset (__buffer_requested_write in the user part of
+ * the queue structure) points to the slot which will hold the next buffer we
+ * request from the IPP, once we get around to sending such a request. When
+ * the requested write offset is equal to the write offset, no requests for
+ * new buffers are outstanding; when the requested write offset is one greater
+ * than the read offset, no more requests may be sent.
+ *
+ * Note that, unlike the packet_queue, the buffer_queue places incoming
+ * buffers at decreasing addresses. This makes the check for "is it time to
+ * wrap the buffer pointer" cheaper in the assembly code which receives new
+ * buffers, and means that the value which defines the queue size,
+ * __last_buffer, is different than in the packet queue. Also, the offset
+ * used in the packet_queue is already scaled by the size of a packet; here we
+ * use unscaled slot indices for the offsets. (These differences are
+ * historical, and in the future it's possible that the packet_queue will look
+ * more like this queue.)
+ *
+ * @code
+ * Here's an example of all 4 state variables and what they mean. Remember:
+ * all pointers move right to left.
+ *
+ * V V V I I R R V V V
+ * 0 1 2 3 4 5 6 7 8 9
+ * ^ ^ ^ ^
+ * | | | |
+ * | | | __last_buffer
+ * | | __buffer_write
+ * | __buffer_requested_write
+ * __buffer_read
+ * @endcode
+ *
+ * This queue has 10 slots, and thus can hold 9 buffers (_last_buffer = 9).
+ * The read pointer is at 2, and the write pointer is at 6; thus, there are
+ * valid, unread buffers in slots 2, 1, 0, 9, 8, and 7. The requested write
+ * pointer is at 4; thus, requests have been made to the IPP for buffers which
+ * will be placed in slots 6 and 5 when they arrive. Finally, the remaining
+ * slots are invalid (do not contain a buffer).
+ */
+typedef struct
+{
+ /** Ordinal number of the next buffer to be written: 0 for the first slot in
+ * the queue, 1 for the second slot in the queue, etc. */
+ volatile uint32_t __buffer_write;
+
+ /** Ordinal number of the last buffer (i.e., when any pointer is decremented
+ * below zero, it is reloaded with this value). */
+ uint32_t __last_buffer;
+}
+__netio_buffer_queue_t;
+
+
+/**
+ * An object for providing Ethernet packets to a process.
+ */
+typedef struct __netio_queue_impl_t
+{
+ /** The queue of packets waiting to be received. */
+ __netio_packet_queue_t __packet_receive_queue;
+ /** The intr bit mask that IDs this device. */
+ unsigned int __intr_id;
+ /** Offset to queues of empty buffers, one per size. */
+ uint32_t __buffer_queue[NETIO_NUM_SIZES];
+ /** The address of the first EPP tile, or -1 if no EPP. */
+ /* ISSUE: Actually this is always "0" or "~0". */
+ uint32_t __epp_location;
+ /** The queue ID that this queue represents. */
+ unsigned int __queue_id;
+ /** Number of acknowledgements received. */
+ volatile uint32_t __acks_received;
+ /** Last completion number received for packet_sendv. */
+ volatile uint32_t __last_completion_rcv;
+ /** Number of packets allowed to be outstanding. */
+ uint32_t __max_outstanding;
+ /** First VA available for packets. */
+ void* __va_0;
+ /** First VA in second range available for packets. */
+ void* __va_1;
+ /** Padding to align the "__packets" field to the size of a netio_pkt_t. */
+ uint32_t __padding[3];
+ /** The packets themselves. */
+ netio_pkt_t __packets[0];
+}
+netio_queue_impl_t;
+
+
+/**
+ * An object for managing the user end of a NetIO queue.
+ */
+typedef struct __netio_queue_user_impl_t
+{
+ /** The next incoming packet to be read. */
+ uint32_t __packet_receive_read;
+ /** The next empty buffers to be read, one index per size. */
+ uint8_t __buffer_read[NETIO_NUM_SIZES];
+ /** Where the empty buffer we next request from the IPP will go, one index
+ * per size. */
+ uint8_t __buffer_requested_write[NETIO_NUM_SIZES];
+ /** PCIe interface flag. */
+ uint8_t __pcie;
+ /** Number of packets left to be received before we send a credit update. */
+ uint32_t __receive_credit_remaining;
+ /** Value placed in __receive_credit_remaining when it reaches zero. */
+ uint32_t __receive_credit_interval;
+ /** First fast I/O routine index. */
+ uint32_t __fastio_index;
+ /** Number of acknowledgements expected. */
+ uint32_t __acks_outstanding;
+ /** Last completion number requested. */
+ uint32_t __last_completion_req;
+ /** File descriptor for driver. */
+ int __fd;
+}
+netio_queue_user_impl_t;
+
+
+#define NETIO_GROUP_CHUNK_SIZE 64 /**< Max # groups in one IPP request */
+#define NETIO_BUCKET_CHUNK_SIZE 64 /**< Max # buckets in one IPP request */
+
+
+/** Internal structure used to convey packet send information to the
+ * hypervisor. FIXME: Actually, it's not used for that anymore, but
+ * netio_packet_send() still uses it internally.
+ */
+typedef struct
+{
+ uint16_t flags; /**< Packet flags (__NETIO_SEND_FLG_xxx) */
+ uint16_t transfer_size; /**< Size of packet */
+ uint32_t va; /**< VA of start of packet */
+ __netio_pkt_handle_t handle; /**< Packet handle */
+ uint32_t csum0; /**< First checksum word */
+ uint32_t csum1; /**< Second checksum word */
+}
+__netio_send_cmd_t;
+
+
+/** Flags used in two contexts:
+ * - As the "flags" member in the __netio_send_cmd_t, above; used only
+ * for netio_pkt_send_{prepare,commit}.
+ * - As part of the flags passed to the various send packet fast I/O calls.
+ */
+
+/** Need acknowledgement on this packet. Note that some code in the
+ * normal send_pkt fast I/O handler assumes that this is equal to 1. */
+#define __NETIO_SEND_FLG_ACK 0x1
+
+/** Do checksum on this packet. (Only used with the __netio_send_cmd_t;
+ * normal packet sends use a special fast I/O index to denote checksumming,
+ * and multi-segment sends test the checksum descriptor.) */
+#define __NETIO_SEND_FLG_CSUM 0x2
+
+/** Get a completion on this packet. Only used with multi-segment sends. */
+#define __NETIO_SEND_FLG_COMPLETION 0x4
+
+/** Position of the number-of-extra-segments value in the flags word.
+ Only used with multi-segment sends. */
+#define __NETIO_SEND_FLG_XSEG_SHIFT 3
+
+/** Width of the number-of-extra-segments value in the flags word. */
+#define __NETIO_SEND_FLG_XSEG_WIDTH 2
+
+#endif /* __DRV_XGBE_IMPL_H__ */
diff --git a/arch/tile/include/hv/drv_xgbe_intf.h b/arch/tile/include/hv/drv_xgbe_intf.h
new file mode 100644
index 000000000000..146e47d5334b
--- /dev/null
+++ b/arch/tile/include/hv/drv_xgbe_intf.h
@@ -0,0 +1,615 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * @file drv_xgbe_intf.h
+ * Interface to the hypervisor XGBE driver.
+ */
+
+#ifndef __DRV_XGBE_INTF_H__
+#define __DRV_XGBE_INTF_H__
+
+/**
+ * An object for forwarding VAs and PAs to the hypervisor.
+ * @ingroup types
+ *
+ * This allows the supervisor to specify a number of areas of memory to
+ * store packet buffers.
+ */
+typedef struct
+{
+ /** The physical address of the memory. */
+ HV_PhysAddr pa;
+ /** Page table entry for the memory. This is only used to derive the
+ * memory's caching mode; the PA bits are ignored. */
+ HV_PTE pte;
+ /** The virtual address of the memory. */
+ HV_VirtAddr va;
+ /** Size (in bytes) of the memory area. */
+ int size;
+
+}
+netio_ipp_address_t;
+
+/** The various pread/pwrite offsets into the hypervisor-level driver.
+ * @ingroup types
+ */
+typedef enum
+{
+ /** Inform the Linux driver of the address of the NetIO arena memory.
+ * This offset is actually only used to convey information from netio
+ * to the Linux driver; it never makes it from there to the hypervisor.
+ * Write-only; takes a uint32_t specifying the VA address. */
+ NETIO_FIXED_ADDR = 0x5000000000000000ULL,
+
+ /** Inform the Linux driver of the size of the NetIO arena memory.
+ * This offset is actually only used to convey information from netio
+ * to the Linux driver; it never makes it from there to the hypervisor.
+ * Write-only; takes a uint32_t specifying the VA size. */
+ NETIO_FIXED_SIZE = 0x5100000000000000ULL,
+
+ /** Register current tile with IPP. Write then read: write, takes a
+ * netio_input_config_t, read returns a pointer to a netio_queue_impl_t. */
+ NETIO_IPP_INPUT_REGISTER_OFF = 0x6000000000000000ULL,
+
+ /** Unregister current tile from IPP. Write-only, takes a dummy argument. */
+ NETIO_IPP_INPUT_UNREGISTER_OFF = 0x6100000000000000ULL,
+
+ /** Start packets flowing. Write-only, takes a dummy argument. */
+ NETIO_IPP_INPUT_INIT_OFF = 0x6200000000000000ULL,
+
+ /** Stop packets flowing. Write-only, takes a dummy argument. */
+ NETIO_IPP_INPUT_UNINIT_OFF = 0x6300000000000000ULL,
+
+ /** Configure group (typically we group on VLAN). Write-only: takes an
+ * array of netio_group_t's, low 24 bits of the offset is the base group
+ * number times the size of a netio_group_t. */
+ NETIO_IPP_INPUT_GROUP_CFG_OFF = 0x6400000000000000ULL,
+
+ /** Configure bucket. Write-only: takes an array of netio_bucket_t's, low
+ * 24 bits of the offset is the base bucket number times the size of a
+ * netio_bucket_t. */
+ NETIO_IPP_INPUT_BUCKET_CFG_OFF = 0x6500000000000000ULL,
+
+ /** Get/set a parameter. Read or write: read or write data is the parameter
+ * value, low 32 bits of the offset is a __netio_getset_offset_t. */
+ NETIO_IPP_PARAM_OFF = 0x6600000000000000ULL,
+
+ /** Get fast I/O index. Read-only; returns a 4-byte base index value. */
+ NETIO_IPP_GET_FASTIO_OFF = 0x6700000000000000ULL,
+
+ /** Configure hijack IP address. Packets with this IPv4 dest address
+ * go to bucket NETIO_NUM_BUCKETS - 1. Write-only: takes an IP address
+ * in some standard form. FIXME: Define the form! */
+ NETIO_IPP_INPUT_HIJACK_CFG_OFF = 0x6800000000000000ULL,
+
+ /**
+ * Offsets beyond this point are reserved for the supervisor (although that
+ * enforcement must be done by the supervisor driver itself).
+ */
+ NETIO_IPP_USER_MAX_OFF = 0x6FFFFFFFFFFFFFFFULL,
+
+ /** Register I/O memory. Write-only, takes a netio_ipp_address_t. */
+ NETIO_IPP_IOMEM_REGISTER_OFF = 0x7000000000000000ULL,
+
+ /** Unregister I/O memory. Write-only, takes a netio_ipp_address_t. */
+ NETIO_IPP_IOMEM_UNREGISTER_OFF = 0x7100000000000000ULL,
+
+ /* Offsets greater than 0x7FFFFFFF can't be used directly from Linux
+ * userspace code due to limitations in the pread/pwrite syscalls. */
+
+ /** Drain LIPP buffers. */
+ NETIO_IPP_DRAIN_OFF = 0xFA00000000000000ULL,
+
+ /** Supply a netio_ipp_address_t to be used as shared memory for the
+ * LEPP command queue. */
+ NETIO_EPP_SHM_OFF = 0xFB00000000000000ULL,
+
+ /* 0xFC... is currently unused. */
+
+ /** Stop IPP/EPP tiles. Write-only, takes a dummy argument. */
+ NETIO_IPP_STOP_SHIM_OFF = 0xFD00000000000000ULL,
+
+ /** Start IPP/EPP tiles. Write-only, takes a dummy argument. */
+ NETIO_IPP_START_SHIM_OFF = 0xFE00000000000000ULL,
+
+ /** Supply packet arena. Write-only, takes an array of
+ * netio_ipp_address_t values. */
+ NETIO_IPP_ADDRESS_OFF = 0xFF00000000000000ULL,
+} netio_hv_offset_t;
+
+/** Extract the base offset from an offset */
+#define NETIO_BASE_OFFSET(off) ((off) & 0xFF00000000000000ULL)
+/** Extract the local offset from an offset */
+#define NETIO_LOCAL_OFFSET(off) ((off) & 0x00FFFFFFFFFFFFFFULL)
+
+
+/**
+ * Get/set offset.
+ */
+typedef union
+{
+ struct
+ {
+ uint64_t addr:48; /**< Class-specific address */
+ unsigned int class:8; /**< Class (e.g., NETIO_PARAM) */
+ unsigned int opcode:8; /**< High 8 bits of NETIO_IPP_PARAM_OFF */
+ }
+ bits; /**< Bitfields */
+ uint64_t word; /**< Aggregated value to use as the offset */
+}
+__netio_getset_offset_t;
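+
+/* Illustrative sketch (not part of this interface): composing the offset used
+ * with NETIO_IPP_PARAM_OFF.  The class value below is hypothetical; real
+ * parameter classes are defined elsewhere.
+ *
+ * @code
+ * __netio_getset_offset_t off;
+ * off.word = 0;
+ * off.bits.addr = 0;                            // class-specific address
+ * off.bits.class = 1;                           // hypothetical class value
+ * off.bits.opcode = NETIO_IPP_PARAM_OFF >> 56;  // high 8 bits of the offset
+ *
+ * // NETIO_BASE_OFFSET(off.word)  == NETIO_IPP_PARAM_OFF
+ * // NETIO_LOCAL_OFFSET(off.word) holds the class and address bits
+ * @endcode
+ */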
+
+/**
+ * Fast I/O index offsets (must be contiguous).
+ */
+typedef enum
+{
+ NETIO_FASTIO_ALLOCATE = 0, /**< Get empty packet buffer */
+ NETIO_FASTIO_FREE_BUFFER = 1, /**< Give buffer back to IPP */
+ NETIO_FASTIO_RETURN_CREDITS = 2, /**< Give credits to IPP */
+ NETIO_FASTIO_SEND_PKT_NOCK = 3, /**< Send a packet, no checksum */
+ NETIO_FASTIO_SEND_PKT_CK = 4, /**< Send a packet, with checksum */
+ NETIO_FASTIO_SEND_PKT_VEC = 5, /**< Send a vector of packets */
+ NETIO_FASTIO_SENDV_PKT = 6, /**< Sendv one packet */
+ NETIO_FASTIO_NUM_INDEX = 7, /**< Total number of fast I/O indices */
+} netio_fastio_index_t;
+
+/** 3-word return type for Fast I/O call. */
+typedef struct
+{
+ int err; /**< Error code. */
+ uint32_t val0; /**< Value. Meaning depends upon the specific call. */
+ uint32_t val1; /**< Value. Meaning depends upon the specific call. */
+} netio_fastio_rv3_t;
+
+/** 0-argument fast I/O call */
+int __netio_fastio0(uint32_t fastio_index);
+/** 1-argument fast I/O call */
+int __netio_fastio1(uint32_t fastio_index, uint32_t arg0);
+/** 3-argument fast I/O call, 2-word return value */
+netio_fastio_rv3_t __netio_fastio3_rv3(uint32_t fastio_index, uint32_t arg0,
+ uint32_t arg1, uint32_t arg2);
+/** 4-argument fast I/O call */
+int __netio_fastio4(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
+ uint32_t arg2, uint32_t arg3);
+/** 6-argument fast I/O call */
+int __netio_fastio6(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
+ uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5);
+/** 9-argument fast I/O call */
+int __netio_fastio9(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
+ uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5,
+ uint32_t arg6, uint32_t arg7, uint32_t arg8);
+
+/** Allocate an empty packet.
+ * @param fastio_index Fast I/O index.
+ * @param size Size of the packet to allocate.
+ */
+#define __netio_fastio_allocate(fastio_index, size) \
+ __netio_fastio1((fastio_index) + NETIO_FASTIO_ALLOCATE, size)
+
+/** Free a buffer.
+ * @param fastio_index Fast I/O index.
+ * @param handle Handle for the packet to free.
+ */
+#define __netio_fastio_free_buffer(fastio_index, handle) \
+ __netio_fastio1((fastio_index) + NETIO_FASTIO_FREE_BUFFER, handle)
+
+/** Increment our receive credits.
+ * @param fastio_index Fast I/O index.
+ * @param credits Number of credits to add.
+ */
+#define __netio_fastio_return_credits(fastio_index, credits) \
+ __netio_fastio1((fastio_index) + NETIO_FASTIO_RETURN_CREDITS, credits)
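+
+/* Illustrative sketch (not part of this interface): each of the fast I/O
+ * macros above and below adds its per-call index to the 4-byte base index
+ * obtained by reading NETIO_IPP_GET_FASTIO_OFF.  For example, handing 16
+ * credits back to the IPP (the credit count is hypothetical):
+ *
+ * @code
+ * // fastio_index is the base index previously read via NETIO_IPP_GET_FASTIO_OFF
+ * __netio_fastio_return_credits(fastio_index, 16);
+ * @endcode
+ */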
+
+/** Send packet, no checksum.
+ * @param fastio_index Fast I/O index.
+ * @param ackflag Nonzero if we want an ack.
+ * @param size Size of the packet.
+ * @param va Virtual address of start of packet.
+ * @param handle Packet handle.
+ */
+#define __netio_fastio_send_pkt_nock(fastio_index, ackflag, size, va, handle) \
+ __netio_fastio4((fastio_index) + NETIO_FASTIO_SEND_PKT_NOCK, ackflag, \
+ size, va, handle)
+
+/** Send packet, calculate checksum.
+ * @param fastio_index Fast I/O index.
+ * @param ackflag Nonzero if we want an ack.
+ * @param size Size of the packet.
+ * @param va Virtual address of start of packet.
+ * @param handle Packet handle.
+ * @param csum0 Shim checksum header.
+ * @param csum1 Checksum seed.
+ */
+#define __netio_fastio_send_pkt_ck(fastio_index, ackflag, size, va, handle, \
+ csum0, csum1) \
+ __netio_fastio6((fastio_index) + NETIO_FASTIO_SEND_PKT_CK, ackflag, \
+ size, va, handle, csum0, csum1)
+
+
+/** Format for the "csum0" argument to the __netio_fastio_send routines
+ * and LEPP. Note that this is currently exactly identical to the
+ * ShimProtocolOffloadHeader.
+ */
+typedef union
+{
+ struct
+ {
+ unsigned int start_byte:7; /**< The first byte to be checksummed */
+ unsigned int count:14; /**< Number of bytes to be checksummed. */
+ unsigned int destination_byte:7; /**< The byte to write the checksum to. */
+ unsigned int reserved:4; /**< Reserved. */
+ } bits; /**< Decomposed method of access. */
+ unsigned int word; /**< To send out the IDN. */
+} __netio_checksum_header_t;
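+
+/* Illustrative sketch: filling in a checksum descriptor.  The byte offsets
+ * and count below are hypothetical, and assume offsets are counted from the
+ * start of the frame data handed to the shim.
+ *
+ * @code
+ * __netio_checksum_header_t csum;
+ * csum.word = 0;
+ * csum.bits.start_byte = 34;        // first byte to checksum (hypothetical)
+ * csum.bits.count = 1020;           // number of bytes to checksum
+ * csum.bits.destination_byte = 50;  // where to write the checksum
+ * // csum.word can then be passed as the csum0 argument described above
+ * @endcode
+ */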
+
+
+/** Sendv packet with 1 or 2 segments.
+ * @param fastio_index Fast I/O index.
+ * @param flags Ack/csum/notify flags in low 3 bits; number of segments minus
+ * 1 in next 2 bits; expected checksum in high 16 bits.
+ * @param confno Confirmation number to request, if notify flag set.
+ * @param csum0 Checksum descriptor; if zero, no checksum.
+ * @param va_F Virtual address of first segment.
+ * @param va_L Virtual address of last segment, if 2 segments.
+ * @param len_F_L Length of first segment in low 16 bits; length of last
+ * segment, if 2 segments, in high 16 bits.
+ */
+#define __netio_fastio_sendv_pkt_1_2(fastio_index, flags, confno, csum0, \
+ va_F, va_L, len_F_L) \
+ __netio_fastio6((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \
+ csum0, va_F, va_L, len_F_L)
+
+/** Send packet on PCIe interface.
+ * @param fastio_index Fast I/O index.
+ * @param flags Ack/csum/notify flags in low 3 bits.
+ * @param confno Confirmation number to request, if notify flag set.
+ * @param csum0 Checksum descriptor; hard-wired to 0, not needed for PCIe.
+ * @param va_F Virtual address of the packet buffer.
+ * @param va_L Virtual address of last segment, if 2 segments; hard-wired to 0
+ *   for PCIe.
+ * @param len_F_L Length of the packet buffer in low 16 bits.
+ */
+#define __netio_fastio_send_pcie_pkt(fastio_index, flags, confno, csum0, \
+ va_F, va_L, len_F_L) \
+ __netio_fastio6((fastio_index) + PCIE_FASTIO_SENDV_PKT, flags, confno, \
+ csum0, va_F, va_L, len_F_L)
+
+/** Sendv packet with 3 or 4 segments.
+ * @param fastio_index Fast I/O index.
+ * @param flags Ack/csum/notify flags in low 3 bits; number of segments minus
+ * 1 in next 2 bits; expected checksum in high 16 bits.
+ * @param confno Confirmation number to request, if notify flag set.
+ * @param csum0 Checksum descriptor; if zero, no checksum.
+ * @param va_F Virtual address of first segment.
+ * @param va_L Virtual address of last segment (third segment if 3 segments,
+ * fourth segment if 4 segments).
+ * @param len_F_L Length of first segment in low 16 bits; length of last
+ * segment in high 16 bits.
+ * @param va_M0 Virtual address of "middle 0" segment; this segment is sent
+ * second when there are three segments, and third if there are four.
+ * @param va_M1 Virtual address of "middle 1" segment; this segment is sent
+ * second when there are four segments.
+ * @param len_M0_M1 Length of middle 0 segment in low 16 bits; length of middle
+ * 1 segment, if 4 segments, in high 16 bits.
+ */
+#define __netio_fastio_sendv_pkt_3_4(fastio_index, flags, confno, csum0, va_F, \
+ va_L, len_F_L, va_M0, va_M1, len_M0_M1) \
+ __netio_fastio9((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \
+ csum0, va_F, va_L, len_F_L, va_M0, va_M1, len_M0_M1)
+
+/** Send vector of packets.
+ * @param fastio_index Fast I/O index.
+ * @param seqno Number of packets transmitted so far on this interface;
+ * used to decide which packets should be acknowledged.
+ * @param nentries Number of entries in vector.
+ * @param va Virtual address of start of vector entry array.
+ * @return 3-word netio_fastio_rv3_t structure. The structure's err member
+ * is an error code, or zero if no error. The val0 member is the
+ * updated value of seqno; it has been incremented by 1 for each
+ * packet sent. That increment may be less than nentries if an
+ * error occurred, or if some of the entries in the vector contain
+ * handles equal to NETIO_PKT_HANDLE_NONE. The val1 member is the
+ * updated value of nentries; it has been decremented by 1 for each
+ * vector entry processed. Again, that decrement may be less than
+ * nentries (leaving the returned value positive) if an error
+ * occurred.
+ */
+#define __netio_fastio_send_pkt_vec(fastio_index, seqno, nentries, va) \
+ __netio_fastio3_rv3((fastio_index) + NETIO_FASTIO_SEND_PKT_VEC, seqno, \
+ nentries, va)
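+
+/* Illustrative sketch: consuming the 3-word return value of
+ * __netio_fastio_send_pkt_vec().  Here fastio_index, seqno, nentries and va
+ * are assumed to have been set up by the caller as described above.
+ *
+ * @code
+ * netio_fastio_rv3_t rv =
+ *   __netio_fastio_send_pkt_vec(fastio_index, seqno, nentries, va);
+ * // rv.err is zero on success; rv.val0 is the updated seqno and rv.val1
+ * // the updated nentries, as described above.
+ * seqno = rv.val0;
+ * nentries = rv.val1;
+ * @endcode
+ */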
+
+
+/** An egress DMA command for LEPP. */
+typedef struct
+{
+ /** Is this a TSO transfer?
+ *
+ * NOTE: This field is always 0, to distinguish it from
+ * lepp_tso_cmd_t. It must come first!
+ */
+ uint8_t tso : 1;
+
+ /** Unused padding bits. */
+ uint8_t _unused : 3;
+
+ /** Should this packet be sent directly from caches instead of DRAM,
+ * using hash-for-home to locate the packet data?
+ */
+ uint8_t hash_for_home : 1;
+
+ /** Should we compute a checksum? */
+ uint8_t compute_checksum : 1;
+
+ /** Is this the final buffer for this packet?
+ *
+ * A single packet can be split over several input buffers (a "gather"
+ * operation). This flag indicates that this is the last buffer
+ * in a packet.
+ */
+ uint8_t end_of_packet : 1;
+
+ /** Should LEPP advance 'comp_busy' when this DMA is fully finished? */
+ uint8_t send_completion : 1;
+
+ /** High bits of Client Physical Address of the start of the buffer
+ * to be egressed.
+ *
+ * NOTE: Only 6 bits are actually needed here, as CPAs are
+ * currently 38 bits. So two bits could be scavenged from this.
+ */
+ uint8_t cpa_hi;
+
+ /** The number of bytes to be egressed. */
+ uint16_t length;
+
+ /** Low 32 bits of Client Physical Address of the start of the buffer
+ * to be egressed.
+ */
+ uint32_t cpa_lo;
+
+ /** Checksum information (only used if 'compute_checksum'). */
+ __netio_checksum_header_t checksum_data;
+
+} lepp_cmd_t;
+
+
+/** A chunk of physical memory for a TSO egress. */
+typedef struct
+{
+ /** The low bits of the CPA. */
+ uint32_t cpa_lo;
+ /** The high bits of the CPA. */
+ uint16_t cpa_hi : 15;
+ /** Should this packet be sent directly from caches instead of DRAM,
+ * using hash-for-home to locate the packet data?
+ */
+ uint16_t hash_for_home : 1;
+ /** The length in bytes. */
+ uint16_t length;
+} lepp_frag_t;
+
+
+/** An LEPP command that handles TSO. */
+typedef struct
+{
+ /** Is this a TSO transfer?
+ *
+ * NOTE: This field is always 1, to distinguish it from
+ * lepp_cmd_t. It must come first!
+ */
+ uint8_t tso : 1;
+
+ /** Unused padding bits. */
+ uint8_t _unused : 7;
+
+ /** Size of the header[] array in bytes. It must be in the range
+ * [40, 127], which are the smallest header for a TCP packet over
+ * Ethernet and the maximum possible prepend size supported by
+ * hardware, respectively. Note that the array storage must be
+ * padded out to a multiple of four bytes so that the following
+ * LEPP command is aligned properly.
+ */
+ uint8_t header_size;
+
+ /** Byte offset of the IP header in header[]. */
+ uint8_t ip_offset;
+
+ /** Byte offset of the TCP header in header[]. */
+ uint8_t tcp_offset;
+
+ /** The number of bytes to use for the payload of each packet,
+ * except of course the last one, which may not have enough bytes.
+ * This means that each Ethernet packet except the last will have a
+ * size of header_size + payload_size.
+ */
+ uint16_t payload_size;
+
+ /** The length of the 'frags' array that follows this struct. */
+ uint16_t num_frags;
+
+ /** The actual frags. */
+ lepp_frag_t frags[0 /* Variable-sized; num_frags entries. */];
+
+ /*
+ * The packet header template logically follows frags[],
+ * but you can't declare that in C.
+ *
+ * uint32_t header[header_size_in_words_rounded_up];
+ */
+
+} lepp_tso_cmd_t;
+
+
+/** An LEPP completion ring entry. */
+typedef void* lepp_comp_t;
+
+
+/** Maximum number of frags for one TSO command. This is adapted from
+ * linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for
+ * our page size of exactly 65536. We add one for a "body" fragment.
+ */
+#define LEPP_MAX_FRAGS (65536 / HV_PAGE_SIZE_SMALL + 2 + 1)
+
+/** Total number of bytes needed for an lepp_tso_cmd_t. */
+#define LEPP_TSO_CMD_SIZE(num_frags, header_size) \
+ (sizeof(lepp_tso_cmd_t) + \
+ (num_frags) * sizeof(lepp_frag_t) + \
+ (((header_size) + 3) & -4))
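+
+/* For example (illustrative only), LEPP_TSO_CMD_SIZE(2, 54) evaluates to
+ * sizeof(lepp_tso_cmd_t) + 2 * sizeof(lepp_frag_t) + 56 bytes, since the
+ * 54-byte header storage is rounded up to the next multiple of four. */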
+
+/** The size of the lepp "cmd" queue. */
+#define LEPP_CMD_QUEUE_BYTES \
+ (((CHIP_L2_CACHE_SIZE() - 2 * CHIP_L2_LINE_SIZE()) / \
+ (sizeof(lepp_cmd_t) + sizeof(lepp_comp_t))) * sizeof(lepp_cmd_t))
+
+/** The largest possible command that can go in lepp_queue_t::cmds[]. */
+#define LEPP_MAX_CMD_SIZE LEPP_TSO_CMD_SIZE(LEPP_MAX_FRAGS, 128)
+
+/** The largest possible value of lepp_queue_t::cmd_{head, tail} (inclusive).
+ */
+#define LEPP_CMD_LIMIT \
+ (LEPP_CMD_QUEUE_BYTES - LEPP_MAX_CMD_SIZE)
+
+/** The maximum number of completions in an LEPP queue. */
+#define LEPP_COMP_QUEUE_SIZE \
+ ((LEPP_CMD_LIMIT + sizeof(lepp_cmd_t) - 1) / sizeof(lepp_cmd_t))
+
+/** Increment an index modulo the queue size. */
+#define LEPP_QINC(var) \
+ (var = __insn_mnz(var - (LEPP_COMP_QUEUE_SIZE - 1), var + 1))
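+
+/* For illustration, assuming __insn_mnz(a, b) yields b when a is nonzero and
+ * 0 otherwise, LEPP_QINC(var) behaves like this portable expression for an
+ * index already within [0, LEPP_COMP_QUEUE_SIZE - 1]:
+ *
+ * @code
+ * var = (var == LEPP_COMP_QUEUE_SIZE - 1) ? 0 : var + 1;
+ * @endcode
+ */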
+
+/** A queue used to convey egress commands from the client to LEPP. */
+typedef struct
+{
+ /** Index of first completion not yet processed by user code.
+ * If this is equal to comp_busy, there are no such completions.
+ *
+ * NOTE: This is only read/written by the user.
+ */
+ unsigned int comp_head;
+
+ /** Index of first completion record not yet completed.
+ * If this is equal to comp_tail, there are no such completions.
+ * This index gets advanced (modulo LEPP_COMP_QUEUE_SIZE) whenever
+ * a command with the 'completion' bit set is finished.
+ *
+ * NOTE: This is only written by LEPP, only read by the user.
+ */
+ volatile unsigned int comp_busy;
+
+ /** Index of the first empty slot in the completion ring.
+ * Entries from this up to but not including comp_head (in ring order)
+ * can be filled in with completion data.
+ *
+ * NOTE: This is only read/written by the user.
+ */
+ unsigned int comp_tail;
+
+ /** Byte index of first command enqueued for LEPP but not yet processed.
+ *
+ * This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT.
+ *
+ * NOTE: LEPP advances this counter as soon as it no longer needs
+ * the cmds[] storage for this entry, but the transfer is not actually
+ * complete (i.e. the buffer pointed to by the command is no longer
+ * needed) until comp_busy advances.
+ *
+ * If this is equal to cmd_tail, the ring is empty.
+ *
+ * NOTE: This is only written by LEPP, only read by the user.
+ */
+ volatile unsigned int cmd_head;
+
+ /** Byte index of first empty slot in the command ring. This field can
+ * be incremented up to but not equal to cmd_head (because that would
+ * mean the ring is empty).
+ *
+ * This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT.
+ *
+ * NOTE: This is read/written by the user, only read by LEPP.
+ */
+ volatile unsigned int cmd_tail;
+
+ /** A ring of variable-sized egress DMA commands.
+ *
+ * NOTE: Only written by the user, only read by LEPP.
+ */
+ char cmds[LEPP_CMD_QUEUE_BYTES]
+ __attribute__((aligned(CHIP_L2_LINE_SIZE())));
+
+ /** A ring of user completion data.
+ * NOTE: Only read/written by the user.
+ */
+ lepp_comp_t comps[LEPP_COMP_QUEUE_SIZE]
+ __attribute__((aligned(CHIP_L2_LINE_SIZE())));
+} lepp_queue_t;
+
+
+/** An internal helper function for determining the number of entries
+ * available in a ring buffer, given that there is one sentinel.
+ */
+static inline unsigned int
+_lepp_num_free_slots(unsigned int head, unsigned int tail)
+{
+ /*
+ * One entry is reserved for use as a sentinel, to distinguish
+ * "empty" from "full". So we compute
+ * (head - tail - 1) % LEPP_COMP_QUEUE_SIZE, but without using a slow % operation.
+ */
+ return (head - tail - 1) + ((head <= tail) ? LEPP_COMP_QUEUE_SIZE : 0);
+}
+
+
+/** Returns how many new comp entries can be enqueued. */
+static inline unsigned int
+lepp_num_free_comp_slots(const lepp_queue_t* q)
+{
+ return _lepp_num_free_slots(q->comp_head, q->comp_tail);
+}
+
+static inline int
+lepp_qsub(int v1, int v2)
+{
+ int delta = v1 - v2;
+ return delta + ((delta >> 31) & LEPP_COMP_QUEUE_SIZE);
+}
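+
+/* Illustrative sketch: lepp_qsub() computes (v1 - v2) modulo
+ * LEPP_COMP_QUEUE_SIZE for in-range indices.  For example, assuming 'q'
+ * points to a lepp_queue_t, the number of completions LEPP has finished but
+ * user code has not yet processed could be obtained as:
+ *
+ * @code
+ * int pending = lepp_qsub(q->comp_busy, q->comp_head);
+ * @endcode
+ */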
+
+
+/** FIXME: Check this from linux, via a new "pwrite()" call. */
+#define LIPP_VERSION 1
+
+
+/** We use exactly two bytes of alignment padding. */
+#define LIPP_PACKET_PADDING 2
+
+/** The minimum size of a "small" buffer (including the padding). */
+#define LIPP_SMALL_PACKET_SIZE 128
+
+/*
+ * NOTE: The following two values should total to less than around
+ * 13582, to keep the total size used for "lipp_state_t" below 64K.
+ */
+
+/** The maximum number of "small" buffers.
+ * This is enough for 53 network cpus with 128 credits. Note that
+ * if these are exhausted, we will fall back to using large buffers.
+ */
+#define LIPP_SMALL_BUFFERS 6785
+
+/** The maximum number of "large" buffers.
+ * This is enough for 53 network cpus with 128 credits.
+ */
+#define LIPP_LARGE_BUFFERS 6785
+
+#endif /* __DRV_XGBE_INTF_H__ */
diff --git a/arch/tile/include/hv/netio_errors.h b/arch/tile/include/hv/netio_errors.h
new file mode 100644
index 000000000000..e1591bff61b5
--- /dev/null
+++ b/arch/tile/include/hv/netio_errors.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * Error codes returned from NetIO routines.
+ */
+
+#ifndef __NETIO_ERRORS_H__
+#define __NETIO_ERRORS_H__
+
+/**
+ * @addtogroup error
+ *
+ * @brief The error codes returned by NetIO functions.
+ *
+ * NetIO functions return 0 (defined as ::NETIO_NO_ERROR) on success, and
+ * a negative value if an error occurs.
+ *
+ * In cases where a NetIO function failed due to an error reported by
+ * system libraries, the error code will be the negation of the
+ * system errno at the time of failure. The @ref netio_strerror()
+ * function will deliver error strings for both NetIO and system error
+ * codes.
+ *
+ * @{
+ */
+
+/** The set of all NetIO errors. */
+typedef enum
+{
+ /** Operation successfully completed. */
+ NETIO_NO_ERROR = 0,
+
+ /** A packet was successfully retrieved from an input queue. */
+ NETIO_PKT = 0,
+
+ /** Largest NetIO error number. */
+ NETIO_ERR_MAX = -701,
+
+ /** The tile is not registered with the IPP. */
+ NETIO_NOT_REGISTERED = -701,
+
+ /** No packet was available to retrieve from the input queue. */
+ NETIO_NOPKT = -702,
+
+ /** The requested function is not implemented. */
+ NETIO_NOT_IMPLEMENTED = -703,
+
+ /** On a registration operation, the target queue already has the maximum
+ * number of tiles registered for it, and no more may be added. On a
+ * packet send operation, the output queue is full and nothing more can
+ * be queued until some of the queued packets are actually transmitted. */
+ NETIO_QUEUE_FULL = -704,
+
+ /** The calling process or thread is not bound to exactly one CPU. */
+ NETIO_BAD_AFFINITY = -705,
+
+ /** Cannot allocate memory on requested controllers. */
+ NETIO_CANNOT_HOME = -706,
+
+ /** On a registration operation, the IPP specified is not configured
+ * to support the options requested; for instance, the application
+ * wants a specific type of tagged headers which the configured IPP
+ * doesn't support. Or, the supplied configuration information is
+ * not self-consistent, or is out of range; for instance, specifying
+ * both NETIO_RECV and NETIO_NO_RECV, or asking for more than
+ * NETIO_MAX_SEND_BUFFERS to be preallocated. On a VLAN or bucket
+ * configure operation, the number of items, or the base item, was
+ * out of range.
+ */
+ NETIO_BAD_CONFIG = -707,
+
+ /** Too many tiles have registered to transmit packets. */
+ NETIO_TOOMANY_XMIT = -708,
+
+ /** Packet transmission was attempted on a queue which was registered
+ with transmit disabled. */
+ NETIO_UNREG_XMIT = -709,
+
+ /** This tile is already registered with the IPP. */
+ NETIO_ALREADY_REGISTERED = -710,
+
+ /** The Ethernet link is down. The application should try again later. */
+ NETIO_LINK_DOWN = -711,
+
+ /** An invalid memory buffer has been specified. This may be an unmapped
+ * virtual address, or one which does not meet alignment requirements.
+ * For netio_input_register(), this error may be returned when multiple
+ * processes specify different memory regions to be used for NetIO
+ * buffers. That can happen if these processes specify explicit memory
+ * regions with the ::NETIO_FIXED_BUFFER_VA flag, or if tmc_cmem_init()
+ * has not been called by a common ancestor of the processes.
+ */
+ NETIO_FAULT = -712,
+
+ /** Cannot combine user-managed shared memory and cache coherence. */
+ NETIO_BAD_CACHE_CONFIG = -713,
+
+ /** Smallest NetIO error number. */
+ NETIO_ERR_MIN = -713,
+
+#ifndef __DOXYGEN__
+ /** Used internally to mean that no response is needed; never returned to
+ * an application. */
+ NETIO_NO_RESPONSE = 1
+#endif
+} netio_error_t;
+
+/** @} */
+
+#endif /* __NETIO_ERRORS_H__ */
diff --git a/arch/tile/include/hv/netio_intf.h b/arch/tile/include/hv/netio_intf.h
new file mode 100644
index 000000000000..8d20972aba2c
--- /dev/null
+++ b/arch/tile/include/hv/netio_intf.h
@@ -0,0 +1,2975 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * NetIO interface structures and macros.
+ */
+
+#ifndef __NETIO_INTF_H__
+#define __NETIO_INTF_H__
+
+#include <hv/netio_errors.h>
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+#if !defined(__HV__) && !defined(__BOGUX__) && !defined(__KERNEL__)
+#include <assert.h>
+#define netio_assert assert /**< Enable assertions from macros */
+#else
+#define netio_assert(...) ((void)(0)) /**< Disable assertions from macros */
+#endif
+
+/*
+ * If none of these symbols are defined, we're building libnetio in an
+ * environment where we have pthreads, so we'll enable locking.
+ */
+#if !defined(__HV__) && !defined(__BOGUX__) && !defined(__KERNEL__) && \
+ !defined(__NEWLIB__)
+#define _NETIO_PTHREAD /**< Include a mutex in netio_queue_t below */
+
+/*
+ * If NETIO_UNLOCKED is defined, we don't use per-cpu locks on
+ * per-packet NetIO operations. We still do pthread locking on things
+ * like netio_input_register, though. This is used for building
+ * libnetio_unlocked.
+ */
+#ifndef NETIO_UNLOCKED
+
+/* Avoid PLT overhead by using our own inlined per-cpu lock. */
+#include <sched.h>
+typedef int _netio_percpu_mutex_t;
+
+static __inline int
+_netio_percpu_mutex_init(_netio_percpu_mutex_t* lock)
+{
+ *lock = 0;
+ return 0;
+}
+
+static __inline int
+_netio_percpu_mutex_lock(_netio_percpu_mutex_t* lock)
+{
+ while (__builtin_expect(__insn_tns(lock), 0))
+ sched_yield();
+ return 0;
+}
+
+static __inline int
+_netio_percpu_mutex_unlock(_netio_percpu_mutex_t* lock)
+{
+ *lock = 0;
+ return 0;
+}
+
+#else /* NETIO_UNLOCKED */
+
+/* Don't do any locking for per-packet NetIO operations. */
+typedef int _netio_percpu_mutex_t;
+#define _netio_percpu_mutex_init(L)
+#define _netio_percpu_mutex_lock(L)
+#define _netio_percpu_mutex_unlock(L)
+
+#endif /* NETIO_UNLOCKED */
+#endif /* !__HV__, !__BOGUX, !__KERNEL__, !__NEWLIB__ */
+
+/** How many tiles can register for a given queue.
+ * @ingroup setup */
+#define NETIO_MAX_TILES_PER_QUEUE 64
+
+
+/** Largest permissible queue identifier.
+ * @ingroup setup */
+#define NETIO_MAX_QUEUE_ID 255
+
+
+#ifndef __DOXYGEN__
+
+/* Metadata packet checksum/ethertype flags. */
+
+/** The L4 checksum has not been calculated. */
+#define _NETIO_PKT_NO_L4_CSUM_SHIFT 0
+#define _NETIO_PKT_NO_L4_CSUM_RMASK 1
+#define _NETIO_PKT_NO_L4_CSUM_MASK \
+ (_NETIO_PKT_NO_L4_CSUM_RMASK << _NETIO_PKT_NO_L4_CSUM_SHIFT)
+
+/** The L3 checksum has not been calculated. */
+#define _NETIO_PKT_NO_L3_CSUM_SHIFT 1
+#define _NETIO_PKT_NO_L3_CSUM_RMASK 1
+#define _NETIO_PKT_NO_L3_CSUM_MASK \
+ (_NETIO_PKT_NO_L3_CSUM_RMASK << _NETIO_PKT_NO_L3_CSUM_SHIFT)
+
+/** The L3 checksum is incorrect (or perhaps has not been calculated). */
+#define _NETIO_PKT_BAD_L3_CSUM_SHIFT 2
+#define _NETIO_PKT_BAD_L3_CSUM_RMASK 1
+#define _NETIO_PKT_BAD_L3_CSUM_MASK \
+ (_NETIO_PKT_BAD_L3_CSUM_RMASK << _NETIO_PKT_BAD_L3_CSUM_SHIFT)
+
+/** The Ethernet packet type is unrecognized. */
+#define _NETIO_PKT_TYPE_UNRECOGNIZED_SHIFT 3
+#define _NETIO_PKT_TYPE_UNRECOGNIZED_RMASK 1
+#define _NETIO_PKT_TYPE_UNRECOGNIZED_MASK \
+ (_NETIO_PKT_TYPE_UNRECOGNIZED_RMASK << \
+ _NETIO_PKT_TYPE_UNRECOGNIZED_SHIFT)
+
+/* Metadata packet type flags. */
+
+/** Where the packet type bits are; this field is the index into
+ * _netio_pkt_info. */
+#define _NETIO_PKT_TYPE_SHIFT 4
+#define _NETIO_PKT_TYPE_RMASK 0x3F
+
+/** How many VLAN tags the packet has, and, if we have two, which one we
+ * actually grouped on. A VLAN within a proprietary (Marvell or Broadcom)
+ * tag is counted here. */
+#define _NETIO_PKT_VLAN_SHIFT 4
+#define _NETIO_PKT_VLAN_RMASK 0x3
+#define _NETIO_PKT_VLAN_MASK \
+ (_NETIO_PKT_VLAN_RMASK << _NETIO_PKT_VLAN_SHIFT)
+#define _NETIO_PKT_VLAN_NONE 0 /* No VLAN tag. */
+#define _NETIO_PKT_VLAN_ONE 1 /* One VLAN tag. */
+#define _NETIO_PKT_VLAN_TWO_OUTER 2 /* Two VLAN tags, outer one used. */
+#define _NETIO_PKT_VLAN_TWO_INNER 3 /* Two VLAN tags, inner one used. */
+
+/** Which proprietary tags the packet has. */
+#define _NETIO_PKT_TAG_SHIFT 6
+#define _NETIO_PKT_TAG_RMASK 0x3
+#define _NETIO_PKT_TAG_MASK \
+ (_NETIO_PKT_TAG_RMASK << _NETIO_PKT_TAG_SHIFT)
+#define _NETIO_PKT_TAG_NONE 0 /* No proprietary tags. */
+#define _NETIO_PKT_TAG_MRVL 1 /* Marvell HyperG.Stack tags. */
+#define _NETIO_PKT_TAG_MRVL_EXT 2 /* HyperG.Stack extended tags. */
+#define _NETIO_PKT_TAG_BRCM 3 /* Broadcom HiGig tags. */
+
+/** Whether a packet has an LLC + SNAP header. */
+#define _NETIO_PKT_SNAP_SHIFT 8
+#define _NETIO_PKT_SNAP_RMASK 0x1
+#define _NETIO_PKT_SNAP_MASK \
+ (_NETIO_PKT_SNAP_RMASK << _NETIO_PKT_SNAP_SHIFT)
+
+/* NOTE: Bits 9 and 10 are unused. */
+
+/** Length of any custom data before the L2 header, in words. */
+#define _NETIO_PKT_CUSTOM_LEN_SHIFT 11
+#define _NETIO_PKT_CUSTOM_LEN_RMASK 0x1F
+#define _NETIO_PKT_CUSTOM_LEN_MASK \
+ (_NETIO_PKT_CUSTOM_LEN_RMASK << _NETIO_PKT_CUSTOM_LEN_SHIFT)
+
+/** The L4 checksum is incorrect (or perhaps has not been calculated). */
+#define _NETIO_PKT_BAD_L4_CSUM_SHIFT 16
+#define _NETIO_PKT_BAD_L4_CSUM_RMASK 0x1
+#define _NETIO_PKT_BAD_L4_CSUM_MASK \
+ (_NETIO_PKT_BAD_L4_CSUM_RMASK << _NETIO_PKT_BAD_L4_CSUM_SHIFT)
+
+/** Length of the L2 header, in words. */
+#define _NETIO_PKT_L2_LEN_SHIFT 17
+#define _NETIO_PKT_L2_LEN_RMASK 0x1F
+#define _NETIO_PKT_L2_LEN_MASK \
+ (_NETIO_PKT_L2_LEN_RMASK << _NETIO_PKT_L2_LEN_SHIFT)
+
+
+/* Flags in minimal packet metadata. */
+
+/** We need an eDMA checksum on this packet. */
+#define _NETIO_PKT_NEED_EDMA_CSUM_SHIFT 0
+#define _NETIO_PKT_NEED_EDMA_CSUM_RMASK 1
+#define _NETIO_PKT_NEED_EDMA_CSUM_MASK \
+ (_NETIO_PKT_NEED_EDMA_CSUM_RMASK << _NETIO_PKT_NEED_EDMA_CSUM_SHIFT)
+
+/* Data within the packet information table. */
+
+/* Note that, for efficiency, code which uses these fields assumes that none
+ * of the shift values below are zero. See uses below for an explanation. */
+
+/** Offset within the L2 header of the innermost ethertype (in halfwords). */
+#define _NETIO_PKT_INFO_ETYPE_SHIFT 6
+#define _NETIO_PKT_INFO_ETYPE_RMASK 0x1F
+
+/** Offset within the L2 header of the VLAN tag (in halfwords). */
+#define _NETIO_PKT_INFO_VLAN_SHIFT 11
+#define _NETIO_PKT_INFO_VLAN_RMASK 0x1F
+
+#endif
+
+
+/** The size of a memory buffer representing a small packet.
+ * @ingroup egress */
+#define SMALL_PACKET_SIZE 256
+
+/** The size of a memory buffer representing a large packet.
+ * @ingroup egress */
+#define LARGE_PACKET_SIZE 2048
+
+/** The size of a memory buffer representing a jumbo packet.
+ * @ingroup egress */
+#define JUMBO_PACKET_SIZE (12 * 1024)
+
+
+/* Common ethertypes.
+ * @ingroup ingress */
+/** @{ */
+/** The ethertype of IPv4. */
+#define ETHERTYPE_IPv4 (0x0800)
+/** The ethertype of ARP. */
+#define ETHERTYPE_ARP (0x0806)
+/** The ethertype of VLANs. */
+#define ETHERTYPE_VLAN (0x8100)
+/** The ethertype of a Q-in-Q header. */
+#define ETHERTYPE_Q_IN_Q (0x9100)
+/** The ethertype of IPv6. */
+#define ETHERTYPE_IPv6 (0x86DD)
+/** The ethertype of MPLS. */
+#define ETHERTYPE_MPLS (0x8847)
+/** @} */
+
+
+/** The possible return values of NETIO_PKT_STATUS.
+ * @ingroup ingress
+ */
+typedef enum
+{
+ /** No problems were detected with this packet. */
+ NETIO_PKT_STATUS_OK,
+ /** The packet is undersized; this is expected behavior if the packet's
+ * ethertype is unrecognized, but otherwise the packet is likely corrupt. */
+ NETIO_PKT_STATUS_UNDERSIZE,
+ /** The packet is oversized and some trailing bytes have been discarded.
+ This is expected behavior for short packets, since it's impossible to
+ precisely determine the amount of padding which may have been added to
+ them to make them meet the minimum Ethernet packet size. */
+ NETIO_PKT_STATUS_OVERSIZE,
+ /** The packet was judged to be corrupt by hardware (for instance, it had
+ a bad CRC, or part of it was discarded due to lack of buffer space in
+ the I/O shim) and should be discarded. */
+ NETIO_PKT_STATUS_BAD
+} netio_pkt_status_t;
+
+
+/** Log2 of how many buckets we have. */
+#define NETIO_LOG2_NUM_BUCKETS (10)
+
+/** How many buckets we have.
+ * @ingroup ingress */
+#define NETIO_NUM_BUCKETS (1 << NETIO_LOG2_NUM_BUCKETS)
+
+
+/**
+ * @brief A group-to-bucket identifier.
+ *
+ * @ingroup setup
+ *
+ * This tells us what to do with a given group.
+ */
+typedef union {
+ /** The header broken down into bits. */
+ struct {
+ /** Whether we should balance on L4, if available */
+ unsigned int __balance_on_l4:1;
+ /** Whether we should balance on L3, if available */
+ unsigned int __balance_on_l3:1;
+ /** Whether we should balance on L2, if available */
+ unsigned int __balance_on_l2:1;
+ /** Reserved for future use */
+ unsigned int __reserved:1;
+ /** The base bucket to use to send traffic */
+ unsigned int __bucket_base:NETIO_LOG2_NUM_BUCKETS;
+ /** The mask to apply to the balancing value. This must be one less
+ * than a power of two, e.g. 0x3 or 0xFF.
+ */
+ unsigned int __bucket_mask:NETIO_LOG2_NUM_BUCKETS;
+ /** Pad to 32 bits */
+ unsigned int __padding:(32 - 4 - 2 * NETIO_LOG2_NUM_BUCKETS);
+ } bits;
+ /** To send out the IDN. */
+ unsigned int word;
+}
+netio_group_t;
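+
+/* Illustrative sketch (not part of this interface): a group entry which
+ * balances on L3/L4 information across 16 buckets starting at bucket 0.
+ * The bucket numbers are hypothetical; the entry would be handed to the IPP
+ * via NETIO_IPP_INPUT_GROUP_CFG_OFF.
+ *
+ * @code
+ * netio_group_t g;
+ * g.word = 0;
+ * g.bits.__balance_on_l4 = 1;
+ * g.bits.__balance_on_l3 = 1;
+ * g.bits.__balance_on_l2 = 0;
+ * g.bits.__bucket_base = 0;     // first bucket to use
+ * g.bits.__bucket_mask = 0xF;   // spread over 16 buckets (mask = 2^4 - 1)
+ * @endcode
+ */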
+
+
+/**
+ * @brief A VLAN-to-bucket identifier.
+ *
+ * @ingroup setup
+ *
+ * This tells us what to do with a given VLAN.
+ */
+typedef netio_group_t netio_vlan_t;
+
+
+/**
+ * A bucket-to-queue mapping.
+ * @ingroup setup
+ */
+typedef unsigned char netio_bucket_t;
+
+
+/**
+ * A packet size can always fit in a netio_size_t.
+ * @ingroup setup
+ */
+typedef unsigned int netio_size_t;
+
+
+/**
+ * @brief Ethernet standard (ingress) packet metadata.
+ *
+ * @ingroup ingress
+ *
+ * This is additional data associated with each packet.
+ * This structure is opaque and accessed through the @ref ingress.
+ *
+ * Also, the buffer population operation currently assumes that standard
+ * metadata is at least as large as minimal metadata, and will need to be
+ * modified if that is no longer the case.
+ */
+typedef struct
+{
+#ifdef __DOXYGEN__
+ /** This structure is opaque. */
+ unsigned char opaque[24];
+#else
+ /** The overall ordinal of the packet */
+ unsigned int __packet_ordinal;
+ /** The ordinal of the packet within the group */
+ unsigned int __group_ordinal;
+ /** The best flow hash IPP could compute. */
+ unsigned int __flow_hash;
+ /** Flags pertaining to checksum calculation, packet type, etc. */
+ unsigned int __flags;
+ /** The first word of "user data". */
+ unsigned int __user_data_0;
+ /** The second word of "user data". */
+ unsigned int __user_data_1;
+#endif
+}
+netio_pkt_metadata_t;
+
+
+/** To ensure that the L3 header is aligned mod 4, the L2 header should be
+ * aligned mod 4 plus 2, since every supported L2 header is 4n + 2 bytes
+ * long. The standard way to do this is to simply add 2 bytes of padding
+ * before the L2 header.
+ */
+#define NETIO_PACKET_PADDING 2
+
+
+
+/**
+ * @brief Ethernet minimal (egress) packet metadata.
+ *
+ * @ingroup egress
+ *
+ * This structure represents information about packets which have
+ * been processed by @ref netio_populate_buffer() or
+ * @ref netio_populate_prepend_buffer(). This structure is opaque
+ * and accessed through the @ref egress.
+ *
+ * @internal This structure is actually copied into the memory used by
+ * standard metadata, which is assumed to be large enough.
+ */
+typedef struct
+{
+#ifdef __DOXYGEN__
+ /** This structure is opaque. */
+ unsigned char opaque[14];
+#else
+ /** The offset of the L2 header from the start of the packet data. */
+ unsigned short l2_offset;
+ /** The offset of the L3 header from the start of the packet data. */
+ unsigned short l3_offset;
+ /** Where to write the checksum. */
+ unsigned char csum_location;
+ /** Where to start checksumming from. */
+ unsigned char csum_start;
+ /** Flags pertaining to checksum calculation etc. */
+ unsigned short flags;
+ /** The L2 length of the packet. */
+ unsigned short l2_length;
+ /** The checksum with which to seed the checksum generator. */
+ unsigned short csum_seed;
+ /** How much to checksum. */
+ unsigned short csum_length;
+#endif
+}
+netio_pkt_minimal_metadata_t;
+
+
+#ifndef __DOXYGEN__
+
+/**
+ * @brief An I/O notification header.
+ *
+ * This is the first word of data received from an I/O shim in a notification
+ * packet. It contains framing and status information.
+ */
+typedef union
+{
+ unsigned int word; /**< The whole word. */
+ /** The various fields. */
+ struct
+ {
+ unsigned int __channel:7; /**< Resource channel. */
+ unsigned int __type:4; /**< Type. */
+ unsigned int __ack:1; /**< Whether an acknowledgement is needed. */
+ unsigned int __reserved:1; /**< Reserved. */
+ unsigned int __protocol:1; /**< A protocol-specific word is added. */
+ unsigned int __status:2; /**< Status of the transfer. */
+ unsigned int __framing:2; /**< Framing of the transfer. */
+ unsigned int __transfer_size:14; /**< Transfer size in bytes (total). */
+ } bits;
+}
+__netio_pkt_notif_t;
+
+
+/**
+ * Returns the base address of the packet.
+ */
+#define _NETIO_PKT_HANDLE_BASE(p) \
+ ((unsigned char*)((p).word & 0xFFFFFFC0))
+
+/**
+ * Returns the base address of the packet.
+ */
+#define _NETIO_PKT_BASE(p) \
+ _NETIO_PKT_HANDLE_BASE(p->__packet)
+
+/**
+ * @brief An I/O notification packet (second word)
+ *
+ * This is the second word of data received from an I/O shim in a notification
+ * packet. This is the virtual address of the packet buffer, plus some flag
+ * bits. (The virtual address of the packet is always 256-byte aligned so we
+ * have room for 8 bits' worth of flags in the low 8 bits.)
+ *
+ * @internal
+ * NOTE: The low two bits must contain "__queue", so the "packet size"
+ * (SIZE_SMALL, SIZE_LARGE, or SIZE_JUMBO) can be determined quickly.
+ *
+ * If __addr or __offset are moved, _NETIO_PKT_BASE
+ * (defined above) must be changed.
+ */
+typedef union
+{
+ unsigned int word; /**< The whole word. */
+ /** The various fields. */
+ struct
+ {
+ /** Which queue the packet will be returned to once it is sent back to
+ the IPP. This is one of the SIZE_xxx values. */
+ unsigned int __queue:2;
+
+ /** The IPP handle of the sending IPP. */
+ unsigned int __ipp_handle:2;
+
+ /** Reserved for future use. */
+ unsigned int __reserved:1;
+
+ /** If 1, this packet has minimal (egress) metadata; otherwise, it
+ has standard (ingress) metadata. */
+ unsigned int __minimal:1;
+
+ /** Offset of the metadata within the packet. This value is multiplied
+ * by 64 and added to the base packet address to get the metadata
+ * address. Note that this field is aligned within the word such that
+ * you can easily extract the metadata address with a 26-bit mask. */
+ unsigned int __offset:2;
+
+ /** The top 24 bits of the packet's virtual address. */
+ unsigned int __addr:24;
+ } bits;
+}
+__netio_pkt_handle_t;
+
+#endif /* !__DOXYGEN__ */
+
+
+/**
+ * @brief A handle for an I/O packet's storage.
+ * @ingroup ingress
+ *
+ * netio_pkt_handle_t encodes the concept of a ::netio_pkt_t with its
+ * packet metadata removed. It is a much smaller type that exists to
+ * facilitate applications where the full ::netio_pkt_t type is too
+ * large, such as those that cache enormous numbers of packets or wish
+ * to transmit packet descriptors over the UDN.
+ *
+ * Because there is no metadata, most ::netio_pkt_t operations cannot be
+ * performed on a netio_pkt_handle_t. It supports only
+ * netio_free_handle() (to free the buffer) and
+ * NETIO_PKT_CUSTOM_DATA_H() (to access a pointer to its contents).
+ * The application must acquire any additional metadata it wants from the
+ * original ::netio_pkt_t and record it separately.
+ *
+ * A netio_pkt_handle_t can be extracted from a ::netio_pkt_t by calling
+ * NETIO_PKT_HANDLE(). An invalid handle (analogous to NULL) can be
+ * created by assigning the value ::NETIO_PKT_HANDLE_NONE. A handle can
+ * be tested for validity with NETIO_PKT_HANDLE_IS_VALID().
+ */
+typedef struct
+{
+ unsigned int word; /**< Opaque bits. */
+} netio_pkt_handle_t;
+
+/**
+ * @brief A packet descriptor.
+ *
+ * @ingroup ingress
+ * @ingroup egress
+ *
+ * This data structure represents a packet. The structure is manipulated
+ * through the @ref ingress and the @ref egress.
+ *
+ * While the contents of a netio_pkt_t are opaque, the structure itself is
+ * portable. This means that it may be shared between all tiles which have
+ * done a netio_input_register() call for the interface on which the pkt_t
+ * was initially received (via netio_get_packet()) or retrieved (via
+ * netio_get_buffer()). The contents of a netio_pkt_t can be transmitted to
+ * another tile via shared memory, or via a UDN message, or by other means.
+ * The destination tile may then use the pkt_t as if it had originally been
+ * received locally; it may read or write the packet's data, read its
+ * metadata, free the packet, send the packet, transfer the netio_pkt_t to
+ * yet another tile, and so forth.
+ *
+ * Once a netio_pkt_t has been transferred to a second tile, the first tile
+ * should not reference the original copy; in particular, if more than one
+ * tile frees or sends the same netio_pkt_t, the IPP's packet free lists will
+ * become corrupted. Note also that each tile which reads or modifies
+ * packet data must obey the memory coherency rules outlined in @ref input.
+ */
+typedef struct
+{
+#ifdef __DOXYGEN__
+ /** This structure is opaque. */
+ unsigned char opaque[32];
+#else
+ /** For an ingress packet (one with standard metadata), this is the
+ * notification header we got from the I/O shim. For an egress packet
+ * (one with minimal metadata), this word is zero if the packet has not
+ * been populated, and nonzero if it has. */
+ __netio_pkt_notif_t __notif_header;
+
+ /** Virtual address of the packet buffer, plus state flags. */
+ __netio_pkt_handle_t __packet;
+
+ /** Metadata associated with the packet. */
+ netio_pkt_metadata_t __metadata;
+#endif
+}
+netio_pkt_t;
+
+
+#ifndef __DOXYGEN__
+
+#define __NETIO_PKT_NOTIF_HEADER(pkt) ((pkt)->__notif_header)
+#define __NETIO_PKT_IPP_HANDLE(pkt) ((pkt)->__packet.bits.__ipp_handle)
+#define __NETIO_PKT_QUEUE(pkt) ((pkt)->__packet.bits.__queue)
+#define __NETIO_PKT_NOTIF_HEADER_M(mda, pkt) ((pkt)->__notif_header)
+#define __NETIO_PKT_IPP_HANDLE_M(mda, pkt) ((pkt)->__packet.bits.__ipp_handle)
+#define __NETIO_PKT_MINIMAL(pkt) ((pkt)->__packet.bits.__minimal)
+#define __NETIO_PKT_QUEUE_M(mda, pkt) ((pkt)->__packet.bits.__queue)
+#define __NETIO_PKT_FLAGS_M(mda, pkt) ((mda)->__flags)
+
+/* Packet information table, used by the attribute access functions below. */
+extern const uint16_t _netio_pkt_info[];
+
+#endif /* __DOXYGEN__ */
+
+
+#ifndef __DOXYGEN__
+/* These macros are deprecated and will disappear in a future MDE release. */
+#define NETIO_PKT_GOOD_CHECKSUM(pkt) \
+ NETIO_PKT_L4_CSUM_CORRECT(pkt)
+#define NETIO_PKT_GOOD_CHECKSUM_M(mda, pkt) \
+ NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt)
+#endif /* __DOXYGEN__ */
+
+
+/* Packet attribute access functions. */
+
+/** Return a pointer to the metadata for a packet.
+ * @ingroup ingress
+ *
+ * Calling this function once and passing the result to other retrieval
+ * functions with a "_M" suffix usually improves performance. This
+ * function must be called on an 'ingress' packet (i.e. one retrieved
+ * by @ref netio_get_packet(), on which @ref netio_populate_buffer() or
+ * @ref netio_populate_prepend_buffer() have not been called). Use of this
+ * function on an 'egress' packet will cause an assertion failure.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to the packet's standard metadata.
+ */
+static __inline netio_pkt_metadata_t*
+NETIO_PKT_METADATA(netio_pkt_t* pkt)
+{
+ netio_assert(!pkt->__packet.bits.__minimal);
+ return &pkt->__metadata;
+}
+
+
+/** Return a pointer to the minimal metadata for a packet.
+ * @ingroup egress
+ *
+ * Calling this function once and passing the result to other retrieval
+ * functions with a "_MM" suffix usually improves performance. This
+ * function must be called on an 'egress' packet (i.e. one on which
+ * @ref netio_populate_buffer() or @ref netio_populate_prepend_buffer()
+ * have been called, or one retrieved by @ref netio_get_buffer()). Use of
+ * this function on an 'ingress' packet will cause an assertion failure.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to the packet's minimal metadata.
+ */
+static __inline netio_pkt_minimal_metadata_t*
+NETIO_PKT_MINIMAL_METADATA(netio_pkt_t* pkt)
+{
+ netio_assert(pkt->__packet.bits.__minimal);
+ return (netio_pkt_minimal_metadata_t*) &pkt->__metadata;
+}
+
+
+/** Determine whether a packet has 'minimal' metadata.
+ * @ingroup pktfuncs
+ *
+ * This function will return nonzero if the packet is an 'egress'
+ * packet (i.e. one on which @ref netio_populate_buffer() or
+ * @ref netio_populate_prepend_buffer() have been called, or one
+ * retrieved by @ref netio_get_buffer()), and zero if the packet
+ * is an 'ingress' packet (i.e. one retrieved by @ref netio_get_packet(),
+ * which has not been converted into an 'egress' packet).
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the packet has minimal metadata.
+ */
+static __inline unsigned int
+NETIO_PKT_IS_MINIMAL(netio_pkt_t* pkt)
+{
+ return pkt->__packet.bits.__minimal;
+}
+
+
+/** Return a handle for a packet's storage.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return A handle for the packet's storage.
+ */
+static __inline netio_pkt_handle_t
+NETIO_PKT_HANDLE(netio_pkt_t* pkt)
+{
+ netio_pkt_handle_t h;
+ h.word = pkt->__packet.word;
+ return h;
+}
+
+
+/** A special reserved value indicating the absence of a packet handle.
+ *
+ * @ingroup pktfuncs
+ */
+#define NETIO_PKT_HANDLE_NONE ((netio_pkt_handle_t) { 0 })
+
+
+/** Test whether a packet handle is valid.
+ *
+ * Applications may wish to use the reserved value NETIO_PKT_HANDLE_NONE
+ * to indicate no packet at all. This function tests to see if a packet
+ * handle is a real handle, not this special reserved value.
+ *
+ * @ingroup pktfuncs
+ *
+ * @param[in] handle Handle on which to operate.
+ * @return One if the packet handle is valid, else zero.
+ */
+static __inline unsigned int
+NETIO_PKT_HANDLE_IS_VALID(netio_pkt_handle_t handle)
+{
+ return handle.word != 0;
+}
+
+
+
+/** Return a pointer to the start of the packet's custom header.
+ * A custom header may or may not be present, depending upon the IPP; its
+ * contents and alignment are also IPP-dependent. Currently, none of the
+ * standard IPPs supplied by Tilera produce a custom header. If present,
+ * the custom header precedes the L2 header in the packet buffer.
+ * @ingroup ingress
+ *
+ * @param[in] handle Handle on which to operate.
+ * @return A pointer to the start of the packet.
+ */
+static __inline unsigned char*
+NETIO_PKT_CUSTOM_DATA_H(netio_pkt_handle_t handle)
+{
+ return _NETIO_PKT_HANDLE_BASE(handle) + NETIO_PACKET_PADDING;
+}
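+
+/* Illustrative sketch: extracting a handle from a packet, validating it, and
+ * obtaining a pointer to the packet contents.  'pkt' is assumed to be a
+ * netio_pkt_t previously received on this queue.
+ *
+ * @code
+ * netio_pkt_handle_t h = NETIO_PKT_HANDLE(&pkt);
+ * if (NETIO_PKT_HANDLE_IS_VALID(h))
+ * {
+ *   unsigned char* data = NETIO_PKT_CUSTOM_DATA_H(h);
+ *   // ... examine or transmit the contents via 'data' ...
+ * }
+ * @endcode
+ */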
+
+
+/** Return the length of the packet's custom header.
+ * A custom header may or may not be present, depending upon the IPP; its
+ * contents and alignment are also IPP-dependent. Currently, none of the
+ * standard IPPs supplied by Tilera produce a custom header. If present,
+ * the custom header precedes the L2 header in the packet buffer.
+ *
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet's custom header, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_CUSTOM_HEADER_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ /*
+ * Note that we effectively need to extract a quantity from the flags word
+ * which is measured in words, and then turn it into bytes by shifting
+ * it left by 2. We do this all at once by just shifting right two less
+ * bits, and shifting the mask up two bits.
+ */
+ return ((mda->__flags >> (_NETIO_PKT_CUSTOM_LEN_SHIFT - 2)) &
+ (_NETIO_PKT_CUSTOM_LEN_RMASK << 2));
+}
+
+
+/** Return the length of the packet, starting with the custom header.
+ * A custom header may or may not be present, depending upon the IPP; its
+ * contents and alignment are also IPP-dependent. Currently, none of the
+ * standard IPPs supplied by Tilera produce a custom header. If present,
+ * the custom header precedes the L2 header in the packet buffer.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_CUSTOM_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return (__NETIO_PKT_NOTIF_HEADER(pkt).bits.__transfer_size -
+ NETIO_PACKET_PADDING);
+}
+
+
+/** Return a pointer to the start of the packet's custom header.
+ * A custom header may or may not be present, depending upon the IPP; its
+ * contents and alignment are also IPP-dependent. Currently, none of the
+ * standard IPPs supplied by Tilera produce a custom header. If present,
+ * the custom header precedes the L2 header in the packet buffer.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to the start of the packet.
+ */
+static __inline unsigned char*
+NETIO_PKT_CUSTOM_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return NETIO_PKT_CUSTOM_DATA_H(NETIO_PKT_HANDLE(pkt));
+}
+
+
+/** Return the length of the packet's L2 (Ethernet plus VLAN or SNAP) header.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet's L2 header, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L2_HEADER_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ /*
+ * Note that we effectively need to extract a quantity from the flags word
+ * which is measured in words, and then turn it into bytes by shifting
+ * it left by 2. We do this all at once by just shifting right two less
+ * bits, and shifting the mask up two bits. We then add two bytes.
+ */
+ return ((mda->__flags >> (_NETIO_PKT_L2_LEN_SHIFT - 2)) &
+ (_NETIO_PKT_L2_LEN_RMASK << 2)) + 2;
+}
+
+
+/** Return the length of the packet, starting with the L2 (Ethernet) header.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L2_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return (NETIO_PKT_CUSTOM_LENGTH_M(mda, pkt) -
+ NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda,pkt));
+}
+
+
+/** Return a pointer to the start of the packet's L2 (Ethernet) header.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to the start of the packet's L2 header.
+ */
+static __inline unsigned char*
+NETIO_PKT_L2_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return (NETIO_PKT_CUSTOM_DATA_M(mda, pkt) +
+ NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt));
+}
+
+
+/** Retrieve the length of the packet, starting with the L3 (generally,
+ * the IP) header.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Length of the packet's L3 header and data, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L3_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return (NETIO_PKT_L2_LENGTH_M(mda, pkt) -
+ NETIO_PKT_L2_HEADER_LENGTH_M(mda,pkt));
+}
+
+
+/** Return a pointer to the packet's L3 (generally, the IP) header.
+ * @ingroup ingress
+ *
+ * Note that we guarantee word alignment of the L3 header.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to the packet's L3 header.
+ */
+static __inline unsigned char*
+NETIO_PKT_L3_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return (NETIO_PKT_L2_DATA_M(mda, pkt) +
+ NETIO_PKT_L2_HEADER_LENGTH_M(mda, pkt));
+}
+
+
+/** Return the ordinal of the packet.
+ * @ingroup ingress
+ *
+ * Each packet is given an ordinal number when it is delivered by the IPP.
+ * In the medium term, the ordinal is unique and monotonically increasing,
+ * being incremented by 1 for each packet; the ordinal of the first packet
+ * delivered after the IPP starts is zero. (Since the ordinal is of finite
+ * size, given enough input packets, it will eventually wrap around to zero;
+ * in the long term, therefore, ordinals are not unique.) The ordinals
+ * handed out by different IPPs are not disjoint, so two packets from
+ * different IPPs may have identical ordinals. Packets dropped by the
+ * IPP or by the I/O shim are not assigned ordinals.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's per-IPP packet ordinal.
+ */
+static __inline unsigned int
+NETIO_PKT_ORDINAL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return mda->__packet_ordinal;
+}
+
+
+/** Return the per-group ordinal of the packet.
+ * @ingroup ingress
+ *
+ * Each packet is given a per-group ordinal number when it is
+ * delivered by the IPP. By default, the group is the packet's VLAN,
+ * although the IPP can be recompiled to use different values. In
+ * the medium term, the ordinal is unique and monotonically
+ * increasing, being incremented by 1 for each packet; the ordinal of
+ * the first packet distributed to a particular group is zero.
+ * (Since the ordinal is of finite size, given enough input packets,
+ * it will eventually wrap around to zero; in the long term,
+ * therefore, ordinals are not unique.) The ordinals handed out by
+ * different IPPs are not disjoint, so two packets from different IPPs
+ * may have identical ordinals; similarly, packets distributed to
+ * different groups may have identical ordinals. Packets dropped by
+ * the IPP or by the I/O shim are not assigned ordinals.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's per-IPP, per-group ordinal.
+ */
+static __inline unsigned int
+NETIO_PKT_GROUP_ORDINAL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return mda->__group_ordinal;
+}
+
+
+/** Return the VLAN ID assigned to the packet.
+ * @ingroup ingress
+ *
+ * This value is usually contained within the packet header.
+ *
+ * This value will be zero if the packet does not have a VLAN tag, or if
+ * this value was not extracted from the packet.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's VLAN ID.
+ */
+static __inline unsigned short
+NETIO_PKT_VLAN_ID_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ int vl = (mda->__flags >> _NETIO_PKT_VLAN_SHIFT) & _NETIO_PKT_VLAN_RMASK;
+ unsigned short* pkt_p;
+ int index;
+ unsigned short val;
+
+ if (vl == _NETIO_PKT_VLAN_NONE)
+ return 0;
+
+ pkt_p = (unsigned short*) NETIO_PKT_L2_DATA_M(mda, pkt);
+ index = (mda->__flags >> _NETIO_PKT_TYPE_SHIFT) & _NETIO_PKT_TYPE_RMASK;
+
+ val = pkt_p[(_netio_pkt_info[index] >> _NETIO_PKT_INFO_VLAN_SHIFT) &
+ _NETIO_PKT_INFO_VLAN_RMASK];
+
+#ifdef __TILECC__
+ return (__insn_bytex(val) >> 16) & 0xFFF;
+#else
+ return (__builtin_bswap32(val) >> 16) & 0xFFF;
+#endif
+}
+
+
+/** Return the ethertype of the packet.
+ * @ingroup ingress
+ *
+ * This value is usually contained within the packet header.
+ *
+ * This value is reliable if @ref NETIO_PKT_ETHERTYPE_RECOGNIZED_M()
+ * returns true; otherwise, it may not be well defined.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's ethertype.
+ */
+static __inline unsigned short
+NETIO_PKT_ETHERTYPE_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ unsigned short* pkt_p = (unsigned short*) NETIO_PKT_L2_DATA_M(mda, pkt);
+ int index = (mda->__flags >> _NETIO_PKT_TYPE_SHIFT) & _NETIO_PKT_TYPE_RMASK;
+
+ unsigned short val =
+ pkt_p[(_netio_pkt_info[index] >> _NETIO_PKT_INFO_ETYPE_SHIFT) &
+ _NETIO_PKT_INFO_ETYPE_RMASK];
+
+ return __builtin_bswap32(val) >> 16;
+}
+
+
+/** Return the flow hash computed on the packet.
+ * @ingroup ingress
+ *
+ * For TCP and UDP packets, this hash is calculated by hashing together
+ * the "5-tuple" values, specifically the source IP address, destination
+ * IP address, protocol type, source port and destination port.
+ * The hash value is intended to be helpful for millions of distinct
+ * flows.
+ *
+ * For IPv4 or IPv6 packets which are neither TCP nor UDP, the flow hash is
+ * derived by hashing together the source and destination IP addresses.
+ *
+ * For MPLS-encapsulated packets, the flow hash is derived by hashing
+ * the first MPLS label.
+ *
+ * For all other packets the flow hash is computed from the source
+ * and destination Ethernet addresses.
+ *
+ * The hash is symmetric, meaning it produces the same value if the
+ * source and destination are swapped. The only exceptions are
+ * tunneling protocols 0x04 (IP in IP Encapsulation), 0x29 (Simple
+ * Internet Protocol), 0x2F (General Routing Encapsulation) and 0x32
+ * (Encap Security Payload), which use only the destination address
+ * since the source address is not meaningful.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's 32-bit flow hash.
+ */
+static __inline unsigned int
+NETIO_PKT_FLOW_HASH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return mda->__flow_hash;
+}
+
+
+/** Return the first word of "user data" for the packet.
+ *
+ * The contents of the user data words depend on the IPP.
+ *
+ * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the first
+ * word of user data contains the least significant bits of the 64-bit
+ * arrival cycle count (see @c get_cycle_count_low()).
+ *
+ * See the <em>System Programmer's Guide</em> for details.
+ *
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's first word of "user data".
+ */
+static __inline unsigned int
+NETIO_PKT_USER_DATA_0_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return mda->__user_data_0;
+}
+
+
+/** Return the second word of "user data" for the packet.
+ *
+ * The contents of the user data words depend on the IPP.
+ *
+ * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the second
+ * word of user data contains the most significant bits of the 64-bit
+ * arrival cycle count (see @c get_cycle_count_high()).
+ *
+ * See the <em>System Programmer's Guide</em> for details.
+ *
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's second word of "user data".
+ */
+static __inline unsigned int
+NETIO_PKT_USER_DATA_1_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return mda->__user_data_1;
+}
+
+
+/** Determine whether the L4 (TCP/UDP) checksum was calculated.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the L4 checksum was calculated.
+ */
+static __inline unsigned int
+NETIO_PKT_L4_CSUM_CALCULATED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return !(mda->__flags & _NETIO_PKT_NO_L4_CSUM_MASK);
+}
+
+
+/** Determine whether the L4 (TCP/UDP) checksum was calculated and found to
+ * be correct.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the checksum was calculated and is correct.
+ */
+static __inline unsigned int
+NETIO_PKT_L4_CSUM_CORRECT_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return !(mda->__flags &
+ (_NETIO_PKT_BAD_L4_CSUM_MASK | _NETIO_PKT_NO_L4_CSUM_MASK));
+}
+
+
+/** Determine whether the L3 (IP) checksum was calculated.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the L3 (IP) checksum was calculated.
+*/
+static __inline unsigned int
+NETIO_PKT_L3_CSUM_CALCULATED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return !(mda->__flags & _NETIO_PKT_NO_L3_CSUM_MASK);
+}
+
+
+/** Determine whether the L3 (IP) checksum was calculated and found to be
+ * correct.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the checksum was calculated and is correct.
+ */
+static __inline unsigned int
+NETIO_PKT_L3_CSUM_CORRECT_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return !(mda->__flags &
+ (_NETIO_PKT_BAD_L3_CSUM_MASK | _NETIO_PKT_NO_L3_CSUM_MASK));
+}
+
+
+/** Determine whether the ethertype was recognized and L3 packet data was
+ * processed.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the ethertype was recognized and L3 packet data was
+ * processed.
+ */
+static __inline unsigned int
+NETIO_PKT_ETHERTYPE_RECOGNIZED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return !(mda->__flags & _NETIO_PKT_TYPE_UNRECOGNIZED_MASK);
+}
+
+
+/** Retrieve the status of a packet and any errors that may have occurred
+ * during ingress processing (length mismatches, CRC errors, etc.).
+ * @ingroup ingress
+ *
+ * Note that packets for which @ref NETIO_PKT_ETHERTYPE_RECOGNIZED()
+ * returns zero are always reported as underlength, as there is no a priori
+ * means to determine their length. Normally, applications should use
+ * @ref NETIO_PKT_BAD_M() instead of explicitly checking status with this
+ * function.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's status.
+ */
+static __inline netio_pkt_status_t
+NETIO_PKT_STATUS_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return (netio_pkt_status_t) __NETIO_PKT_NOTIF_HEADER(pkt).bits.__status;
+}
+
+
+/** Report whether a packet is bad (i.e., was shorter than expected based on
+ * its headers, or had a bad CRC).
+ * @ingroup ingress
+ *
+ * Note that this function does not verify L3 or L4 checksums.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the packet is bad and should be discarded.
+ */
+static __inline unsigned int
+NETIO_PKT_BAD_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return ((NETIO_PKT_STATUS_M(mda, pkt) & 1) &&
+ (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(mda, pkt) ||
+ NETIO_PKT_STATUS_M(mda, pkt) == NETIO_PKT_STATUS_BAD));
+}
+
+
+/** Return the length of the packet, starting with the L2 (Ethernet) header.
+ * @ingroup egress
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L2_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
+{
+ return mmd->l2_length;
+}
+
+
+/** Return the length of the L2 (Ethernet) header.
+ * @ingroup egress
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet's L2 header, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L2_HEADER_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd,
+ netio_pkt_t* pkt)
+{
+ return mmd->l3_offset - mmd->l2_offset;
+}
+
+
+/** Return the length of the packet, starting with the L3 (IP) header.
+ * @ingroup egress
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Length of the packet's L3 header and data, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L3_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
+{
+ return (NETIO_PKT_L2_LENGTH_MM(mmd, pkt) -
+ NETIO_PKT_L2_HEADER_LENGTH_MM(mmd, pkt));
+}
+
+
+/** Return a pointer to the packet's L3 (generally, the IP) header.
+ * @ingroup egress
+ *
+ * Note that we guarantee word alignment of the L3 header.
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to the packet's L3 header.
+ */
+static __inline unsigned char*
+NETIO_PKT_L3_DATA_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
+{
+ return _NETIO_PKT_BASE(pkt) + mmd->l3_offset;
+}
+
+
+/** Return a pointer to the packet's L2 (Ethernet) header.
+ * @ingroup egress
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to the start of the packet.
+ */
+static __inline unsigned char*
+NETIO_PKT_L2_DATA_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
+{
+ return _NETIO_PKT_BASE(pkt) + mmd->l2_offset;
+}
+
+
+/** Retrieve the status of a packet and any errors that may have occurred
+ * during ingress processing (length mismatches, CRC errors, etc.).
+ * @ingroup ingress
+ *
+ * Note that packets for which @ref NETIO_PKT_ETHERTYPE_RECOGNIZED()
+ * returns zero are always reported as underlength, as there is no a priori
+ * means to determine their length. Normally, applications should use
+ * @ref NETIO_PKT_BAD() instead of explicitly checking status with this
+ * function.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's status.
+ */
+static __inline netio_pkt_status_t
+NETIO_PKT_STATUS(netio_pkt_t* pkt)
+{
+ netio_assert(!pkt->__packet.bits.__minimal);
+
+ return (netio_pkt_status_t) __NETIO_PKT_NOTIF_HEADER(pkt).bits.__status;
+}
+
+
+/** Report whether a packet is bad (i.e., was shorter than expected based on
+ * its headers, or had a bad CRC).
+ * @ingroup ingress
+ *
+ * Note that this function does not verify L3 or L4 checksums.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the packet is bad and should be discarded.
+ */
+static __inline unsigned int
+NETIO_PKT_BAD(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_BAD_M(mda, pkt);
+}
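+
+/* Illustrative usage sketch (editor's example, not part of the NetIO API):
+ * once a packet has been received (for instance via netio_get_packet()),
+ * an application would typically discard it if it is bad before examining
+ * its contents.  The helper functions below are hypothetical.
+ *
+ * @code
+ * void handle_ingress(netio_pkt_t* pkt)
+ * {
+ *   if (NETIO_PKT_BAD(pkt))
+ *   {
+ *     drop_and_release(pkt);   // hypothetical: return the buffer to the IPP
+ *     return;
+ *   }
+ *   process_l2_frame(NETIO_PKT_L2_DATA(pkt), NETIO_PKT_L2_LENGTH(pkt));
+ * }
+ * @endcode
+ */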
+
+
+/** Return the length of the packet's custom header.
+ * A custom header may or may not be present, depending upon the IPP; its
+ * contents and alignment are also IPP-dependent. Currently, none of the
+ * standard IPPs supplied by Tilera produce a custom header. If present,
+ * the custom header precedes the L2 header in the packet buffer.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet's custom header, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_CUSTOM_HEADER_LENGTH(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt);
+}
+
+
+/** Return the length of the packet, starting with the custom header.
+ * A custom header may or may not be present, depending upon the IPP; its
+ * contents and alignment are also IPP-dependent. Currently, none of the
+ * standard IPPs supplied by Tilera produce a custom header. If present,
+ * the custom header precedes the L2 header in the packet buffer.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_CUSTOM_LENGTH(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_CUSTOM_LENGTH_M(mda, pkt);
+}
+
+
+/** Return a pointer to the packet's custom header.
+ * A custom header may or may not be present, depending upon the IPP; its
+ * contents and alignment are also IPP-dependent. Currently, none of the
+ * standard IPPs supplied by Tilera produce a custom header. If present,
+ * the custom header precedes the L2 header in the packet buffer.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to the start of the packet.
+ */
+static __inline unsigned char*
+NETIO_PKT_CUSTOM_DATA(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_CUSTOM_DATA_M(mda, pkt);
+}
+
+
+/** Return the length of the packet's L2 (Ethernet plus VLAN or SNAP) header.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet's L2 header, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L2_HEADER_LENGTH(netio_pkt_t* pkt)
+{
+ if (NETIO_PKT_IS_MINIMAL(pkt))
+ {
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ return NETIO_PKT_L2_HEADER_LENGTH_MM(mmd, pkt);
+ }
+ else
+ {
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L2_HEADER_LENGTH_M(mda, pkt);
+ }
+}
+
+
+/** Return the length of the packet, starting with the L2 (Ethernet) header.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L2_LENGTH(netio_pkt_t* pkt)
+{
+ if (NETIO_PKT_IS_MINIMAL(pkt))
+ {
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ return NETIO_PKT_L2_LENGTH_MM(mmd, pkt);
+ }
+ else
+ {
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L2_LENGTH_M(mda, pkt);
+ }
+}
+
+
+/** Return a pointer to the packet's L2 (Ethernet) header.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to the start of the packet.
+ */
+static __inline unsigned char*
+NETIO_PKT_L2_DATA(netio_pkt_t* pkt)
+{
+ if (NETIO_PKT_IS_MINIMAL(pkt))
+ {
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ return NETIO_PKT_L2_DATA_MM(mmd, pkt);
+ }
+ else
+ {
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L2_DATA_M(mda, pkt);
+ }
+}
+
+
+/** Retrieve the length of the packet, starting with the L3 (generally, the IP)
+ * header.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Length of the packet's L3 header and data, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L3_LENGTH(netio_pkt_t* pkt)
+{
+ if (NETIO_PKT_IS_MINIMAL(pkt))
+ {
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ return NETIO_PKT_L3_LENGTH_MM(mmd, pkt);
+ }
+ else
+ {
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L3_LENGTH_M(mda, pkt);
+ }
+}
+
+
+/** Return a pointer to the packet's L3 (generally, the IP) header.
+ * @ingroup pktfuncs
+ *
+ * Note that we guarantee word alignment of the L3 header.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to the packet's L3 header.
+ */
+static __inline unsigned char*
+NETIO_PKT_L3_DATA(netio_pkt_t* pkt)
+{
+ if (NETIO_PKT_IS_MINIMAL(pkt))
+ {
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ return NETIO_PKT_L3_DATA_MM(mmd, pkt);
+ }
+ else
+ {
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L3_DATA_M(mda, pkt);
+ }
+}
+
+
+/** Return the ordinal of the packet.
+ * @ingroup ingress
+ *
+ * Each packet is given an ordinal number when it is delivered by the IPP.
+ * In the medium term, the ordinal is unique and monotonically increasing,
+ * being incremented by 1 for each packet; the ordinal of the first packet
+ * delivered after the IPP starts is zero. (Since the ordinal is of finite
+ * size, given enough input packets, it will eventually wrap around to zero;
+ * in the long term, therefore, ordinals are not unique.) The ordinals
+ * handed out by different IPPs are not disjoint, so two packets from
+ * different IPPs may have identical ordinals. Packets dropped by the
+ * IPP or by the I/O shim are not assigned ordinals.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's per-IPP packet ordinal.
+ */
+static __inline unsigned int
+NETIO_PKT_ORDINAL(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_ORDINAL_M(mda, pkt);
+}
+
+
+/** Return the per-group ordinal of the packet.
+ * @ingroup ingress
+ *
+ * Each packet is given a per-group ordinal number when it is
+ * delivered by the IPP. By default, the group is the packet's VLAN,
+ * although the IPP can be recompiled to use different values. In
+ * the medium term, the ordinal is unique and monotonically
+ * increasing, being incremented by 1 for each packet; the ordinal of
+ * the first packet distributed to a particular group is zero.
+ * (Since the ordinal is of finite size, given enough input packets,
+ * it will eventually wrap around to zero; in the long term,
+ * therefore, ordinals are not unique.) The ordinals handed out by
+ * different IPPs are not disjoint, so two packets from different IPPs
+ * may have identical ordinals; similarly, packets distributed to
+ * different groups may have identical ordinals. Packets dropped by
+ * the IPP or by the I/O shim are not assigned ordinals.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's per-IPP, per-group ordinal.
+ */
+static __inline unsigned int
+NETIO_PKT_GROUP_ORDINAL(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_GROUP_ORDINAL_M(mda, pkt);
+}
+
+
+/** Return the VLAN ID assigned to the packet.
+ * @ingroup ingress
+ *
+ * This is usually also contained within the packet header. If the packet
+ * does not have a VLAN tag, the VLAN ID returned by this function is zero.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's VLAN ID.
+ */
+static __inline unsigned short
+NETIO_PKT_VLAN_ID(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_VLAN_ID_M(mda, pkt);
+}
+
+
+/** Return the ethertype of the packet.
+ * @ingroup ingress
+ *
+ * This value is reliable if @ref NETIO_PKT_ETHERTYPE_RECOGNIZED()
+ * returns true; otherwise, it may not be well defined.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's ethertype.
+ */
+static __inline unsigned short
+NETIO_PKT_ETHERTYPE(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_ETHERTYPE_M(mda, pkt);
+}
+
+
+/** Return the flow hash computed on the packet.
+ * @ingroup ingress
+ *
+ * For TCP and UDP packets, this hash is calculated by hashing together
+ * the "5-tuple" values, specifically the source IP address, destination
+ * IP address, protocol type, source port and destination port.
+ * The hash value is intended to be helpful for millions of distinct
+ * flows.
+ *
+ * For IPv4 or IPv6 packets which are neither TCP nor UDP, the flow hash is
+ * derived by hashing together the source and destination IP addresses.
+ *
+ * For MPLS-encapsulated packets, the flow hash is derived by hashing
+ * the first MPLS label.
+ *
+ * For all other packets the flow hash is computed from the source
+ * and destination Ethernet addresses.
+ *
+ * The hash is symmetric, meaning it produces the same value if the
+ * source and destination are swapped. The only exceptions are
+ * tunneling protocols 0x04 (IP in IP Encapsulation), 0x29 (Simple
+ * Internet Protocol), 0x2F (General Routing Encapsulation) and 0x32
+ * (Encap Security Payload), which use only the destination address
+ * since the source address is not meaningful.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's 32-bit flow hash.
+ */
+static __inline unsigned int
+NETIO_PKT_FLOW_HASH(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_FLOW_HASH_M(mda, pkt);
+}
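+
+/* Illustrative sketch (editor's example): because the flow hash is symmetric
+ * and stable for a given flow, it can be used to spread packets across a
+ * fixed pool of workers while keeping each flow on a single worker.
+ * NUM_WORKERS and the hand-off routine are hypothetical.
+ *
+ * @code
+ * unsigned int worker = NETIO_PKT_FLOW_HASH(pkt) % NUM_WORKERS;
+ * enqueue_for_worker(worker, pkt);
+ * @endcode
+ */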
+
+
+/** Return the first word of "user data" for the packet.
+ *
+ * The contents of the user data words depend on the IPP.
+ *
+ * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the first
+ * word of user data contains the least significant bits of the 64-bit
+ * arrival cycle count (see @c get_cycle_count_low()).
+ *
+ * See the <em>System Programmer's Guide</em> for details.
+ *
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's first word of "user data".
+ */
+static __inline unsigned int
+NETIO_PKT_USER_DATA_0(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_USER_DATA_0_M(mda, pkt);
+}
+
+
+/** Return the second word of "user data" for the packet.
+ *
+ * The contents of the user data words depend on the IPP.
+ *
+ * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the second
+ * word of user data contains the most significant bits of the 64-bit
+ * arrival cycle count (see @c get_cycle_count_high()).
+ *
+ * See the <em>System Programmer's Guide</em> for details.
+ *
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's second word of "user data".
+ */
+static __inline unsigned int
+NETIO_PKT_USER_DATA_1(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_USER_DATA_1_M(mda, pkt);
+}
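+
+/* Illustrative sketch (editor's example): with the standard ipp1, ipp2, or
+ * ipp4 sub-drivers, the two user data words described above can be
+ * recombined into the full 64-bit arrival cycle count.
+ *
+ * @code
+ * uint64_t arrival_cycles =
+ *   ((uint64_t) NETIO_PKT_USER_DATA_1(pkt) << 32) |
+ *   NETIO_PKT_USER_DATA_0(pkt);
+ * @endcode
+ */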
+
+
+/** Determine whether the L4 (TCP/UDP) checksum was calculated.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the L4 checksum was calculated.
+ */
+static __inline unsigned int
+NETIO_PKT_L4_CSUM_CALCULATED(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L4_CSUM_CALCULATED_M(mda, pkt);
+}
+
+
+/** Determine whether the L4 (TCP/UDP) checksum was calculated and found to
+ * be correct.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the checksum was calculated and is correct.
+ */
+static __inline unsigned int
+NETIO_PKT_L4_CSUM_CORRECT(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt);
+}
+
+
+/** Determine whether the L3 (IP) checksum was calculated.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the L3 (IP) checksum was calculated.
+*/
+static __inline unsigned int
+NETIO_PKT_L3_CSUM_CALCULATED(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L3_CSUM_CALCULATED_M(mda, pkt);
+}
+
+
+/** Determine whether the L3 (IP) checksum was calculated and found to be
+ * correct.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the checksum was calculated and is correct.
+ */
+static __inline unsigned int
+NETIO_PKT_L3_CSUM_CORRECT(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L3_CSUM_CORRECT_M(mda, pkt);
+}
+
+
+/** Determine whether the Ethertype was recognized and L3 packet data was
+ * processed.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the Ethertype was recognized and L3 packet data was
+ * processed.
+ */
+static __inline unsigned int
+NETIO_PKT_ETHERTYPE_RECOGNIZED(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_ETHERTYPE_RECOGNIZED_M(mda, pkt);
+}
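+
+/* Illustrative sketch (editor's example): a typical ingress filter might
+ * accept only packets whose ethertype was recognized and whose L3/L4
+ * checksums, where the IPP calculated them, were found to be correct.
+ *
+ * @code
+ * int packet_acceptable(netio_pkt_t* pkt)
+ * {
+ *   if (!NETIO_PKT_ETHERTYPE_RECOGNIZED(pkt))
+ *     return 0;
+ *   if (NETIO_PKT_L3_CSUM_CALCULATED(pkt) && !NETIO_PKT_L3_CSUM_CORRECT(pkt))
+ *     return 0;
+ *   if (NETIO_PKT_L4_CSUM_CALCULATED(pkt) && !NETIO_PKT_L4_CSUM_CORRECT(pkt))
+ *     return 0;
+ *   return 1;
+ * }
+ * @endcode
+ */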
+
+
+/** Set an egress packet's L2 length, using a metadata pointer to speed the
+ * computation.
+ * @ingroup egress
+ *
+ * @param[in,out] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @param[in] len Packet L2 length, in bytes.
+ */
+static __inline void
+NETIO_PKT_SET_L2_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt,
+ int len)
+{
+ mmd->l2_length = len;
+}
+
+
+/** Set an egress packet's L2 length.
+ * @ingroup egress
+ *
+ * @param[in,out] pkt Packet on which to operate.
+ * @param[in] len Packet L2 length, in bytes.
+ */
+static __inline void
+NETIO_PKT_SET_L2_LENGTH(netio_pkt_t* pkt, int len)
+{
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ NETIO_PKT_SET_L2_LENGTH_MM(mmd, pkt, len);
+}
+
+
+/** Set an egress packet's L2 header length, using a metadata pointer to
+ * speed the computation.
+ * @ingroup egress
+ *
+ * It is not normally necessary to call this routine; only the L2 length,
+ * not the header length, is needed to transmit a packet. It may be useful if
+ * the egress packet will later be processed by code which expects to use
+ * functions like @ref NETIO_PKT_L3_DATA() to get a pointer to the L3 payload.
+ *
+ * @param[in,out] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @param[in] len Packet L2 header length, in bytes.
+ */
+static __inline void
+NETIO_PKT_SET_L2_HEADER_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd,
+ netio_pkt_t* pkt, int len)
+{
+ mmd->l3_offset = mmd->l2_offset + len;
+}
+
+
+/** Set an egress packet's L2 header length.
+ * @ingroup egress
+ *
+ * It is not normally necessary to call this routine; only the L2 length,
+ * not the header length, is needed to transmit a packet. It may be useful if
+ * the egress packet will later be processed by code which expects to use
+ * functions like @ref NETIO_PKT_L3_DATA() to get a pointer to the L3 payload.
+ *
+ * @param[in,out] pkt Packet on which to operate.
+ * @param[in] len Packet L2 header length, in bytes.
+ */
+static __inline void
+NETIO_PKT_SET_L2_HEADER_LENGTH(netio_pkt_t* pkt, int len)
+{
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ NETIO_PKT_SET_L2_HEADER_LENGTH_MM(mmd, pkt, len);
+}
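+
+/* Illustrative sketch (editor's example): before transmitting an egress
+ * packet, an application writes headers and payload at NETIO_PKT_L2_DATA()
+ * and records the resulting L2 length (and, if later processing needs it,
+ * the L2 header length).  The frame-building helper is hypothetical, and a
+ * 14-byte untagged Ethernet header is assumed.
+ *
+ * @code
+ * unsigned char* frame = NETIO_PKT_L2_DATA(pkt);
+ * int frame_len = build_frame(frame);       // hypothetical
+ * NETIO_PKT_SET_L2_LENGTH(pkt, frame_len);
+ * NETIO_PKT_SET_L2_HEADER_LENGTH(pkt, 14);
+ * @endcode
+ */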
+
+
+/** Set up an egress packet for hardware checksum computation, using a
+ * metadata pointer to speed the operation.
+ * @ingroup egress
+ *
+ * NetIO provides the ability to automatically calculate a standard
+ * 16-bit Internet checksum on transmitted packets. The application
+ * may specify the point in the packet where the checksum starts, the
+ * number of bytes to be checksummed, and the two bytes in the packet
+ * which will be replaced with the completed checksum. (If the range
+ * of bytes to be checksummed includes the bytes to be replaced, the
+ * initial values of those bytes will be included in the checksum.)
+ *
+ * For some protocols, the packet checksum covers data which is not present
+ * in the packet, or is at least not contiguous to the main data payload.
+ * For instance, the TCP checksum includes a "pseudo-header" which includes
+ * the source and destination IP addresses of the packet. To accommodate
+ * this, the checksum engine may be "seeded" with an initial value, which
+ * the application would need to compute based on the specific protocol's
+ * requirements. Note that the seed is given in host byte order (little-
+ * endian), not network byte order (big-endian); code written to compute a
+ * pseudo-header checksum in network byte order will need to byte-swap it
+ * before use as the seed.
+ *
+ * Note that the checksum is computed as part of the transmission process,
+ * so it will not be present in the packet upon completion of this routine.
+ *
+ * @param[in,out] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @param[in] start Offset within L2 packet of the first byte to include in
+ * the checksum.
+ * @param[in] length Number of bytes to include in the checksum.
+ * @param[in] location Offset within L2 packet of the first of the two bytes
+ * to be replaced with the calculated checksum.
+ * @param[in] seed Initial value of the running checksum before any of the
+ * packet data is added.
+ */
+static __inline void
+NETIO_PKT_DO_EGRESS_CSUM_MM(netio_pkt_minimal_metadata_t* mmd,
+ netio_pkt_t* pkt, int start, int length,
+ int location, uint16_t seed)
+{
+ mmd->csum_start = start;
+ mmd->csum_length = length;
+ mmd->csum_location = location;
+ mmd->csum_seed = seed;
+ mmd->flags |= _NETIO_PKT_NEED_EDMA_CSUM_MASK;
+}
+
+
+/** Set up an egress packet for hardware checksum computation.
+ * @ingroup egress
+ *
+ * NetIO provides the ability to automatically calculate a standard
+ * 16-bit Internet checksum on transmitted packets. The application
+ * may specify the point in the packet where the checksum starts, the
+ * number of bytes to be checksummed, and the two bytes in the packet
+ * which will be replaced with the completed checksum. (If the range
+ * of bytes to be checksummed includes the bytes to be replaced, the
+ * initial values of those bytes will be included in the checksum.)
+ *
+ * For some protocols, the packet checksum covers data which is not present
+ * in the packet, or is at least not contiguous to the main data payload.
+ * For instance, the TCP checksum includes a "pseudo-header" which includes
+ * the source and destination IP addresses of the packet. To accommodate
+ * this, the checksum engine may be "seeded" with an initial value, which
+ * the application would need to compute based on the specific protocol's
+ * requirements. Note that the seed is given in host byte order (little-
+ * endian), not network byte order (big-endian); code written to compute a
+ * pseudo-header checksum in network byte order will need to byte-swap it
+ * before use as the seed.
+ *
+ * Note that the checksum is computed as part of the transmission process,
+ * so it will not be present in the packet upon completion of this routine.
+ *
+ * @param[in,out] pkt Packet on which to operate.
+ * @param[in] start Offset within L2 packet of the first byte to include in
+ * the checksum.
+ * @param[in] length Number of bytes to include in the checksum.
+ * @param[in] location Offset within L2 packet of the first of the two bytes
+ * to be replaced with the calculated checksum.
+ * @param[in] seed Initial value of the running checksum before any of the
+ * packet data is added.
+ */
+static __inline void
+NETIO_PKT_DO_EGRESS_CSUM(netio_pkt_t* pkt, int start, int length,
+ int location, uint16_t seed)
+{
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ NETIO_PKT_DO_EGRESS_CSUM_MM(mmd, pkt, start, length, location, seed);
+}
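+
+/* Illustrative sketch (editor's example): offloading the UDP checksum of an
+ * untagged IPv4/UDP frame.  The offsets assume a 14-byte Ethernet header
+ * followed by a 20-byte IPv4 header; udp_len (UDP header plus payload) and
+ * pseudo_hdr_sum (the precomputed pseudo-header seed, in host byte order as
+ * described above) are assumed to have been computed by the application.
+ *
+ * @code
+ * NETIO_PKT_DO_EGRESS_CSUM(pkt,
+ *                          34,       // checksum starts at the UDP header
+ *                          udp_len,  // bytes covered by the checksum
+ *                          40,       // offset of the UDP checksum field
+ *                          pseudo_hdr_sum);
+ * @endcode
+ */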
+
+
+/** Return the number of bytes which could be prepended to a packet, using a
+ * metadata pointer to speed the operation.
+ * See @ref netio_populate_prepend_buffer() to get a full description of
+ * prepending.
+ *
+ * @param[in,out] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline int
+NETIO_PKT_PREPEND_AVAIL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return (pkt->__packet.bits.__offset << 6) +
+ NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt);
+}
+
+
+/** Return the number of bytes which could be prepended to a packet, using a
+ * metadata pointer to speed the operation.
+ * See @ref netio_populate_prepend_buffer() to get a full description of
+ * prepending.
+ * @ingroup egress
+ *
+ * @param[in,out] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline int
+NETIO_PKT_PREPEND_AVAIL_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
+{
+ return (pkt->__packet.bits.__offset << 6) + mmd->l2_offset;
+}
+
+
+/** Return the number of bytes which could be prepended to a packet.
+ * See @ref netio_populate_prepend_buffer() to get a full description of
+ * prepending.
+ * @ingroup egress
+ *
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline int
+NETIO_PKT_PREPEND_AVAIL(netio_pkt_t* pkt)
+{
+ if (NETIO_PKT_IS_MINIMAL(pkt))
+ {
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ return NETIO_PKT_PREPEND_AVAIL_MM(mmd, pkt);
+ }
+ else
+ {
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_PREPEND_AVAIL_M(mda, pkt);
+ }
+}
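+
+/* Illustrative sketch (editor's example): before prepending an encapsulation
+ * header, an application can verify that enough headroom exists in front of
+ * the current L2 data.  ENCAP_HDR_LEN is hypothetical.
+ *
+ * @code
+ * if (NETIO_PKT_PREPEND_AVAIL(pkt) >= ENCAP_HDR_LEN)
+ * {
+ *   // enough headroom; see netio_populate_prepend_buffer() for the
+ *   // actual prepend mechanism
+ * }
+ * @endcode
+ */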
+
+
+/** Flush a packet's minimal metadata from the cache, using a metadata pointer
+ * to speed the operation.
+ * @ingroup egress
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd,
+ netio_pkt_t* pkt)
+{
+}
+
+
+/** Invalidate a packet's minimal metadata from the cache, using a metadata
+ * pointer to speed the operation.
+ * @ingroup egress
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_INV_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd,
+ netio_pkt_t* pkt)
+{
+}
+
+
+/** Flush and then invalidate a packet's minimal metadata from the cache,
+ * using a metadata pointer to speed the operation.
+ * @ingroup egress
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_INV_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd,
+ netio_pkt_t* pkt)
+{
+}
+
+
+/** Flush a packet's metadata from the cache, using a metadata pointer
+ * to speed the operation.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+}
+
+
+/** Invalidate a packet's metadata from the cache, using a metadata
+ * pointer to speed the operation.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_INV_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+}
+
+
+/** Flush and then invalidate a packet's metadata from the cache,
+ * using a metadata pointer to speed the operation.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_INV_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+}
+
+
+/** Flush a packet's minimal metadata from the cache.
+ * @ingroup egress
+ *
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_MINIMAL_METADATA(netio_pkt_t* pkt)
+{
+}
+
+
+/** Invalidate a packet's minimal metadata from the cache.
+ * @ingroup egress
+ *
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_INV_MINIMAL_METADATA(netio_pkt_t* pkt)
+{
+}
+
+
+/** Flush and then invalidate a packet's minimal metadata from the cache.
+ * @ingroup egress
+ *
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_INV_MINIMAL_METADATA(netio_pkt_t* pkt)
+{
+}
+
+
+/** Flush a packet's metadata from the cache.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_METADATA(netio_pkt_t* pkt)
+{
+}
+
+
+/** Invalidate a packet's metadata from the cache.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_INV_METADATA(netio_pkt_t* pkt)
+{
+}
+
+
+/** Flush and then invalidate a packet's metadata from the cache.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_INV_METADATA(netio_pkt_t* pkt)
+{
+}
+
+/** Number of NUMA nodes we can distribute buffers to.
+ * @ingroup setup */
+#define NETIO_NUM_NODE_WEIGHTS 16
+
+/**
+ * @brief An object for specifying the characteristics of NetIO communication
+ * endpoint.
+ *
+ * @ingroup setup
+ *
+ * The @ref netio_input_register() function uses this structure to define
+ * how an application tile will communicate with an IPP.
+ *
+ * Future updates to NetIO may add new members to this structure,
+ * which can affect the success of the registration operation. Thus,
+ * if dynamically initializing the structure, applications are urged to
+ * zero it out first, for example:
+ *
+ * @code
+ * netio_input_config_t config;
+ * memset(&config, 0, sizeof (config));
+ * config.flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE;
+ * config.num_receive_packets = NETIO_MAX_RECEIVE_PKTS;
+ * config.queue_id = 0;
+ * .
+ * .
+ * .
+ * @endcode
+ *
+ * since that guarantees that any unused structure members, including
+ * members which did not exist when the application was first developed,
+ * will not have unexpected values.
+ *
+ * If statically initializing the structure, we strongly recommend use of
+ * C99-style named initializers, for example:
+ *
+ * @code
+ * netio_input_config_t config = {
+ * .flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE,
+ * .num_receive_packets = NETIO_MAX_RECEIVE_PKTS,
+ * .queue_id = 0,
+ * };
+ * @endcode
+ *
+ * instead of the old-style structure initialization:
+ *
+ * @code
+ * // Bad example! Currently equivalent to the above, but don't do this.
+ * netio_input_config_t config = {
+ * NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE, NETIO_MAX_RECEIVE_PKTS, 0
+ * };
+ * @endcode
+ *
+ * since the C99 style requires no changes to the code if elements of the
+ * config structure are rearranged. (It also makes the initialization much
+ * easier to understand.)
+ *
+ * Except for items which address a particular tile's transmit or receive
+ * characteristics, such as the ::NETIO_RECV flag, applications are advised
+ * to specify the same set of configuration data on all registrations.
+ * This prevents differing results if multiple tiles happen to do their
+ * registration operations in a different order on different invocations of
+ * the application. This is particularly important for things like link
+ * management flags, and buffer size and homing specifications.
+ *
+ * Unless the ::NETIO_FIXED_BUFFER_VA flag is specified in flags, the NetIO
+ * buffer pool is automatically created and mapped into the application's
+ * virtual address space at an address chosen by the operating system,
+ * using the common memory (cmem) facility in the Tilera Multicore
+ * Components library. The cmem facility allows multiple processes to gain
+ * access to shared memory which is mapped into each process at an
+ * identical virtual address. In order for this to work, the processes
+ * must have a common ancestor, which must create the common memory using
+ * tmc_cmem_init().
+ *
+ * In programs using the iLib process creation API, or in programs which use
+ * only one process (which include programs using the pthreads library),
+ * tmc_cmem_init() is called automatically. All other applications
+ * must call it explicitly, before any child processes which might call
+ * netio_input_register() are created.
+ */
+typedef struct
+{
+ /** Registration characteristics.
+
+ This value determines several characteristics of the registration;
+ flags for different types of behavior are ORed together to make the
+ final flag value. Generally applications should specify exactly
+ one flag from each of the following categories:
+
+ - Whether the application will be receiving packets on this queue
+ (::NETIO_RECV or ::NETIO_NO_RECV).
+
+ - Whether the application will be transmitting packets on this queue,
+ and if so, whether it will request egress checksum calculation
+ (::NETIO_XMIT, ::NETIO_XMIT_CSUM, or ::NETIO_NO_XMIT). It is
+ legal to call netio_get_buffer() without one of the XMIT flags,
+ as long as ::NETIO_RECV is specified; in this case, the retrieved
+ buffers must be passed to another tile for transmission.
+
+ - Whether the application expects any vendor-specific tags in
+ its packets' L2 headers (::NETIO_TAG_NONE, ::NETIO_TAG_BRCM,
+ or ::NETIO_TAG_MRVL). This must match the configuration of the
+ target IPP.
+
+ To accommodate applications written to previous versions of the NetIO
+ interface, none of the flags above are currently required; if omitted,
+ NetIO behaves more or less as if ::NETIO_RECV | ::NETIO_XMIT_CSUM |
+ ::NETIO_TAG_NONE were used. However, explicit specification of
+ the relevant flags allows NetIO to do a better job of resource
+ allocation, allows earlier detection of certain configuration errors,
+ and may enable advanced features or higher performance in the future,
+ so their use is strongly recommended.
+
+ Note that specifying ::NETIO_NO_RECV along with ::NETIO_NO_XMIT
+ is a special case, intended primarily for use by programs which
+ retrieve network statistics or do link management operations.
+ When these flags are both specified, the resulting queue may not
+ be used with NetIO routines other than netio_get(), netio_set(),
+ and netio_input_unregister(). See @ref link for more information
+ on link management.
+
+ Other flags are optional; their use is described below.
+ */
+ int flags;
+
+ /** Interface name. This is a string which identifies the specific
+ Ethernet controller hardware to be used. The format of the string
+ is a device type and a device index, separated by a slash; so,
+ the first 10 Gigabit Ethernet controller is named "xgbe/0", while
+ the second 10/100/1000 Megabit Ethernet controller is named "gbe/1".
+ */
+ const char* interface;
+
+ /** Receive packet queue size. This specifies the maximum number
+ of ingress packets that can be received on this queue without
+ being retrieved by @ref netio_get_packet(). If the IPP's distribution
+ algorithm calls for a packet to be sent to this queue, and this
+ many packets are already pending there, the new packet
+ will either be discarded, or sent to another tile registered
+ for the same queue_id (see @ref drops). This value must
+ be at least ::NETIO_MIN_RECEIVE_PKTS, can always be as large as
+ ::NETIO_MAX_RECEIVE_PKTS, and may be larger than that on certain
+ interfaces.
+ */
+ int num_receive_packets;
+
+ /** The queue ID being requested. Legal values for this range from 0
+ to ::NETIO_MAX_QUEUE_ID, inclusive. ::NETIO_MAX_QUEUE_ID is always
+ greater than or equal to the number of tiles; this allows one queue
+ for each tile, plus at least one additional queue. Some applications
+ may wish to use the additional queue as a destination for unwanted
+ packets, since packets delivered to queues for which no tiles have
+ registered are discarded.
+ */
+ unsigned int queue_id;
+
+ /** Maximum number of small send buffers to be held in the local empty
+ buffer cache. This specifies the size of the area which holds
+ empty small egress buffers requested from the IPP but not yet
+ retrieved via @ref netio_get_buffer(). This value must be greater
+ than zero if the application will ever use @ref netio_get_buffer()
+ to allocate empty small egress buffers; it may be no larger than
+ ::NETIO_MAX_SEND_BUFFERS. See @ref epp for more details on empty
+ buffer caching.
+ */
+ int num_send_buffers_small_total;
+
+ /** Number of small send buffers to be preallocated at registration.
+ If this value is nonzero, the specified number of empty small egress
+ buffers will be requested from the IPP during the netio_input_register
+ operation; this may speed the execution of @ref netio_get_buffer().
+ This may be no larger than @ref num_send_buffers_small_total. See @ref
+ epp for more details on empty buffer caching.
+ */
+ int num_send_buffers_small_prealloc;
+
+ /** Maximum number of large send buffers to be held in the local empty
+ buffer cache. This specifies the size of the area which holds empty
+ large egress buffers requested from the IPP but not yet retrieved via
+ @ref netio_get_buffer(). This value must be greater than zero if the
+ application will ever use @ref netio_get_buffer() to allocate empty
+ large egress buffers; it may be no larger than ::NETIO_MAX_SEND_BUFFERS.
+ See @ref epp for more details on empty buffer caching.
+ */
+ int num_send_buffers_large_total;
+
+ /** Number of large send buffers to be preallocated at registration.
+ If this value is nonzero, the specified number of empty large egress
+ buffers will be requested from the IPP during the netio_input_register
+ operation; this may speed the execution of @ref netio_get_buffer().
+ This may be no larger than @ref num_send_buffers_large_total. See @ref
+ epp for more details on empty buffer caching.
+ */
+ int num_send_buffers_large_prealloc;
+
+ /** Maximum number of jumbo send buffers to be held in the local empty
+ buffer cache. This specifies the size of the area which holds empty
+ jumbo egress buffers requested from the IPP but not yet retrieved via
+ @ref netio_get_buffer(). This value must be greater than zero if the
+ application will ever use @ref netio_get_buffer() to allocate empty
+ jumbo egress buffers; it may be no larger than ::NETIO_MAX_SEND_BUFFERS.
+ See @ref epp for more details on empty buffer caching.
+ */
+ int num_send_buffers_jumbo_total;
+
+ /** Number of jumbo send buffers to be preallocated at registration.
+ If this value is nonzero, the specified number of empty jumbo egress
+ buffers will be requested from the IPP during the netio_input_register
+ operation; this may speed the execution of @ref netio_get_buffer().
+ This may be no larger than @ref num_send_buffers_jumbo_total. See @ref
+ epp for more details on empty buffer caching.
+ */
+ int num_send_buffers_jumbo_prealloc;
+
+ /** Total packet buffer size. This determines the total size, in bytes,
+ of the NetIO buffer pool. Note that the maximum number of available
+ buffers of each size is determined during hypervisor configuration
+ (see the <em>System Programmer's Guide</em> for details); this just
+ influences how much host memory is allocated for those buffers.
+
+ The buffer pool is allocated from common memory, which will be
+ automatically initialized if needed. If your buffer pool is larger
+ than 240 MB, you might need to explicitly call @c tmc_cmem_init(),
+ as described in the Application Libraries Reference Manual (UG227).
+
+ Packet buffers are currently allocated in chunks of 16 MB; this
+ value will be rounded up to the next larger multiple of 16 MB.
+ If this value is zero, a default of 32 MB will be used; this was
+ the value used by previous versions of NetIO. Note that taking this
+ default also affects the placement of buffers on Linux NUMA nodes.
+ See @ref buffer_node_weights for an explanation of buffer placement.
+
+ In order to successfully allocate packet buffers, Linux must have
+ available huge pages on the relevant Linux NUMA nodes. See the
+ <em>System Programmer's Guide</em> for information on configuring
+ huge page support in Linux.
+ */
+ uint64_t total_buffer_size;
+
+ /** Buffer placement weighting factors.
+
+ This array specifies the relative amount of buffering to place
+ on each of the available Linux NUMA nodes. This array is
+ indexed by the NUMA node, and the values in the array are
+ proportional to the amount of buffer space to allocate on that
+ node.
+
+ If memory striping is enabled in the Hypervisor, then there is
+ only one logical NUMA node (node 0). In that case, NetIO will by
+ default ignore the suggested buffer node weights, and buffers
+ will be striped across the physical memory controllers. See
+ UG209 System Programmer's Guide for a description of the
+ hypervisor option that controls memory striping.
+
+ If memory striping is disabled, then there are up to four NUMA
+ nodes, corresponding to the four DDRAM controllers in the TILE
+ processor architecture. See UG100 Tile Processor Architecture
+ Overview for a diagram showing the location of each of the DDRAM
+ controllers relative to the tile array.
+
+ For instance, if memory striping is disabled, the following
+ configuration structure:
+
+ @code
+ netio_input_config_t config = {
+ .
+ .
+ .
+ .total_buffer_size = 4 * 16 * 1024 * 1024,
+ .buffer_node_weights = { 1, 0, 1, 0 },
+ };
+ @endcode
+
+ would result in 32 MB of buffers being placed on controller 0, and
+ 32 MB on controller 2. (Since buffers are allocated in units of
+ 16 MB, some sets of weights will not be able to be matched exactly.)
+
+ For the weights to be effective, @ref total_buffer_size must be
+ nonzero. If @ref total_buffer_size is zero, causing the default
+ 32 MB of buffer space to be used, then any specified weights will
+ be ignored, and buffers will be positioned as they were in previous
+ versions of NetIO:
+
+ - For xgbe/0 and gbe/0, 16 MB of buffers will be placed on controller 1,
+ and the other 16 MB will be placed on controller 2.
+
+ - For xgbe/1 and gbe/1, 16 MB of buffers will be placed on controller 2,
+ and the other 16 MB will be placed on controller 3.
+
+ If @ref total_buffer_size is nonzero, but all weights are zero,
+ then all buffer space will be allocated on Linux NUMA node zero.
+
+ By default, the specified buffer placement is treated as a hint;
+ if sufficient free memory is not available on the specified
+ controllers, the buffers will be allocated elsewhere. However,
+ if the ::NETIO_STRICT_HOMING flag is specified in @ref flags, then a
+ failure to allocate buffer space exactly as requested will cause the
+ registration operation to fail with an error of ::NETIO_CANNOT_HOME.
+
+ Note that maximal network performance cannot be achieved with
+ only one memory controller.
+ */
+ uint8_t buffer_node_weights[NETIO_NUM_NODE_WEIGHTS];
+
+ /** Fixed virtual address for packet buffers. Only valid when
+ ::NETIO_FIXED_BUFFER_VA is specified in @ref flags; see the
+ description of that flag for details.
+ */
+ void* fixed_buffer_va;
+
+ /**
+ Maximum number of outstanding send packet requests. This value is
+ only relevant when an EPP is in use; it determines the number of
+ slots in the EPP's outgoing packet queue which this tile is allowed
+ to consume, and thus the number of packets which may be sent before
+ the sending tile must wait for an acknowledgment from the EPP.
+ Modifying this value is generally only helpful when using @ref
+ netio_send_packet_vector(), where it can help improve performance by
+ allowing a single vector send operation to process more packets.
+ Typically it is not specified, and the default, which divides the
+ outgoing packet slots evenly between all tiles on the chip, is used.
+
+ If a registration asks for more outgoing packet queue slots than are
+ available, ::NETIO_TOOMANY_XMIT will be returned. The total number
+ of packet queue slots which are available for all tiles for each EPP
+ is subject to change, but is currently ::NETIO_TOTAL_SENDS_OUTSTANDING.
+
+ This value is ignored if ::NETIO_XMIT is not specified in flags.
+ If you want to specify a large value here for a specific tile, you are
+ advised to specify NETIO_NO_XMIT on other, non-transmitting tiles so
+ that they do not consume a default number of packet slots. Any tile
+ transmitting is required to have at least ::NETIO_MIN_SENDS_OUTSTANDING
+ slots allocated to it; values less than that will be silently
+ increased by the NetIO library.
+ */
+ int num_sends_outstanding;
+}
+netio_input_config_t;
+
+
+/** Registration flags; used in the @ref netio_input_config_t structure.
+ * @addtogroup setup
+ */
+/** @{ */
+
+/** Fail a registration request if we can't put packet buffers
+ on the specified memory controllers. */
+#define NETIO_STRICT_HOMING 0x00000002
+
+/** This application expects no tags on its L2 headers. */
+#define NETIO_TAG_NONE 0x00000004
+
+/** This application expects Marvell extended tags on its L2 headers. */
+#define NETIO_TAG_MRVL 0x00000008
+
+/** This application expects Broadcom tags on its L2 headers. */
+#define NETIO_TAG_BRCM 0x00000010
+
+/** This registration may call routines which receive packets. */
+#define NETIO_RECV 0x00000020
+
+/** This registration may not call routines which receive packets. */
+#define NETIO_NO_RECV 0x00000040
+
+/** This registration may call routines which transmit packets. */
+#define NETIO_XMIT 0x00000080
+
+/** This registration may call routines which transmit packets with
+ checksum acceleration. */
+#define NETIO_XMIT_CSUM 0x00000100
+
+/** This registration may not call routines which transmit packets. */
+#define NETIO_NO_XMIT 0x00000200
+
+/** This registration wants NetIO buffers mapped at an application-specified
+ virtual address.
+
+ NetIO buffers are by default created by the TMC common memory facility,
+ which must be configured by a common ancestor of all processes sharing
+ a network interface. When this flag is specified, NetIO buffers are
+ instead mapped at an address chosen by the application (and specified
+ in @ref netio_input_config_t::fixed_buffer_va). This allows multiple
+ unrelated but cooperating processes to share a NetIO interface.
+ All processes sharing the same interface must specify this flag,
+ and all must specify the same fixed virtual address.
+
+ @ref netio_input_config_t::fixed_buffer_va must be a
+ multiple of 16 MB, and the packet buffers will occupy @ref
+ netio_input_config_t::total_buffer_size bytes of virtual address
+ space, beginning at that address. If any of those virtual addresses
+ are currently occupied by other memory objects, like application or
+ shared library code or data, @ref netio_input_register() will return
+ ::NETIO_FAULT. While it is impossible to provide a fixed_buffer_va
+ which will work for all applications, a good first guess might be to
+ use 0xb0000000 minus @ref netio_input_config_t::total_buffer_size.
+ If that fails, it might be helpful to consult the running application's
+ virtual address description file (/proc/<em>pid</em>/maps) to see
+ which regions of virtual address space are available.
+ */
+#define NETIO_FIXED_BUFFER_VA 0x00000400
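+
+/* Illustrative sketch, not part of the API: one way to choose a fixed
+ * mapping address, following the first-guess advice above.  The "config"
+ * variable is a hypothetical netio_input_config_t being prepared for
+ * netio_input_register(); only the flags, total_buffer_size, and
+ * fixed_buffer_va fields are assumed here.
+ *
+ *   netio_input_config_t config = { 0 };
+ *   config.flags = NETIO_FIXED_BUFFER_VA;  // plus other registration flags
+ *   config.total_buffer_size = 16 * 1024 * 1024;  // hypothetical size
+ *   config.fixed_buffer_va = 0xb0000000 - config.total_buffer_size;
+ *   // Every process sharing the interface must use this same address.
+ */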
+
+/** This registration call will not complete unless the network link
+ is up. The process will wait several seconds for this to happen (the
+ precise interval is link-dependent), but if the link does not come up,
+ ::NETIO_LINK_DOWN will be returned. This flag is the default if
+ ::NETIO_NOREQUIRE_LINK_UP is not specified. Note that this flag by
+ itself does not request that the link be brought up; that can be done
+ with the ::NETIO_AUTO_LINK_UPDN or ::NETIO_AUTO_LINK_UP flags (the
+ latter is the default if no NETIO_AUTO_LINK_xxx flags are specified),
+ or by explicitly setting the link's desired state via netio_set().
+ If the link is not brought up by one of those methods, and this flag
+ is specified, the registration operation will return ::NETIO_LINK_DOWN.
+ This flag is ignored if it is specified along with ::NETIO_NO_XMIT and
+ ::NETIO_NO_RECV. See @ref link for more information on link
+ management.
+ */
+#define NETIO_REQUIRE_LINK_UP 0x00000800
+
+/** This registration call will complete even if the network link is not up.
+ Whenever the link is not up, packets will not be sent or received:
+ netio_get_packet() will return ::NETIO_NOPKT once all queued packets
+ have been drained, and netio_send_packet() and similar routines will
+ return NETIO_QUEUE_FULL once the outgoing packet queue in the EPP
+ or the I/O shim is full. See @ref link for more information on link
+ management.
+ */
+#define NETIO_NOREQUIRE_LINK_UP 0x00001000
+
+#ifndef __DOXYGEN__
+/*
+ * These are part of the implementation of the NETIO_AUTO_LINK_xxx flags,
+ * but should not be used directly by applications, and are thus not
+ * documented.
+ */
+#define _NETIO_AUTO_UP 0x00002000
+#define _NETIO_AUTO_DN 0x00004000
+#define _NETIO_AUTO_PRESENT 0x00008000
+#endif
+
+/** Set the desired state of the link to up, allowing any speeds which are
+ supported by the link hardware, as part of this registration operation.
+ Do not take down the link automatically. This is the default if
+ no other NETIO_AUTO_LINK_xxx flags are specified. This flag is ignored
+ if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV.
+ See @ref link for more information on link management.
+ */
+#define NETIO_AUTO_LINK_UP (_NETIO_AUTO_PRESENT | _NETIO_AUTO_UP)
+
+/** Set the desired state of the link to up, allowing any speeds which are
+ supported by the link hardware, as part of this registration operation.
+ Set the desired state of the link to down the next time no tiles are
+ registered for packet reception or transmission. This flag is ignored
+ if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV.
+ See @ref link for more information on link management.
+ */
+#define NETIO_AUTO_LINK_UPDN (_NETIO_AUTO_PRESENT | _NETIO_AUTO_UP | \
+ _NETIO_AUTO_DN)
+
+/** Set the desired state of the link to down the next time no tiles are
+ registered for packet reception or transmission. This flag is ignored
+ if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV.
+ See @ref link for more information on link management.
+ */
+#define NETIO_AUTO_LINK_DN (_NETIO_AUTO_PRESENT | _NETIO_AUTO_DN)
+
+/** Do not bring up the link automatically as part of this registration
+ operation. Do not take down the link automatically. This flag
+ is ignored if it is specified along with ::NETIO_NO_XMIT and
+ ::NETIO_NO_RECV. See @ref link for more information on link management.
+ */
+#define NETIO_AUTO_LINK_NONE _NETIO_AUTO_PRESENT
+
+
+/** Minimum number of receive packets. */
+#define NETIO_MIN_RECEIVE_PKTS 16
+
+/** Lower bound on the maximum number of receive packets; may be higher
+ than this on some interfaces. */
+#define NETIO_MAX_RECEIVE_PKTS 128
+
+/** Maximum number of send buffers, per packet size. */
+#define NETIO_MAX_SEND_BUFFERS 16
+
+/** Number of EPP queue slots, and thus outstanding sends, per EPP. */
+#define NETIO_TOTAL_SENDS_OUTSTANDING 2015
+
+/** Minimum number of EPP queue slots, and thus outstanding sends, per
+ * transmitting tile. */
+#define NETIO_MIN_SENDS_OUTSTANDING 16
+
+
+/**@}*/
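+
+/* Illustrative sketch, not part of the API: a typical way to combine the
+ * registration flags above in a hypothetical netio_input_config_t named
+ * "config" before calling netio_input_register().
+ *
+ *   // A tile that only transmits: request extra EPP queue slots and
+ *   // skip the receive path entirely.
+ *   config.flags = NETIO_XMIT | NETIO_NO_RECV;
+ *   config.num_sends_outstanding = 128;
+ *
+ *   // Non-transmitting tiles should say so explicitly, so that they do
+ *   // not consume a default share of the outgoing packet slots.
+ *   config.flags = NETIO_NO_XMIT | NETIO_RECV;
+ */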
+
+#ifndef __DOXYGEN__
+
+/**
+ * An object for providing Ethernet packets to a process.
+ */
+struct __netio_queue_impl_t;
+
+/**
+ * An object for managing the user end of a NetIO queue.
+ */
+struct __netio_queue_user_impl_t;
+
+#endif /* !__DOXYGEN__ */
+
+
+/** A netio_queue_t describes a NetIO communications endpoint.
+ * @ingroup setup
+ */
+typedef struct
+{
+#ifdef __DOXYGEN__
+ uint8_t opaque[8]; /**< This is an opaque structure. */
+#else
+ struct __netio_queue_impl_t* __system_part; /**< The system part. */
+ struct __netio_queue_user_impl_t* __user_part; /**< The user part. */
+#ifdef _NETIO_PTHREAD
+ _netio_percpu_mutex_t lock; /**< Queue lock. */
+#endif
+#endif
+}
+netio_queue_t;
+
+
+/**
+ * @brief Packet send context.
+ *
+ * @ingroup egress
+ *
+ * Packet send context for use with netio_send_packet_prepare and _commit.
+ */
+typedef struct
+{
+#ifdef __DOXYGEN__
+ uint8_t opaque[44]; /**< This is an opaque structure. */
+#else
+ uint8_t flags; /**< Defined below */
+ uint8_t datalen; /**< Number of valid words pointed to by data. */
+ uint32_t request[9]; /**< Request to be sent to the EPP or shim. Note
+ that this is smaller than the 11-word maximum
+ request size, since some constant values are
+ not saved in the context. */
+ uint32_t *data; /**< Data to be sent to the EPP or shim via IDN. */
+#endif
+}
+netio_send_pkt_context_t;
+
+
+#ifndef __DOXYGEN__
+#define SEND_PKT_CTX_USE_EPP 1 /**< We're sending to an EPP. */
+#define SEND_PKT_CTX_SEND_CSUM 2 /**< Request includes a checksum. */
+#endif
+
+/**
+ * @brief Packet vector entry.
+ *
+ * @ingroup egress
+ *
+ * This data structure is used with netio_send_packet_vector() to send multiple
+ * packets with one NetIO call. The structure should be initialized by
+ * calling netio_pkt_vector_set(), rather than by setting the fields
+ * directly.
+ *
+ * This structure is guaranteed to be a power of two in size, no
+ * bigger than one L2 cache line, and to be aligned modulo its size.
+ */
+typedef struct
+#ifndef __DOXYGEN__
+__attribute__((aligned(8)))
+#endif
+{
+ /** Reserved for use by the user application. When initialized with
+ * the netio_set_pkt_vector_entry() function, this field is guaranteed
+ * to be visible to readers only after all other fields are already
+ * visible. This way it can be used as a valid flag or generation
+ * counter. */
+ uint8_t user_data;
+
+ /* Structure members below this point should not be accessed directly by
+ * applications, as they may change in the future. */
+
+ /** Low 8 bits of the packet address to send. The high bits are
+ * acquired from the 'handle' field. */
+ uint8_t buffer_address_low;
+
+ /** Number of bytes to transmit. */
+ uint16_t size;
+
+ /** The raw handle from a netio_pkt_t. If this is NETIO_PKT_HANDLE_NONE,
+ * this vector entry will be skipped and no packet will be transmitted. */
+ netio_pkt_handle_t handle;
+}
+netio_pkt_vector_entry_t;
+
+
+/**
+ * @brief Initialize fields in a packet vector entry.
+ *
+ * @ingroup egress
+ *
+ * @param[out] v Pointer to the vector entry to be initialized.
+ * @param[in] pkt Packet to be transmitted when the vector entry is passed to
+ * netio_send_packet_vector(). Note that the packet's attributes
+ * (e.g., its L2 offset and length) are captured at the time this
+ * routine is called; subsequent changes in those attributes will not
+ * be reflected in the packet which is actually transmitted.
+ * Changes to the packet's contents, however, will be reflected.
+ * If this is NULL, no packet will be transmitted.
+ * @param[in] user_data User data to be set in the vector entry.
+ * This function guarantees that the "user_data" field will become
+ * visible to a reader only after all other fields have become visible.
+ * This allows a structure in a ring buffer to be written and read
+ * by a polling reader without any locks or other synchronization.
+ */
+static __inline void
+netio_pkt_vector_set(volatile netio_pkt_vector_entry_t* v, netio_pkt_t* pkt,
+ uint8_t user_data)
+{
+ if (pkt)
+ {
+ if (NETIO_PKT_IS_MINIMAL(pkt))
+ {
+ netio_pkt_minimal_metadata_t* mmd =
+ (netio_pkt_minimal_metadata_t*) &pkt->__metadata;
+ v->buffer_address_low = (uintptr_t) NETIO_PKT_L2_DATA_MM(mmd, pkt) & 0xFF;
+ v->size = NETIO_PKT_L2_LENGTH_MM(mmd, pkt);
+ }
+ else
+ {
+ netio_pkt_metadata_t* mda = &pkt->__metadata;
+ v->buffer_address_low = (uintptr_t) NETIO_PKT_L2_DATA_M(mda, pkt) & 0xFF;
+ v->size = NETIO_PKT_L2_LENGTH_M(mda, pkt);
+ }
+ v->handle.word = pkt->__packet.word;
+ }
+ else
+ {
+ v->handle.word = 0; /* Set handle to NETIO_PKT_HANDLE_NONE. */
+ }
+
+ __asm__("" : : : "memory");
+
+ v->user_data = user_data;
+}
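+
+/* Illustrative sketch, not part of the API: using netio_pkt_vector_set()
+ * to fill a small ring of vector entries for a polling consumer.  The
+ * ring, its size, and the source of "pkt" are hypothetical; the point is
+ * that user_data is written last, so a reader polling user_data sees the
+ * other fields only after they are valid.
+ *
+ *   #define EXAMPLE_RING_SIZE 32
+ *   volatile netio_pkt_vector_entry_t ring[EXAMPLE_RING_SIZE];
+ *
+ *   void example_produce(unsigned int slot, netio_pkt_t* pkt,
+ *                        uint8_t generation)
+ *   {
+ *     netio_pkt_vector_set(&ring[slot % EXAMPLE_RING_SIZE], pkt, generation);
+ *   }
+ */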
+
+
+/**
+ * Flags and structures for @ref netio_get() and @ref netio_set().
+ * @ingroup config
+ */
+
+/** @{ */
+/** Parameter class; addr is a NETIO_PARAM_xxx value. */
+#define NETIO_PARAM 0
+/** Interface MAC address. This address is only valid with @ref netio_get().
+ * The value is a 6-byte MAC address. Depending upon the overall system
+ * design, a MAC address may or may not be available for each interface. */
+#define NETIO_PARAM_MAC 0
+
+/** Determine whether to suspend output on the receipt of pause frames.
+ * If the value is nonzero, the I/O shim will suspend output when a pause
+ * frame is received. If the value is zero, pause frames will be ignored. */
+#define NETIO_PARAM_PAUSE_IN 1
+
+/** Determine whether to send pause frames if the I/O shim packet FIFOs are
+ * nearly full. If the value is zero, pause frames are not sent. If
+ * the value is nonzero, it is the delay value which will be sent in any
+ * pause frames which are output, in units of 512 bit times. */
+#define NETIO_PARAM_PAUSE_OUT 2
+
+/** Jumbo frame support. The value is a 4-byte integer. If the value is
+ * nonzero, the MAC will accept frames of up to 10240 bytes. If the value
+ * is zero, the MAC will only accept frames of up to 1544 bytes. */
+#define NETIO_PARAM_JUMBO 3
+
+/** I/O shim's overflow statistics register. The value is two 16-bit integers.
+ * The first 16-bit value (or the low 16 bits, if the value is treated as a
+ * 32-bit number) is the count of packets which were completely dropped and
+ * not delivered by the shim. The second 16-bit value (or the high 16 bits,
+ * if the value is treated as a 32-bit number) is the count of packets
+ * which were truncated and thus only partially delivered by the shim. This
+ * register is automatically reset to zero after it has been read.
+ */
+#define NETIO_PARAM_OVERFLOW 4
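+
+/* Illustrative sketch, not part of the API: decoding a value read back
+ * for NETIO_PARAM_OVERFLOW.  How "overflow" is obtained via netio_get()
+ * is omitted; only the packing described above is used.
+ *
+ *   uint16_t dropped   = overflow & 0xffff;  // completely dropped
+ *   uint16_t truncated = overflow >> 16;     // partially delivered
+ */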
+
+/** IPP statistics. This address is only valid with @ref netio_get(). The
+ * value is a netio_stat_t structure. Unlike the I/O shim statistics, the
+ * IPP statistics are not all reset to zero on read; see the description
+ * of the netio_stat_t for details. */
+#define NETIO_PARAM_STAT 5
+
+/** Possible link state. The value is a combination of "NETIO_LINK_xxx"
+ * flags. With @ref netio_get(), this will indicate which flags are
+ * actually supported by the hardware.
+ *
+ * For historical reasons, specifying this value to netio_set() will have
+ * the same behavior as using ::NETIO_PARAM_LINK_CONFIG, but this usage is
+ * discouraged.
+ */
+#define NETIO_PARAM_LINK_POSSIBLE_STATE 6
+
+/** Link configuration. The value is a combination of "NETIO_LINK_xxx" flags.
+ * With @ref netio_set(), this will attempt to immediately bring up the
+ * link using whichever of the requested flags are supported by the
+ * hardware, or take down the link if the flags are zero; if this is
+ * not possible, an error will be returned. Many programs will want
+ * to use ::NETIO_PARAM_LINK_DESIRED_STATE instead.
+ *
+ * For historical reasons, specifying this value to netio_get() will
+ * have the same behavior as using ::NETIO_PARAM_LINK_POSSIBLE_STATE,
+ * but this usage is discouraged.
+ */
+#define NETIO_PARAM_LINK_CONFIG NETIO_PARAM_LINK_POSSIBLE_STATE
+
+/** Current link state. This address is only valid with @ref netio_get().
+ * The value is zero or more of the "NETIO_LINK_xxx" flags, ORed together.
+ * If the link is down, the value ANDed with NETIO_LINK_SPEED will be
+ * zero; if the link is up, the value ANDed with NETIO_LINK_SPEED will
+ * result in exactly one of the NETIO_LINK_xxx values, indicating the
+ * current speed. */
+#define NETIO_PARAM_LINK_CURRENT_STATE 7
+
+/** Variant symbol for current state, retained for compatibility with
+ * pre-MDE-2.1 programs. */
+#define NETIO_PARAM_LINK_STATUS NETIO_PARAM_LINK_CURRENT_STATE
+
+/** Packet Coherence protocol. This address is only valid with @ref netio_get().
+ * The value is nonzero if the interface is configured for cache-coherent DMA.
+ */
+#define NETIO_PARAM_COHERENT 8
+
+/** Desired link state. The value is a combination of "NETIO_LINK_xxx"
+ * flags, which specify the desired state for the link. With @ref
+ * netio_set(), this will, in the background, attempt to bring up the link
+ * using whichever of the requested flags are reasonable, or take down the
+ * link if the flags are zero. The actual link up or down operation may
+ * happen after this call completes. If the link state changes in the
+ * future, the system will continue to try to get back to the desired link
+ * state; for instance, if the link is brought up successfully, and then
+ * the network cable is disconnected, the link will go down. However, the
+ * desired state of the link is still up, so if the cable is reconnected,
+ * the link will be brought up again.
+ *
+ * With @ref netio_get(), this will indicate the desired state for the
+ * link, as set with a previous netio_set() call, or implicitly by a
+ * netio_input_register() or netio_input_unregister() operation. This may
+ * not reflect the current state of the link; to get that, use
+ * ::NETIO_PARAM_LINK_CURRENT_STATE. */
+#define NETIO_PARAM_LINK_DESIRED_STATE 9
+
+/** NetIO statistics structure. Retrieved using the ::NETIO_PARAM_STAT
+ * address passed to @ref netio_get(). */
+typedef struct
+{
+ /** Number of packets which have been received by the IPP and forwarded
+ * to a tile's receive queue for processing. This value wraps at its
+ * maximum, and is not cleared upon read. */
+ uint32_t packets_received;
+
+ /** Number of packets which have been dropped by the IPP, because they could
+ * not be received, or could not be forwarded to a tile. The former happens
+ * when the IPP does not have a free packet buffer of suitable size for an
+ * incoming frame. The latter happens when all potential destination tiles
+ * for a packet, as defined by the group, bucket, and queue configuration,
+ * have full receive queues. This value wraps at its maximum, and is not
+ * cleared upon read. */
+ uint32_t packets_dropped;
+
+ /*
+ * Note: the #defines after each of the following four one-byte values
+ * denote their location within the third word of the netio_stat_t. They
+ * are intended for use only by the IPP implementation and are thus omitted
+ * from the Doxygen output.
+ */
+
+ /** Number of packets dropped because no worker was able to accept a new
+ * packet. This value saturates at its maximum, and is cleared upon
+ * read. */
+ uint8_t drops_no_worker;
+#ifndef __DOXYGEN__
+#define NETIO_STAT_DROPS_NO_WORKER 0
+#endif
+
+ /** Number of packets dropped because no small buffers were available.
+ * This value saturates at its maximum, and is cleared upon read. */
+ uint8_t drops_no_smallbuf;
+#ifndef __DOXYGEN__
+#define NETIO_STAT_DROPS_NO_SMALLBUF 1
+#endif
+
+ /** Number of packets dropped because no large buffers were available.
+ * This value saturates at its maximum, and is cleared upon read. */
+ uint8_t drops_no_largebuf;
+#ifndef __DOXYGEN__
+#define NETIO_STAT_DROPS_NO_LARGEBUF 2
+#endif
+
+ /** Number of packets dropped because no jumbo buffers were available.
+ * This value saturates at its maximum, and is cleared upon read. */
+ uint8_t drops_no_jumbobuf;
+#ifndef __DOXYGEN__
+#define NETIO_STAT_DROPS_NO_JUMBOBUF 3
+#endif
+}
+netio_stat_t;
+
+
+/** Link can run, should run, or is running at 10 Mbps. */
+#define NETIO_LINK_10M 0x01
+
+/** Link can run, should run, or is running at 100 Mbps. */
+#define NETIO_LINK_100M 0x02
+
+/** Link can run, should run, or is running at 1 Gbps. */
+#define NETIO_LINK_1G 0x04
+
+/** Link can run, should run, or is running at 10 Gbps. */
+#define NETIO_LINK_10G 0x08
+
+/** Link should run at the highest speed supported by the link and by
+ * the device connected to the link. Only usable as a value for
+ * the link's desired state; never returned as a value for the current
+ * or possible states. */
+#define NETIO_LINK_ANYSPEED 0x10
+
+/** All legal link speeds. */
+#define NETIO_LINK_SPEED (NETIO_LINK_10M | \
+ NETIO_LINK_100M | \
+ NETIO_LINK_1G | \
+ NETIO_LINK_10G | \
+ NETIO_LINK_ANYSPEED)
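+
+/* Illustrative sketch, not part of the API: interpreting a value read
+ * back for NETIO_PARAM_LINK_CURRENT_STATE.  How "state" is obtained via
+ * netio_get() is omitted; only the flag encoding above is used.
+ *
+ *   if ((state & NETIO_LINK_SPEED) == 0) {
+ *     // Link is down.
+ *   } else if (state & NETIO_LINK_1G) {
+ *     // Link is up at 1 Gbps.
+ *   }
+ */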
+
+
+/** MAC register class. Addr is a register offset within the MAC.
+ * Registers within the XGbE and GbE MACs are documented in the Tile
+ * Processor I/O Device Guide (UG104). MAC registers start at address
+ * 0x4000, and do not include the MAC_INTERFACE registers. */
+#define NETIO_MAC 1
+
+/** MDIO register class (IEEE 802.3 clause 22 format). Addr is the "addr"
+ * member of a netio_mdio_addr_t structure. */
+#define NETIO_MDIO 2
+
+/** MDIO register class (IEEE 802.3 clause 45 format). Addr is the "addr"
+ * member of a netio_mdio_addr_t structure. */
+#define NETIO_MDIO_CLAUSE45 3
+
+/** NetIO MDIO address type. Retrieved or provided using the ::NETIO_MDIO
+ * address passed to @ref netio_get() or @ref netio_set(). */
+typedef union
+{
+ struct
+ {
+ unsigned int reg:16; /**< MDIO register offset. For clause 22 access,
+ must be less than 32. */
+ unsigned int phy:5; /**< Which MDIO PHY to access. */
+ unsigned int dev:5; /**< Which MDIO device to access within that PHY.
+ Applicable for clause 45 access only; ignored
+ for clause 22 access. */
+ }
+ bits; /**< Container for bitfields. */
+ uint64_t addr; /**< Value to pass to @ref netio_get() or
+ * @ref netio_set(). */
+}
+netio_mdio_addr_t;
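+
+/* Illustrative sketch, not part of the API: building an MDIO address.
+ * The register, PHY, and device numbers are hypothetical; "addr" is the
+ * value that would be passed to netio_get() or netio_set() with the
+ * NETIO_MDIO or NETIO_MDIO_CLAUSE45 class.
+ *
+ *   netio_mdio_addr_t mdio = { .bits = { .reg = 1, .phy = 0, .dev = 3 } };
+ *   uint64_t addr = mdio.addr;
+ */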
+
+/** @} */
+
+#endif /* __NETIO_INTF_H__ */
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index 112b1e248f05..b4c8e8ec45dc 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 67617a05e602..dbc213adf5e1 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -21,7 +21,6 @@
#include <linux/kdev_t.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
-#include <linux/smp_lock.h>
#include <linux/uaccess.h>
#include <linux/signal.h>
#include <asm/syscalls.h>
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index fb64b99959d4..543d6a33aa26 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -15,7 +15,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
new file mode 100644
index 000000000000..a1ee25be9ad9
--- /dev/null
+++ b/arch/tile/kernel/pci.c
@@ -0,0 +1,621 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bootmem.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/byteorder.h>
+#include <asm/hv_driver.h>
+#include <hv/drv_pcie_rc_intf.h>
+
+
+/*
+ * Initialization flow and process
+ * -------------------------------
+ *
+ * This file contains the routines to search for PCI buses,
+ * enumerate the buses, and configure any attached devices.
+ *
+ * There are two entry points here:
+ * 1) tile_pci_init
+ * This sets up the pci_controller structs, and opens the
+ * FDs to the hypervisor. This is called from setup_arch() early
+ * in the boot process.
+ * 2) pcibios_init
+ * This probes the PCI bus(es) for any attached hardware. It's
+ * called by subsys_initcall. All of the real work is done by the
+ * generic Linux PCI layer.
+ *
+ */
+
+/*
+ * This flag tells if the platform is TILEmpower, which needs
+ * special configuration for the PLX switch chip.
+ */
+int __write_once tile_plx_gen1;
+
+static struct pci_controller controllers[TILE_NUM_PCIE];
+static int num_controllers;
+
+static struct pci_ops tile_cfg_ops;
+
+
+/*
+ * We don't need to worry about the alignment of resources.
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ return res->start;
+}
+EXPORT_SYMBOL(pcibios_align_resource);
+
+/*
+ * Open a FD to the hypervisor PCI device.
+ *
+ * controller_id is the controller number, config type is 0 or 1 for
+ * config0 or config1 operations.
+ */
+static int __init tile_pcie_open(int controller_id, int config_type)
+{
+ char filename[32];
+ int fd;
+
+ sprintf(filename, "pcie/%d/config%d", controller_id, config_type);
+
+ fd = hv_dev_open((HV_VirtAddr)filename, 0);
+
+ return fd;
+}
+
+
+/*
+ * Get the IRQ numbers from the HV and set up the handlers for them.
+ */
+static int __init tile_init_irqs(int controller_id,
+ struct pci_controller *controller)
+{
+ char filename[32];
+ int fd;
+ int ret;
+ int x;
+ struct pcie_rc_config rc_config;
+
+ sprintf(filename, "pcie/%d/ctl", controller_id);
+ fd = hv_dev_open((HV_VirtAddr)filename, 0);
+ if (fd < 0) {
+ pr_err("PCI: hv_dev_open(%s) failed\n", filename);
+ return -1;
+ }
+ ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config),
+ sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF);
+ hv_dev_close(fd);
+ if (ret != sizeof(rc_config)) {
+ pr_err("PCI: wanted %zd bytes, got %d\n",
+ sizeof(rc_config), ret);
+ return -1;
+ }
+ /* Record irq_base so that we can map INTx to IRQ # later. */
+ controller->irq_base = rc_config.intr;
+
+ for (x = 0; x < 4; x++)
+ tile_irq_activate(rc_config.intr + x,
+ TILE_IRQ_HW_CLEAR);
+
+ if (rc_config.plx_gen1)
+ controller->plx_gen1 = 1;
+
+ return 0;
+}
+
+/*
+ * First initialization entry point, called from setup_arch().
+ *
+ * Find valid controllers and fill in pci_controller structs for each
+ * of them.
+ *
+ * Returns the number of controllers discovered.
+ */
+int __init tile_pci_init(void)
+{
+ int i;
+
+ pr_info("PCI: Searching for controllers...\n");
+
+ /* Do any configuration we need before using the PCIe */
+
+ for (i = 0; i < TILE_NUM_PCIE; i++) {
+ int hv_cfg_fd0 = -1;
+ int hv_cfg_fd1 = -1;
+ int hv_mem_fd = -1;
+ char name[32];
+ struct pci_controller *controller;
+
+ /*
+ * Open the fd to the HV. If it fails then this
+ * device doesn't exist.
+ */
+ hv_cfg_fd0 = tile_pcie_open(i, 0);
+ if (hv_cfg_fd0 < 0)
+ continue;
+ hv_cfg_fd1 = tile_pcie_open(i, 1);
+ if (hv_cfg_fd1 < 0) {
+ pr_err("PCI: Couldn't open config fd to HV "
+ "for controller %d\n", i);
+ goto err_cont;
+ }
+
+ sprintf(name, "pcie/%d/mem", i);
+ hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0);
+ if (hv_mem_fd < 0) {
+ pr_err("PCI: Could not open mem fd to HV!\n");
+ goto err_cont;
+ }
+
+ pr_info("PCI: Found PCI controller #%d\n", i);
+
+ controller = &controllers[num_controllers];
+
+ if (tile_init_irqs(i, controller)) {
+ pr_err("PCI: Could not initialize "
+ "IRQs, aborting.\n");
+ goto err_cont;
+ }
+
+ controller->index = num_controllers;
+ controller->hv_cfg_fd[0] = hv_cfg_fd0;
+ controller->hv_cfg_fd[1] = hv_cfg_fd1;
+ controller->hv_mem_fd = hv_mem_fd;
+ controller->first_busno = 0;
+ controller->last_busno = 0xff;
+ controller->ops = &tile_cfg_ops;
+
+ num_controllers++;
+ continue;
+
+err_cont:
+ if (hv_cfg_fd0 >= 0)
+ hv_dev_close(hv_cfg_fd0);
+ if (hv_cfg_fd1 >= 0)
+ hv_dev_close(hv_cfg_fd1);
+ if (hv_mem_fd >= 0)
+ hv_dev_close(hv_mem_fd);
+ continue;
+ }
+
+ /*
+ * Before using the PCIe, see if we need to do any platform-specific
+ * configuration, such as the PLX switch Gen 1 issue on TILEmpower.
+ */
+ for (i = 0; i < num_controllers; i++) {
+ struct pci_controller *controller = &controllers[i];
+
+ if (controller->plx_gen1)
+ tile_plx_gen1 = 1;
+ }
+
+ return num_controllers;
+}
+
+/*
+ * (pin - 1) converts from the PCI standard's [1:4] convention to
+ * a normal [0:3] range.
+ */
+static int tile_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct pci_controller *controller =
+ (struct pci_controller *)dev->sysdata;
+ return (pin - 1) + controller->irq_base;
+}
+
+
+static void __init fixup_read_and_payload_sizes(void)
+{
+ struct pci_dev *dev = NULL;
+ int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
+ int max_read_size = 0x2; /* Limit to 512 byte reads. */
+ u16 new_values;
+
+ /* Scan for the smallest maximum payload size. */
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ int pcie_caps_offset;
+ u32 devcap;
+ int max_payload;
+
+ pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (pcie_caps_offset == 0)
+ continue;
+
+ pci_read_config_dword(dev, pcie_caps_offset + PCI_EXP_DEVCAP,
+ &devcap);
+ max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD;
+ if (max_payload < smallest_max_payload)
+ smallest_max_payload = max_payload;
+ }
+
+ /* Now, set the max_payload_size for all devices to that value. */
+ new_values = (max_read_size << 12) | (smallest_max_payload << 5);
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ int pcie_caps_offset;
+ u16 devctl;
+
+ pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (pcie_caps_offset == 0)
+ continue;
+
+ pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
+ &devctl);
+ devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ);
+ devctl |= new_values;
+ pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
+ devctl);
+ }
+}
+
+
+/*
+ * Second PCI initialization entry point, called by subsys_initcall.
+ *
+ * The controllers have been set up by the time we get here, by a call to
+ * tile_pci_init.
+ */
+static int __init pcibios_init(void)
+{
+ int i;
+
+ pr_info("PCI: Probing PCI hardware\n");
+
+ /*
+ * Delay a bit in case devices aren't ready. Some devices are
+ * known to require at least 20ms here, but we use a more
+ * conservative value.
+ */
+ mdelay(250);
+
+ /* Scan all of the recorded PCI controllers. */
+ for (i = 0; i < num_controllers; i++) {
+ struct pci_controller *controller = &controllers[i];
+ struct pci_bus *bus;
+
+ pr_info("PCI: initializing controller #%d\n", i);
+
+ /*
+ * This comes from the generic Linux PCI driver.
+ *
+ * It reads the PCI tree for this bus into the Linux
+ * data structures.
+ *
+ * This is inlined in linux/pci.h and calls into
+ * pci_scan_bus_parented() in probe.c.
+ */
+ bus = pci_scan_bus(0, controller->ops, controller);
+ controller->root_bus = bus;
+ controller->last_busno = bus->subordinate;
+
+ }
+
+ /* Do machine dependent PCI interrupt routing */
+ pci_fixup_irqs(pci_common_swizzle, tile_map_irq);
+
+ /*
+ * This comes from the generic Linux PCI driver.
+ *
+ * It allocates all of the resources (I/O memory, etc)
+ * associated with the devices read in above.
+ */
+
+ pci_assign_unassigned_resources();
+
+ /* Configure the max_read_size and max_payload_size values. */
+ fixup_read_and_payload_sizes();
+
+ /* Record the I/O resources in the PCI controller structure. */
+ for (i = 0; i < num_controllers; i++) {
+ struct pci_bus *root_bus = controllers[i].root_bus;
+ struct pci_bus *next_bus;
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &root_bus->devices, bus_list) {
+ /* Find the PCI host controller, i.e. the first bridge. */
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
+ (PCI_SLOT(dev->devfn) == 0)) {
+ next_bus = dev->subordinate;
+ controllers[i].mem_resources[0] =
+ *next_bus->resource[0];
+ controllers[i].mem_resources[1] =
+ *next_bus->resource[1];
+ controllers[i].mem_resources[2] =
+ *next_bus->resource[2];
+
+ break;
+ }
+ }
+
+ }
+
+ return 0;
+}
+subsys_initcall(pcibios_init);
+
+/*
+ * No bus fixups needed.
+ */
+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+{
+ /* Nothing needs to be done. */
+}
+
+/*
+ * This can be called from the generic PCI layer, but doesn't need to
+ * do anything.
+ */
+char __devinit *pcibios_setup(char *str)
+{
+ /* Nothing needs to be done. */
+ return str;
+}
+
+/*
+ * This is called from the generic Linux layer.
+ */
+void __init pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+/*
+ * Enable memory and/or address decoding, as appropriate, for the
+ * device described by the 'dev' struct.
+ *
+ * This is called from the generic PCI layer, and can be called
+ * for bridges or endpoints.
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ u16 cmd, old_cmd;
+ u8 header_type;
+ int i;
+ struct resource *r;
+
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /*
+ * For bridges, we enable both memory and I/O decoding
+ * in all cases.
+ */
+ cmd |= PCI_COMMAND_IO;
+ cmd |= PCI_COMMAND_MEMORY;
+ } else {
+ /*
+ * For endpoints, we enable memory and/or I/O decoding
+ * only if the device has a resource of that type.
+ */
+ for (i = 0; i < 6; i++) {
+ r = &dev->resource[i];
+ if (r->flags & IORESOURCE_UNSET) {
+ pr_err("PCI: Device %s not available "
+ "because of resource collisions\n",
+ pci_name(dev));
+ return -EINVAL;
+ }
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+ }
+
+ /*
+ * We only write the command if it changed.
+ */
+ if (cmd != old_cmd)
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ return 0;
+}
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
+{
+ unsigned long start = pci_resource_start(dev, bar);
+ unsigned long len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (!len)
+ return NULL;
+ if (max && len > max)
+ len = max;
+
+ if (!(flags & IORESOURCE_MEM)) {
+ pr_info("PCI: Trying to map invalid resource %#lx\n", flags);
+ start = 0;
+ }
+
+ return (void __iomem *)start;
+}
+EXPORT_SYMBOL(pci_iomap);
+
+
+/****************************************************************
+ *
+ * Tile PCI config space read/write routines
+ *
+ ****************************************************************/
+
+/*
+ * These are the normal read and write ops.
+ * They are expanded with macros from pci_bus_read_config_byte() etc.
+ *
+ * devfn is the combined PCI slot & function.
+ *
+ * offset is in bytes, from the start of config space for the
+ * specified bus & slot.
+ */
+
+static int __devinit tile_cfg_read(struct pci_bus *bus,
+ unsigned int devfn,
+ int offset,
+ int size,
+ u32 *val)
+{
+ struct pci_controller *controller = bus->sysdata;
+ int busnum = bus->number & 0xff;
+ int slot = (devfn >> 3) & 0x1f;
+ int function = devfn & 0x7;
+ u32 addr;
+ int config_mode = 1;
+
+ /*
+ * There is no bridge between the Tile and bus 0, so we
+ * use config0 to talk to bus 0.
+ *
+ * If we're talking to a bus other than zero then we
+ * must have found a bridge.
+ */
+ if (busnum == 0) {
+ /*
+ * We fake an empty slot for (busnum == 0) && (slot > 0),
+ * since there is only one slot on bus 0.
+ */
+ if (slot) {
+ *val = 0xFFFFFFFF;
+ return 0;
+ }
+ config_mode = 0;
+ }
+
+ addr = busnum << 20; /* Bus in 27:20 */
+ addr |= slot << 15; /* Slot (device) in 19:15 */
+ addr |= function << 12; /* Function is in 14:12 */
+ addr |= (offset & 0xFFF); /* byte address in 0:11 */
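+ /*
+ * For example, bus 1, device 2, function 0, offset 0x10 encodes as
+ * (1 << 20) | (2 << 15) | (0 << 12) | 0x10 == 0x110010.
+ */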
+
+ return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0,
+ (HV_VirtAddr)(val), size, addr);
+}
+
+
+/*
+ * See tile_cfg_read() for relevant comments.
+ * Note that "val" is the value to write, not a pointer to that value.
+ */
+static int __devinit tile_cfg_write(struct pci_bus *bus,
+ unsigned int devfn,
+ int offset,
+ int size,
+ u32 val)
+{
+ struct pci_controller *controller = bus->sysdata;
+ int busnum = bus->number & 0xff;
+ int slot = (devfn >> 3) & 0x1f;
+ int function = devfn & 0x7;
+ u32 addr;
+ int config_mode = 1;
+ HV_VirtAddr valp = (HV_VirtAddr)&val;
+
+ /*
+ * For bus 0 slot 0 we use config 0 accesses.
+ */
+ if (busnum == 0) {
+ /*
+ * We fake an empty slot for (busnum == 0) && (slot > 0),
+ * since there is only one slot on bus 0.
+ */
+ if (slot)
+ return 0;
+ config_mode = 0;
+ }
+
+ addr = busnum << 20; /* Bus in 27:20 */
+ addr |= slot << 15; /* Slot (device) in 19:15 */
+ addr |= function << 12; /* Function is in 14:12 */
+ addr |= (offset & 0xFFF); /* byte address in 0:11 */
+
+#ifdef __BIG_ENDIAN
+ /* Point to the correct part of the 32-bit "val". */
+ valp += 4 - size;
+#endif
+
+ return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0,
+ valp, size, addr);
+}
+
+
+static struct pci_ops tile_cfg_ops = {
+ .read = tile_cfg_read,
+ .write = tile_cfg_write,
+};
+
+
+/*
+ * In the following, each PCI controller's mem_resources[1]
+ * represents its (non-prefetchable) PCI memory resource.
+ * mem_resources[0] and mem_resources[2] refer to its PCI I/O and
+ * prefetchable PCI memory resources, respectively.
+ * For more details, see pci_setup_bridge() in setup-bus.c.
+ * By comparing the target PCI memory address against the
+ * end address of controller 0, we can determine the controller
+ * that should accept the PCI memory access.
+ */
+#define TILE_READ(size, type) \
+type _tile_read##size(unsigned long addr) \
+{ \
+ type val; \
+ int idx = 0; \
+ if (addr > controllers[0].mem_resources[1].end && \
+ addr > controllers[0].mem_resources[2].end) \
+ idx = 1; \
+ if (hv_dev_pread(controllers[idx].hv_mem_fd, 0, \
+ (HV_VirtAddr)(&val), sizeof(type), addr)) \
+ pr_err("PCI: read %zd bytes at 0x%lX failed\n", \
+ sizeof(type), addr); \
+ return val; \
+} \
+EXPORT_SYMBOL(_tile_read##size)
+
+TILE_READ(b, u8);
+TILE_READ(w, u16);
+TILE_READ(l, u32);
+TILE_READ(q, u64);
+
+#define TILE_WRITE(size, type) \
+void _tile_write##size(type val, unsigned long addr) \
+{ \
+ int idx = 0; \
+ if (addr > controllers[0].mem_resources[1].end && \
+ addr > controllers[0].mem_resources[2].end) \
+ idx = 1; \
+ if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0, \
+ (HV_VirtAddr)(&val), sizeof(type), addr)) \
+ pr_err("PCI: write %zd bytes at 0x%lX failed\n", \
+ sizeof(type), addr); \
+} \
+EXPORT_SYMBOL(_tile_write##size)
+
+TILE_WRITE(b, u8);
+TILE_WRITE(w, u16);
+TILE_WRITE(l, u32);
+TILE_WRITE(q, u64);
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index fb0b3cbeae14..f18573643ed1 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -840,7 +840,7 @@ static int __init topology_init(void)
for_each_online_node(i)
register_one_node(i);
- for_each_present_cpu(i)
+ for (i = 0; i < smp_height * smp_width; ++i)
register_cpu(&cpu_devices[i], i);
return 0;
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 687719d4abd1..757407e36696 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -16,7 +16,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 74d62d098edf..b949edcec200 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -18,7 +18,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
-#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index 7e764669a022..e2187d24a9b4 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -20,7 +20,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
diff --git a/arch/tile/lib/memchr_32.c b/arch/tile/lib/memchr_32.c
index 6235283b4859..cc3d9badf030 100644
--- a/arch/tile/lib/memchr_32.c
+++ b/arch/tile/lib/memchr_32.c
@@ -18,12 +18,24 @@
void *memchr(const void *s, int c, size_t n)
{
+ const uint32_t *last_word_ptr;
+ const uint32_t *p;
+ const char *last_byte_ptr;
+ uintptr_t s_int;
+ uint32_t goal, before_mask, v, bits;
+ char *ret;
+
+ if (__builtin_expect(n == 0, 0)) {
+ /* Don't dereference any memory if the array is empty. */
+ return NULL;
+ }
+
/* Get an aligned pointer. */
- const uintptr_t s_int = (uintptr_t) s;
- const uint32_t *p = (const uint32_t *)(s_int & -4);
+ s_int = (uintptr_t) s;
+ p = (const uint32_t *)(s_int & -4);
/* Create four copies of the byte for which we are looking. */
- const uint32_t goal = 0x01010101 * (uint8_t) c;
+ goal = 0x01010101 * (uint8_t) c;
/* Read the first word, but munge it so that bytes before the array
* will not match goal.
@@ -31,23 +43,14 @@ void *memchr(const void *s, int c, size_t n)
* Note that this shift count expression works because we know
* shift counts are taken mod 32.
*/
- const uint32_t before_mask = (1 << (s_int << 3)) - 1;
- uint32_t v = (*p | before_mask) ^ (goal & before_mask);
+ before_mask = (1 << (s_int << 3)) - 1;
+ v = (*p | before_mask) ^ (goal & before_mask);
/* Compute the address of the last byte. */
- const char *const last_byte_ptr = (const char *)s + n - 1;
+ last_byte_ptr = (const char *)s + n - 1;
/* Compute the address of the word containing the last byte. */
- const uint32_t *const last_word_ptr =
- (const uint32_t *)((uintptr_t) last_byte_ptr & -4);
-
- uint32_t bits;
- char *ret;
-
- if (__builtin_expect(n == 0, 0)) {
- /* Don't dereference any memory if the array is empty. */
- return NULL;
- }
+ last_word_ptr = (const uint32_t *)((uintptr_t) last_byte_ptr & -4);
while ((bits = __insn_seqb(v, goal)) == 0) {
if (__builtin_expect(p == last_word_ptr, 0)) {
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c
index 485e24d62c6b..5cd1c4004eca 100644
--- a/arch/tile/lib/spinlock_32.c
+++ b/arch/tile/lib/spinlock_32.c
@@ -167,23 +167,30 @@ void arch_write_lock_slow(arch_rwlock_t *rwlock, u32 val)
* when we compare them.
*/
u32 my_ticket_;
+ u32 iterations = 0;
- /* Take out the next ticket; this will also stop would-be readers. */
- if (val & 1)
- val = get_rwlock(rwlock);
- rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT);
+ /*
+ * Wait until there are no readers, then bump up the next
+ * field and capture the ticket value.
+ */
+ for (;;) {
+ if (!(val & 1)) {
+ if ((val >> RD_COUNT_SHIFT) == 0)
+ break;
+ rwlock->lock = val;
+ }
+ delay_backoff(iterations++);
+ val = __insn_tns((int *)&rwlock->lock);
+ }
- /* Extract my ticket value from the original word. */
+ /* Take out the next ticket and extract my ticket value. */
+ rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT);
my_ticket_ = val >> WR_NEXT_SHIFT;
- /*
- * Wait until the "current" field matches our ticket, and
- * there are no remaining readers.
- */
+ /* Wait until the "current" field matches our ticket. */
for (;;) {
u32 curr_ = val >> WR_CURR_SHIFT;
- u32 readers = val >> RD_COUNT_SHIFT;
- u32 delta = ((my_ticket_ - curr_) & WR_MASK) + !!readers;
+ u32 delta = ((my_ticket_ - curr_) & WR_MASK);
if (likely(delta == 0))
break;
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index f295b4ac941d..dcebfc831cd6 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -24,7 +24,6 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 24688b697a8d..201a582c4137 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -21,7 +21,6 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 7f7338c90784..1664cce7b0ac 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -727,6 +727,9 @@ struct winch {
static void free_winch(struct winch *winch, int free_irq_ok)
{
+ if (free_irq_ok)
+ free_irq(WINCH_IRQ, winch);
+
list_del(&winch->list);
if (winch->pid != -1)
@@ -735,8 +738,6 @@ static void free_winch(struct winch *winch, int free_irq_ok)
os_close_file(winch->fd);
if (winch->stack != 0)
free_stack(winch->stack, 0);
- if (free_irq_ok)
- free_irq(WINCH_IRQ, winch);
kfree(winch);
}
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 340268be00b5..09bd7b585726 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -5,7 +5,6 @@
#include "linux/stddef.h"
#include "linux/fs.h"
-#include "linux/smp_lock.h"
#include "linux/ptrace.h"
#include "linux/sched.h"
#include "linux/slab.h"
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 849813f398e7..5852519b2d0f 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -28,7 +28,6 @@
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/utsname.h>
-#include <linux/smp_lock.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/poll.h>
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 4d293dced62f..9479a037419f 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -216,8 +216,8 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
}
/* Return an pointer with offset calculated */
-static inline unsigned long __set_fixmap_offset(enum fixed_addresses idx,
- phys_addr_t phys, pgprot_t flags)
+static __always_inline unsigned long
+__set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
__set_fixmap(idx, phys, flags);
return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1));
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index e8506c1f0c55..1c10c88ee4e1 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -61,9 +61,9 @@ DEFINE_GUEST_HANDLE(void);
#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
#endif
-#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-#endif
+#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>__MACH2PHYS_SHIFT)
/* Maximum number of virtual CPUs in multi-processor guests. */
#define MAX_VIRT_CPUS 32
diff --git a/arch/x86/include/asm/xen/interface_32.h b/arch/x86/include/asm/xen/interface_32.h
index 42a7e004ae5c..8413688b2571 100644
--- a/arch/x86/include/asm/xen/interface_32.h
+++ b/arch/x86/include/asm/xen/interface_32.h
@@ -32,6 +32,11 @@
/* And the trap vector is... */
#define TRAP_INSTR "int $0x82"
+#define __MACH2PHYS_VIRT_START 0xF5800000
+#define __MACH2PHYS_VIRT_END 0xF6800000
+
+#define __MACH2PHYS_SHIFT 2
+
/*
* Virtual addresses beyond this are not modifiable by guest OSes. The
* machine->physical mapping table starts at this address, read-only.
diff --git a/arch/x86/include/asm/xen/interface_64.h b/arch/x86/include/asm/xen/interface_64.h
index 100d2662b97c..839a4811cf98 100644
--- a/arch/x86/include/asm/xen/interface_64.h
+++ b/arch/x86/include/asm/xen/interface_64.h
@@ -39,18 +39,7 @@
#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
-
-#ifndef HYPERVISOR_VIRT_START
-#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
-#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
-#endif
-
-#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
-#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
-#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
-#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-#endif
+#define __MACH2PHYS_SHIFT 3
/*
* int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index dd8c1414b3d5..8760cc60a21c 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
+#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/page.h>
@@ -35,6 +36,8 @@ typedef struct xpaddr {
#define MAX_DOMAIN_PAGES \
((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
+extern unsigned long *machine_to_phys_mapping;
+extern unsigned int machine_to_phys_order;
extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
@@ -69,10 +72,8 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
if (xen_feature(XENFEAT_auto_translated_physmap))
return mfn;
-#if 0
if (unlikely((mfn >> machine_to_phys_order) != 0))
- return max_mapnr;
-#endif
+ return ~0;
pfn = 0;
/*
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 1b7b31ab7d86..212a6a42527c 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -33,7 +33,6 @@
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/device.h>
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index ec592caac4b4..cd21b654dec6 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -315,14 +315,18 @@ static void kgdb_remove_all_hw_break(void)
if (!breakinfo[i].enabled)
continue;
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
- if (bp->attr.disabled == 1)
+ if (!bp->attr.disabled) {
+ arch_uninstall_hw_breakpoint(bp);
+ bp->attr.disabled = 1;
continue;
+ }
if (dbg_is_early)
early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
breakinfo[i].type);
- else
- arch_uninstall_hw_breakpoint(bp);
- bp->attr.disabled = 1;
+ else if (hw_break_release_slot(i))
+ printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n",
+ breakinfo[i].addr);
+ breakinfo[i].enabled = 0;
}
}
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 7bf2dc4c8f70..12fcbe2c143e 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -30,7 +30,6 @@
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/device.h>
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 82e144a4e514..1ca12298ffc7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3395,6 +3395,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
load_host_msrs(vcpu);
+ kvm_load_ldt(ldt_selector);
loadsegment(fs, fs_selector);
#ifdef CONFIG_X86_64
load_gs_index(gs_selector);
@@ -3402,7 +3403,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
#else
loadsegment(gs, gs_selector);
#endif
- kvm_load_ldt(ldt_selector);
reload_tss(vcpu);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8da0e45ff7c9..ff21fdda0c53 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -821,10 +821,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
#endif
#ifdef CONFIG_X86_64
- if (is_long_mode(&vmx->vcpu)) {
- rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+ if (is_long_mode(&vmx->vcpu))
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
- }
#endif
for (i = 0; i < vmx->save_nmsrs; ++i)
kvm_set_shared_msr(vmx->guest_msrs[i].index,
@@ -839,23 +838,23 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
++vmx->vcpu.stat.host_state_reload;
vmx->host_state.loaded = 0;
- if (vmx->host_state.fs_reload_needed)
- loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+ if (is_long_mode(&vmx->vcpu))
+ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+#endif
if (vmx->host_state.gs_ldt_reload_needed) {
kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
load_gs_index(vmx->host_state.gs_sel);
- wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
#else
loadsegment(gs, vmx->host_state.gs_sel);
#endif
}
+ if (vmx->host_state.fs_reload_needed)
+ loadsegment(fs, vmx->host_state.fs_sel);
reload_tss();
#ifdef CONFIG_X86_64
- if (is_long_mode(&vmx->vcpu)) {
- rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
- wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
- }
+ wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
if (current_thread_info()->status & TS_USEDFPU)
clts();
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 235c0f4d3861..02c710bebf7a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -75,6 +75,11 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);
+unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
+EXPORT_SYMBOL(machine_to_phys_mapping);
+unsigned int machine_to_phys_order;
+EXPORT_SYMBOL(machine_to_phys_order);
+
struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);
@@ -1090,6 +1095,8 @@ static void __init xen_setup_stackprotector(void)
/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
+ struct physdev_set_iopl set_iopl;
+ int rc;
pgd_t *pgd;
if (!xen_start_info)
@@ -1097,6 +1104,8 @@ asmlinkage void __init xen_start_kernel(void)
xen_domain_type = XEN_PV_DOMAIN;
+ xen_setup_machphys_mapping();
+
/* Install Xen paravirt ops */
pv_info = xen_info;
pv_init_ops = xen_init_ops;
@@ -1191,8 +1200,6 @@ asmlinkage void __init xen_start_kernel(void)
/* Allocate and initialize top and mid mfn levels for p2m structure */
xen_build_mfn_list_list();
- init_mm.pgd = pgd;
-
/* keep using Xen gdt for now; no urgent need to change it */
#ifdef CONFIG_X86_32
@@ -1202,10 +1209,18 @@ asmlinkage void __init xen_start_kernel(void)
#else
pv_info.kernel_rpl = 0;
#endif
-
/* set the limit of our address space */
xen_reserve_top();
+ /* We used to do this in xen_arch_setup, but that is too late on AMD
+ * where early_cpu_init (run before ->arch_setup()) calls early_amd_init,
+ * which pokes the 0xcf8 port.
+ */
+ set_iopl.iopl = 1;
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
+ if (rc != 0)
+ xen_raw_printk("physdev_op failed %d\n", rc);
+
#ifdef CONFIG_X86_32
/* set up basic CPUID stuff */
cpu_detect(&new_cpu_data);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 21ed8d7f75a5..a1feff9e59b6 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2034,6 +2034,20 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
set_page_prot(pmd, PAGE_KERNEL_RO);
}
+void __init xen_setup_machphys_mapping(void)
+{
+ struct xen_machphys_mapping mapping;
+ unsigned long machine_to_phys_nr_ents;
+
+ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
+ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
+ machine_to_phys_nr_ents = mapping.max_mfn + 1;
+ } else {
+ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
+ }
+ machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
+}
+
#ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v)
{
@@ -2119,44 +2133,83 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
return pgd;
}
#else /* !CONFIG_X86_64 */
-static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD);
+static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
+static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
+
+static __init void xen_write_cr3_init(unsigned long cr3)
+{
+ unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
+
+ BUG_ON(read_cr3() != __pa(initial_page_table));
+ BUG_ON(cr3 != __pa(swapper_pg_dir));
+
+ /*
+ * We are switching to swapper_pg_dir for the first time (from
+ * initial_page_table) and therefore need to mark that page
+ * read-only and then pin it.
+ *
+ * Xen disallows sharing of kernel PMDs for PAE
+ * guests. Therefore we must copy the kernel PMD from
+ * initial_page_table into a new kernel PMD to be used in
+ * swapper_pg_dir.
+ */
+ swapper_kernel_pmd =
+ extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
+ memcpy(swapper_kernel_pmd, initial_kernel_pmd,
+ sizeof(pmd_t) * PTRS_PER_PMD);
+ swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
+ __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
+ set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
+
+ set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
+ xen_write_cr3(cr3);
+ pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
+
+ pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
+ PFN_DOWN(__pa(initial_page_table)));
+ set_page_prot(initial_page_table, PAGE_KERNEL);
+ set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
+
+ pv_mmu_ops.write_cr3 = &xen_write_cr3;
+}
__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
unsigned long max_pfn)
{
pmd_t *kernel_pmd;
- level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
+ initial_kernel_pmd =
+ extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
xen_start_info->nr_pt_frames * PAGE_SIZE +
512*1024);
kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
- memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
+ memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
- xen_map_identity_early(level2_kernel_pgt, max_pfn);
+ xen_map_identity_early(initial_kernel_pmd, max_pfn);
- memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
- set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
- __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
+ memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
+ initial_page_table[KERNEL_PGD_BOUNDARY] =
+ __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
- set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
- set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
+ set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
+ set_page_prot(initial_page_table, PAGE_KERNEL_RO);
set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
- xen_write_cr3(__pa(swapper_pg_dir));
-
- pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
+ pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
+ PFN_DOWN(__pa(initial_page_table)));
+ xen_write_cr3(__pa(initial_page_table));
memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
__pa(xen_start_info->pt_base +
xen_start_info->nr_pt_frames * PAGE_SIZE),
"XEN PAGETABLES");
- return swapper_pg_dir;
+ return initial_page_table;
}
#endif /* CONFIG_X86_64 */
@@ -2290,7 +2343,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
.write_cr2 = xen_write_cr2,
.read_cr3 = xen_read_cr3,
+#ifdef CONFIG_X86_32
+ .write_cr3 = xen_write_cr3_init,
+#else
.write_cr3 = xen_write_cr3,
+#endif
.flush_tlb_user = xen_flush_tlb,
.flush_tlb_kernel = xen_flush_tlb,
@@ -2627,7 +2684,8 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
- vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+ BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
+ (VM_PFNMAP | VM_RESERVED | VM_IO)));
rmd.mfn = mfn;
rmd.prot = prot;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 769c4b01fa32..01afd8a94607 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -23,7 +23,6 @@
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
-#include <xen/interface/memory.h>
#include <xen/features.h>
#include "xen-ops.h"
@@ -248,8 +247,7 @@ char * __init xen_memory_setup(void)
else
extra_pages = 0;
- if (!xen_initial_domain())
- xen_add_extra_mem(extra_pages);
+ xen_add_extra_mem(extra_pages);
return "Xen";
}
@@ -337,9 +335,6 @@ void __cpuinit xen_enable_syscall(void)
void __init xen_arch_setup(void)
{
- struct physdev_set_iopl set_iopl;
- int rc;
-
xen_panic_handler_init();
HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
@@ -356,11 +351,6 @@ void __init xen_arch_setup(void)
xen_enable_sysenter();
xen_enable_syscall();
- set_iopl.iopl = 1;
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
- if (rc != 0)
- printk(KERN_INFO "physdev_op failed %d\n", rc);
-
#ifdef CONFIG_ACPI
if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
printk(KERN_INFO "ACPI in unprivileged domain disabled\n");