Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/Kbuild                        |    2
-rw-r--r--  arch/arm/include/asm/arch_gicv3.h                  |   12
-rw-r--r--  arch/arm/include/asm/device.h                      |    3
-rw-r--r--  arch/arm/include/asm/dma-direct.h                  |   19
-rw-r--r--  arch/arm/include/asm/dma-mapping.h                 |    6
-rw-r--r--  arch/arm/include/asm/domain.h                      |    8
-rw-r--r--  arch/arm/include/asm/efi.h                         |   17
-rw-r--r--  arch/arm/include/asm/ftrace.h                      |    4
-rw-r--r--  arch/arm/include/asm/hardware/cache-aurora-l2.h    |  103
-rw-r--r--  arch/arm/include/asm/hardware/cache-l2x0.h         |    2
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx-adma.h        |  919
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx.h             |  311
-rw-r--r--  arch/arm/include/asm/hardware/iop_adma.h           |  106
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h               |    3
-rw-r--r--  arch/arm/include/asm/io.h                          |   15
-rw-r--r--  arch/arm/include/asm/kvm_arm.h                     |    1
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h                 |   36
-rw-r--r--  arch/arm/include/asm/kvm_host.h                    |   49
-rw-r--r--  arch/arm/include/asm/kvm_hyp.h                     |    1
-rw-r--r--  arch/arm/include/asm/kvm_mmio.h                    |   26
-rw-r--r--  arch/arm/include/asm/pci.h                         |    2
-rw-r--r--  arch/arm/include/asm/pgalloc.h                     |    2
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h              |    1
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h              |    1
-rw-r--r--  arch/arm/include/asm/pgtable-nommu.h               |   12
-rw-r--r--  arch/arm/include/asm/pgtable.h                     |    4
-rw-r--r--  arch/arm/include/asm/processor.h                   |    2
-rw-r--r--  arch/arm/include/asm/switch_to.h                   |    2
-rw-r--r--  arch/arm/include/asm/tlb.h                         |    6
-rw-r--r--  arch/arm/include/asm/topology.h                    |   20
-rw-r--r--  arch/arm/include/asm/uaccess.h                     |    4
-rw-r--r--  arch/arm/include/asm/vdso/gettimeofday.h           |  133
-rw-r--r--  arch/arm/include/asm/vdso/vsyscall.h               |   71
-rw-r--r--  arch/arm/include/asm/vdso_datapage.h               |   29
-rw-r--r--  arch/arm/include/asm/vmalloc.h                     |    4
-rw-r--r--  arch/arm/include/asm/xen/page-coherent.h           |   93
-rw-r--r--  arch/arm/include/asm/xen/xen-ops.h                 |    6
37 files changed, 421 insertions(+), 1614 deletions(-)
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 6b2dc15b6dff..fa579b23b4df 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -12,12 +12,10 @@ generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
generic-y += mmiowb.h
-generic-y += msi.h
generic-y += parport.h
generic-y += preempt.h
generic-y += seccomp.h
generic-y += serial.h
-generic-y += simd.h
generic-y += trace_clock.h
generated-y += mach-types.h
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 0555f14cc8be..c815477b4303 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -10,6 +10,7 @@
#ifndef __ASSEMBLY__
#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
@@ -325,15 +326,16 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
#define gits_write_cwriter(v, c) __gic_writeq_nonatomic(v, c)
/*
- * GITS_VPROPBASER - hi and lo bits may be accessed independently.
+ * GICR_VPROPBASER - hi and lo bits may be accessed independently.
*/
-#define gits_write_vpropbaser(v, c) __gic_writeq_nonatomic(v, c)
+#define gicr_read_vpropbaser(c) __gic_readq_nonatomic(c)
+#define gicr_write_vpropbaser(v, c) __gic_writeq_nonatomic(v, c)
/*
- * GITS_VPENDBASER - the Valid bit must be cleared before changing
+ * GICR_VPENDBASER - the Valid bit must be cleared before changing
* anything else.
*/
-static inline void gits_write_vpendbaser(u64 val, void * __iomem addr)
+static inline void gicr_write_vpendbaser(u64 val, void __iomem *addr)
{
u32 tmp;
@@ -350,7 +352,7 @@ static inline void gits_write_vpendbaser(u64 val, void * __iomem addr)
__gic_writeq_nonatomic(val, addr);
}
-#define gits_read_vpendbaser(c) __gic_readq_nonatomic(c)
+#define gicr_read_vpendbaser(c) __gic_readq_nonatomic(c)
static inline bool gic_prio_masking_enabled(void)
{
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index f6955b55c544..c675bc0d5aa8 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -15,9 +15,6 @@ struct dev_archdata {
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping *mapping;
#endif
-#ifdef CONFIG_XEN
- const struct dma_map_ops *dev_dma_ops;
-#endif
unsigned int dma_coherent:1;
unsigned int dma_ops_setup:1;
};
diff --git a/arch/arm/include/asm/dma-direct.h b/arch/arm/include/asm/dma-direct.h
index b67e5fc1fe43..7c3001a6a775 100644
--- a/arch/arm/include/asm/dma-direct.h
+++ b/arch/arm/include/asm/dma-direct.h
@@ -14,23 +14,4 @@ static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- u64 limit, mask;
-
- if (!dev->dma_mask)
- return 0;
-
- mask = *dev->dma_mask;
-
- limit = (mask + 1) & ~mask;
- if (limit && size > limit)
- return 0;
-
- if ((addr | (addr + size - 1)) & ~mask)
- return 0;
-
- return 1;
-}
-
#endif /* ASM_ARM_DMA_DIRECT_H */
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index dba9355e2484..bdd80ddbca34 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -91,12 +91,6 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
}
#endif
-/* do not use this function in a driver */
-static inline bool is_device_dma_coherent(struct device *dev)
-{
- return dev->archdata.dma_coherent;
-}
-
/**
* arm_dma_alloc - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 567dbede4785..f1d0a7807cd0 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -82,7 +82,7 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_CPU_CP15_MMU
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
{
unsigned int domain;
@@ -94,7 +94,7 @@ static inline unsigned int get_domain(void)
return domain;
}
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
{
asm volatile(
"mcr p15, 0, %0, c3, c0 @ set domain"
@@ -102,12 +102,12 @@ static inline void set_domain(unsigned val)
isb();
}
#else
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
{
return 0;
}
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
{
}
#endif
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index 7667826b93f1..5ac46e2860bc 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -50,19 +50,16 @@ void efi_virtmap_unload(void);
/* arch specific definitions used by the stub code */
-#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
-#define __efi_call_early(f, ...) f(__VA_ARGS__)
-#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__)
-#define efi_is_64bit() (false)
+#define efi_bs_call(func, ...) efi_system_table()->boottime->func(__VA_ARGS__)
+#define efi_rt_call(func, ...) efi_system_table()->runtime->func(__VA_ARGS__)
+#define efi_is_native() (true)
-#define efi_table_attr(table, attr, instance) \
- ((table##_t *)instance)->attr
+#define efi_table_attr(inst, attr) (inst->attr)
-#define efi_call_proto(protocol, f, instance, ...) \
- ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
+#define efi_call_proto(inst, func, ...) inst->func(inst, ##__VA_ARGS__)
-struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg);
-void free_screen_info(efi_system_table_t *sys_table, struct screen_info *si);
+struct screen_info *alloc_screen_info(void);
+void free_screen_info(struct screen_info *si);
static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
{
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 18b0197f2384..48ec1d0337da 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -11,7 +11,6 @@
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifndef __ASSEMBLY__
-extern void mcount(void);
extern void __gnu_mcount_nc(void);
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -23,9 +22,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
/* With Thumb-2, the recorded addresses have the lsb set */
return addr & ~1;
}
-
-extern void ftrace_caller_old(void);
-extern void ftrace_call_old(void);
#endif
#endif
diff --git a/arch/arm/include/asm/hardware/cache-aurora-l2.h b/arch/arm/include/asm/hardware/cache-aurora-l2.h
new file mode 100644
index 000000000000..39769ffa0051
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-aurora-l2.h
@@ -0,0 +1,103 @@
+/*
+ * AURORA shared L2 cache controller support
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __ASM_ARM_HARDWARE_AURORA_L2_H
+#define __ASM_ARM_HARDWARE_AURORA_L2_H
+
+#define AURORA_SYNC_REG 0x700
+#define AURORA_RANGE_BASE_ADDR_REG 0x720
+#define AURORA_FLUSH_PHY_ADDR_REG 0x7f0
+#define AURORA_INVAL_RANGE_REG 0x774
+#define AURORA_CLEAN_RANGE_REG 0x7b4
+#define AURORA_FLUSH_RANGE_REG 0x7f4
+
+#define AURORA_ACR_REPLACEMENT_OFFSET 27
+#define AURORA_ACR_REPLACEMENT_MASK \
+ (0x3 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_REPLACEMENT_TYPE_WAYRR \
+ (0 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_REPLACEMENT_TYPE_LFSR \
+ (1 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU \
+ (3 << AURORA_ACR_REPLACEMENT_OFFSET)
+
+#define AURORA_ACR_PARITY_EN (1 << 21)
+#define AURORA_ACR_ECC_EN (1 << 20)
+
+#define AURORA_ACR_FORCE_WRITE_POLICY_OFFSET 0
+#define AURORA_ACR_FORCE_WRITE_POLICY_MASK \
+ (0x3 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+#define AURORA_ACR_FORCE_WRITE_POLICY_DIS \
+ (0 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+#define AURORA_ACR_FORCE_WRITE_BACK_POLICY \
+ (1 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+#define AURORA_ACR_FORCE_WRITE_THRO_POLICY \
+ (2 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+
+#define AURORA_ERR_CNT_REG 0x600
+#define AURORA_ERR_ATTR_CAP_REG 0x608
+#define AURORA_ERR_ADDR_CAP_REG 0x60c
+#define AURORA_ERR_WAY_CAP_REG 0x610
+#define AURORA_ERR_INJECT_CTL_REG 0x614
+#define AURORA_ERR_INJECT_MASK_REG 0x618
+
+#define AURORA_ERR_CNT_CLR_OFFSET 31
+#define AURORA_ERR_CNT_CLR \
+ (0x1 << AURORA_ERR_CNT_CLR_OFFSET)
+#define AURORA_ERR_CNT_UE_OFFSET 16
+#define AURORA_ERR_CNT_UE_MASK \
+ (0x7fff << AURORA_ERR_CNT_UE_OFFSET)
+#define AURORA_ERR_CNT_CE_OFFSET 0
+#define AURORA_ERR_CNT_CE_MASK \
+ (0xffff << AURORA_ERR_CNT_CE_OFFSET)
+
+#define AURORA_ERR_ATTR_SRC_OFF 16
+#define AURORA_ERR_ATTR_SRC_MSK \
+ (0x7 << AURORA_ERR_ATTR_SRC_OFF)
+#define AURORA_ERR_ATTR_TXN_OFF 12
+#define AURORA_ERR_ATTR_TXN_MSK \
+ (0xf << AURORA_ERR_ATTR_TXN_OFF)
+#define AURORA_ERR_ATTR_ERR_OFF 8
+#define AURORA_ERR_ATTR_ERR_MSK \
+ (0x3 << AURORA_ERR_ATTR_ERR_OFF)
+#define AURORA_ERR_ATTR_CAP_VALID_OFF 0
+#define AURORA_ERR_ATTR_CAP_VALID \
+ (0x1 << AURORA_ERR_ATTR_CAP_VALID_OFF)
+
+#define AURORA_ERR_ADDR_CAP_ADDR_MASK 0xffffffe0
+
+#define AURORA_ERR_WAY_IDX_OFF 8
+#define AURORA_ERR_WAY_IDX_MSK \
+ (0xfff << AURORA_ERR_WAY_IDX_OFF)
+#define AURORA_ERR_WAY_CAP_WAY_OFFSET 1
+#define AURORA_ERR_WAY_CAP_WAY_MASK \
+ (0xf << AURORA_ERR_WAY_CAP_WAY_OFFSET)
+
+#define AURORA_ERR_INJECT_CTL_ADDR_MASK 0xfffffff0
+#define AURORA_ERR_ATTR_TXN_OFF 12
+#define AURORA_ERR_INJECT_CTL_EN_MASK 0x3
+#define AURORA_ERR_INJECT_CTL_EN_PARITY 0x2
+#define AURORA_ERR_INJECT_CTL_EN_ECC 0x1
+
+#define AURORA_MAX_RANGE_SIZE 1024
+
+#define AURORA_WAY_SIZE_SHIFT 2
+
+#define AURORA_CTRL_FW 0x100
+
+/* chose a number outside L2X0_CACHE_ID_PART_MASK to be sure to make
+ * the distinction between a number coming from hardware and a number
+ * coming from the device tree */
+#define AURORA_CACHE_ID 0x100
+
+#endif /* __ASM_ARM_HARDWARE_AURORA_L2_H */
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 32edfadb1593..a6d4ee86ba54 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -118,6 +118,8 @@
#define L310_AUX_CTRL_STORE_LIMITATION BIT(11) /* R2P0+ */
#define L310_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
#define L310_AUX_CTRL_ASSOCIATIVITY_16 BIT(16)
+#define L310_AUX_CTRL_FWA_SHIFT 23
+#define L310_AUX_CTRL_FWA_MASK (3 << 23)
#define L310_AUX_CTRL_CACHE_REPLACE_RR BIT(25) /* R2P0+ */
#define L310_AUX_CTRL_NS_LOCKDOWN BIT(26)
#define L310_AUX_CTRL_NS_INT_CTRL BIT(27)
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
deleted file mode 100644
index 6d998df17efd..000000000000
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ /dev/null
@@ -1,919 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright © 2006, Intel Corporation.
- */
-#ifndef _ADMA_H
-#define _ADMA_H
-#include <linux/types.h>
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <asm/hardware/iop_adma.h>
-
-/* Memory copy units */
-#define DMA_CCR(chan) (chan->mmr_base + 0x0)
-#define DMA_CSR(chan) (chan->mmr_base + 0x4)
-#define DMA_DAR(chan) (chan->mmr_base + 0xc)
-#define DMA_NDAR(chan) (chan->mmr_base + 0x10)
-#define DMA_PADR(chan) (chan->mmr_base + 0x14)
-#define DMA_PUADR(chan) (chan->mmr_base + 0x18)
-#define DMA_LADR(chan) (chan->mmr_base + 0x1c)
-#define DMA_BCR(chan) (chan->mmr_base + 0x20)
-#define DMA_DCR(chan) (chan->mmr_base + 0x24)
-
-/* Application accelerator unit */
-#define AAU_ACR(chan) (chan->mmr_base + 0x0)
-#define AAU_ASR(chan) (chan->mmr_base + 0x4)
-#define AAU_ADAR(chan) (chan->mmr_base + 0x8)
-#define AAU_ANDAR(chan) (chan->mmr_base + 0xc)
-#define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2)))
-#define AAU_DAR(chan) (chan->mmr_base + 0x20)
-#define AAU_ABCR(chan) (chan->mmr_base + 0x24)
-#define AAU_ADCR(chan) (chan->mmr_base + 0x28)
-#define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
-#define AAU_EDCR0_IDX 8
-#define AAU_EDCR1_IDX 17
-#define AAU_EDCR2_IDX 26
-
-#define DMA0_ID 0
-#define DMA1_ID 1
-#define AAU_ID 2
-
-struct iop3xx_aau_desc_ctrl {
- unsigned int int_en:1;
- unsigned int blk1_cmd_ctrl:3;
- unsigned int blk2_cmd_ctrl:3;
- unsigned int blk3_cmd_ctrl:3;
- unsigned int blk4_cmd_ctrl:3;
- unsigned int blk5_cmd_ctrl:3;
- unsigned int blk6_cmd_ctrl:3;
- unsigned int blk7_cmd_ctrl:3;
- unsigned int blk8_cmd_ctrl:3;
- unsigned int blk_ctrl:2;
- unsigned int dual_xor_en:1;
- unsigned int tx_complete:1;
- unsigned int zero_result_err:1;
- unsigned int zero_result_en:1;
- unsigned int dest_write_en:1;
-};
-
-struct iop3xx_aau_e_desc_ctrl {
- unsigned int reserved:1;
- unsigned int blk1_cmd_ctrl:3;
- unsigned int blk2_cmd_ctrl:3;
- unsigned int blk3_cmd_ctrl:3;
- unsigned int blk4_cmd_ctrl:3;
- unsigned int blk5_cmd_ctrl:3;
- unsigned int blk6_cmd_ctrl:3;
- unsigned int blk7_cmd_ctrl:3;
- unsigned int blk8_cmd_ctrl:3;
- unsigned int reserved2:7;
-};
-
-struct iop3xx_dma_desc_ctrl {
- unsigned int pci_transaction:4;
- unsigned int int_en:1;
- unsigned int dac_cycle_en:1;
- unsigned int mem_to_mem_en:1;
- unsigned int crc_data_tx_en:1;
- unsigned int crc_gen_en:1;
- unsigned int crc_seed_dis:1;
- unsigned int reserved:21;
- unsigned int crc_tx_complete:1;
-};
-
-struct iop3xx_desc_dma {
- u32 next_desc;
- union {
- u32 pci_src_addr;
- u32 pci_dest_addr;
- u32 src_addr;
- };
- union {
- u32 upper_pci_src_addr;
- u32 upper_pci_dest_addr;
- };
- union {
- u32 local_pci_src_addr;
- u32 local_pci_dest_addr;
- u32 dest_addr;
- };
- u32 byte_count;
- union {
- u32 desc_ctrl;
- struct iop3xx_dma_desc_ctrl desc_ctrl_field;
- };
- u32 crc_addr;
-};
-
-struct iop3xx_desc_aau {
- u32 next_desc;
- u32 src[4];
- u32 dest_addr;
- u32 byte_count;
- union {
- u32 desc_ctrl;
- struct iop3xx_aau_desc_ctrl desc_ctrl_field;
- };
- union {
- u32 src_addr;
- u32 e_desc_ctrl;
- struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
- } src_edc[31];
-};
-
-struct iop3xx_aau_gfmr {
- unsigned int gfmr1:8;
- unsigned int gfmr2:8;
- unsigned int gfmr3:8;
- unsigned int gfmr4:8;
-};
-
-struct iop3xx_desc_pq_xor {
- u32 next_desc;
- u32 src[3];
- union {
- u32 data_mult1;
- struct iop3xx_aau_gfmr data_mult1_field;
- };
- u32 dest_addr;
- u32 byte_count;
- union {
- u32 desc_ctrl;
- struct iop3xx_aau_desc_ctrl desc_ctrl_field;
- };
- union {
- u32 src_addr;
- u32 e_desc_ctrl;
- struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
- u32 data_multiplier;
- struct iop3xx_aau_gfmr data_mult_field;
- u32 reserved;
- } src_edc_gfmr[19];
-};
-
-struct iop3xx_desc_dual_xor {
- u32 next_desc;
- u32 src0_addr;
- u32 src1_addr;
- u32 h_src_addr;
- u32 d_src_addr;
- u32 h_dest_addr;
- u32 byte_count;
- union {
- u32 desc_ctrl;
- struct iop3xx_aau_desc_ctrl desc_ctrl_field;
- };
- u32 d_dest_addr;
-};
-
-union iop3xx_desc {
- struct iop3xx_desc_aau *aau;
- struct iop3xx_desc_dma *dma;
- struct iop3xx_desc_pq_xor *pq_xor;
- struct iop3xx_desc_dual_xor *dual_xor;
- void *ptr;
-};
-
-/* No support for p+q operations */
-static inline int
-iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
-{
- BUG();
- return 0;
-}
-
-static inline void
-iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
- unsigned long flags)
-{
- BUG();
-}
-
-static inline void
-iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
-{
- BUG();
-}
-
-static inline void
-iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
- dma_addr_t addr, unsigned char coef)
-{
- BUG();
-}
-
-static inline int
-iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
-{
- BUG();
- return 0;
-}
-
-static inline void
-iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
- unsigned long flags)
-{
- BUG();
-}
-
-static inline void
-iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
-{
- BUG();
-}
-
-#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
-
-static inline void
-iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
- dma_addr_t *src)
-{
- BUG();
-}
-
-static inline int iop_adma_get_max_xor(void)
-{
- return 32;
-}
-
-static inline int iop_adma_get_max_pq(void)
-{
- BUG();
- return 0;
-}
-
-static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
-{
- int id = chan->device->id;
-
- switch (id) {
- case DMA0_ID:
- case DMA1_ID:
- return __raw_readl(DMA_DAR(chan));
- case AAU_ID:
- return __raw_readl(AAU_ADAR(chan));
- default:
- BUG();
- }
- return 0;
-}
-
-static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
- u32 next_desc_addr)
-{
- int id = chan->device->id;
-
- switch (id) {
- case DMA0_ID:
- case DMA1_ID:
- __raw_writel(next_desc_addr, DMA_NDAR(chan));
- break;
- case AAU_ID:
- __raw_writel(next_desc_addr, AAU_ANDAR(chan));
- break;
- }
-
-}
-
-#define IOP_ADMA_STATUS_BUSY (1 << 10)
-#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
-#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
-#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
-
-static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
-{
- u32 status = __raw_readl(DMA_CSR(chan));
- return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
-}
-
-static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
- int num_slots)
-{
- /* num_slots will only ever be 1, 2, 4, or 8 */
- return (desc->idx & (num_slots - 1)) ? 0 : 1;
-}
-
-/* to do: support large (i.e. > hw max) buffer sizes */
-static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
-{
- *slots_per_op = 1;
- return 1;
-}
-
-/* to do: support large (i.e. > hw max) buffer sizes */
-static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
-{
- *slots_per_op = 1;
- return 1;
-}
-
-static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
- int *slots_per_op)
-{
- static const char slot_count_table[] = {
- 1, 1, 1, 1, /* 01 - 04 */
- 2, 2, 2, 2, /* 05 - 08 */
- 4, 4, 4, 4, /* 09 - 12 */
- 4, 4, 4, 4, /* 13 - 16 */
- 8, 8, 8, 8, /* 17 - 20 */
- 8, 8, 8, 8, /* 21 - 24 */
- 8, 8, 8, 8, /* 25 - 28 */
- 8, 8, 8, 8, /* 29 - 32 */
- };
- *slots_per_op = slot_count_table[src_cnt - 1];
- return *slots_per_op;
-}
-
-static inline int
-iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
-{
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return iop_chan_memcpy_slot_count(0, slots_per_op);
- case AAU_ID:
- return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
- default:
- BUG();
- }
- return 0;
-}
-
-static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
- int *slots_per_op)
-{
- int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
-
- if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
- return slot_cnt;
-
- len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
- while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
- len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
- slot_cnt += *slots_per_op;
- }
-
- slot_cnt += *slots_per_op;
-
- return slot_cnt;
-}
-
-/* zero sum on iop3xx is limited to 1k at a time so it requires multiple
- * descriptors
- */
-static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
- int *slots_per_op)
-{
- int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
-
- if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
- return slot_cnt;
-
- len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
- while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
- len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
- slot_cnt += *slots_per_op;
- }
-
- slot_cnt += *slots_per_op;
-
- return slot_cnt;
-}
-
-static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return hw_desc.dma->byte_count;
- case AAU_ID:
- return hw_desc.aau->byte_count;
- default:
- BUG();
- }
- return 0;
-}
-
-/* translate the src_idx to a descriptor word index */
-static inline int __desc_idx(int src_idx)
-{
- static const int desc_idx_table[] = { 0, 0, 0, 0,
- 0, 1, 2, 3,
- 5, 6, 7, 8,
- 9, 10, 11, 12,
- 14, 15, 16, 17,
- 18, 19, 20, 21,
- 23, 24, 25, 26,
- 27, 28, 29, 30,
- };
-
- return desc_idx_table[src_idx];
-}
-
-static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan,
- int src_idx)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return hw_desc.dma->src_addr;
- case AAU_ID:
- break;
- default:
- BUG();
- }
-
- if (src_idx < 4)
- return hw_desc.aau->src[src_idx];
- else
- return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
-}
-
-static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
- int src_idx, dma_addr_t addr)
-{
- if (src_idx < 4)
- hw_desc->src[src_idx] = addr;
- else
- hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
-}
-
-static inline void
-iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
-{
- struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
- union {
- u32 value;
- struct iop3xx_dma_desc_ctrl field;
- } u_desc_ctrl;
-
- u_desc_ctrl.value = 0;
- u_desc_ctrl.field.mem_to_mem_en = 1;
- u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
- u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
- hw_desc->desc_ctrl = u_desc_ctrl.value;
- hw_desc->upper_pci_src_addr = 0;
- hw_desc->crc_addr = 0;
-}
-
-static inline void
-iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
-{
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
- union {
- u32 value;
- struct iop3xx_aau_desc_ctrl field;
- } u_desc_ctrl;
-
- u_desc_ctrl.value = 0;
- u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
- u_desc_ctrl.field.dest_write_en = 1;
- u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
- hw_desc->desc_ctrl = u_desc_ctrl.value;
-}
-
-static inline u32
-iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
- unsigned long flags)
-{
- int i, shift;
- u32 edcr;
- union {
- u32 value;
- struct iop3xx_aau_desc_ctrl field;
- } u_desc_ctrl;
-
- u_desc_ctrl.value = 0;
- switch (src_cnt) {
- case 25 ... 32:
- u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
- edcr = 0;
- shift = 1;
- for (i = 24; i < src_cnt; i++) {
- edcr |= (1 << shift);
- shift += 3;
- }
- hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
- src_cnt = 24;
- /* fall through */
- case 17 ... 24:
- if (!u_desc_ctrl.field.blk_ctrl) {
- hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
- u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
- }
- edcr = 0;
- shift = 1;
- for (i = 16; i < src_cnt; i++) {
- edcr |= (1 << shift);
- shift += 3;
- }
- hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
- src_cnt = 16;
- /* fall through */
- case 9 ... 16:
- if (!u_desc_ctrl.field.blk_ctrl)
- u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
- edcr = 0;
- shift = 1;
- for (i = 8; i < src_cnt; i++) {
- edcr |= (1 << shift);
- shift += 3;
- }
- hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
- src_cnt = 8;
- /* fall through */
- case 2 ... 8:
- shift = 1;
- for (i = 0; i < src_cnt; i++) {
- u_desc_ctrl.value |= (1 << shift);
- shift += 3;
- }
-
- if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
- u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
- }
-
- u_desc_ctrl.field.dest_write_en = 1;
- u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
- u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
- hw_desc->desc_ctrl = u_desc_ctrl.value;
-
- return u_desc_ctrl.value;
-}
-
-static inline void
-iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
- unsigned long flags)
-{
- iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
-}
-
-/* return the number of operations */
-static inline int
-iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
- unsigned long flags)
-{
- int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
- struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
- union {
- u32 value;
- struct iop3xx_aau_desc_ctrl field;
- } u_desc_ctrl;
- int i, j;
-
- hw_desc = desc->hw_desc;
-
- for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
- i += slots_per_op, j++) {
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
- u_desc_ctrl.field.dest_write_en = 0;
- u_desc_ctrl.field.zero_result_en = 1;
- u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
- iter->desc_ctrl = u_desc_ctrl.value;
-
- /* for the subsequent descriptors preserve the store queue
- * and chain them together
- */
- if (i) {
- prev_hw_desc =
- iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
- prev_hw_desc->next_desc =
- (u32) (desc->async_tx.phys + (i << 5));
- }
- }
-
- return j;
-}
-
-static inline void
-iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
- unsigned long flags)
-{
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
- union {
- u32 value;
- struct iop3xx_aau_desc_ctrl field;
- } u_desc_ctrl;
-
- u_desc_ctrl.value = 0;
- switch (src_cnt) {
- case 25 ... 32:
- u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
- hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
- /* fall through */
- case 17 ... 24:
- if (!u_desc_ctrl.field.blk_ctrl) {
- hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
- u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
- }
- hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
- /* fall through */
- case 9 ... 16:
- if (!u_desc_ctrl.field.blk_ctrl)
- u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
- hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
- /* fall through */
- case 1 ... 8:
- if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
- u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
- }
-
- u_desc_ctrl.field.dest_write_en = 0;
- u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
- hw_desc->desc_ctrl = u_desc_ctrl.value;
-}
-
-static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan,
- u32 byte_count)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- hw_desc.dma->byte_count = byte_count;
- break;
- case AAU_ID:
- hw_desc.aau->byte_count = byte_count;
- break;
- default:
- BUG();
- }
-}
-
-static inline void
-iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- iop_desc_init_memcpy(desc, 1);
- hw_desc.dma->byte_count = 0;
- hw_desc.dma->dest_addr = 0;
- hw_desc.dma->src_addr = 0;
- break;
- case AAU_ID:
- iop_desc_init_null_xor(desc, 2, 1);
- hw_desc.aau->byte_count = 0;
- hw_desc.aau->dest_addr = 0;
- hw_desc.aau->src[0] = 0;
- hw_desc.aau->src[1] = 0;
- break;
- default:
- BUG();
- }
-}
-
-static inline void
-iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
-{
- int slots_per_op = desc->slots_per_op;
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
- int i = 0;
-
- if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
- hw_desc->byte_count = len;
- } else {
- do {
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
- len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
- i += slots_per_op;
- } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
-
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- iter->byte_count = len;
- }
-}
-
-static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan,
- dma_addr_t addr)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- hw_desc.dma->dest_addr = addr;
- break;
- case AAU_ID:
- hw_desc.aau->dest_addr = addr;
- break;
- default:
- BUG();
- }
-}
-
-static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
- dma_addr_t addr)
-{
- struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
- hw_desc->src_addr = addr;
-}
-
-static inline void
-iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
- dma_addr_t addr)
-{
-
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
- int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
- int i;
-
- for (i = 0; (slot_cnt -= slots_per_op) >= 0;
- i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
- }
-}
-
-static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
- int src_idx, dma_addr_t addr)
-{
-
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
- int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
- int i;
-
- for (i = 0; (slot_cnt -= slots_per_op) >= 0;
- i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
- }
-}
-
-static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
- u32 next_desc_addr)
-{
- /* hw_desc->next_desc is the same location for all channels */
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- iop_paranoia(hw_desc.dma->next_desc);
- hw_desc.dma->next_desc = next_desc_addr;
-}
-
-static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
-{
- /* hw_desc->next_desc is the same location for all channels */
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
- return hw_desc.dma->next_desc;
-}
-
-static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
-{
- /* hw_desc->next_desc is the same location for all channels */
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
- hw_desc.dma->next_desc = 0;
-}
-
-static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
- u32 val)
-{
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
- hw_desc->src[0] = val;
-}
-
-static inline enum sum_check_flags
-iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
-{
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
- struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
-
- iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
- return desc_ctrl.zero_result_err << SUM_CHECK_P;
-}
-
-static inline void iop_chan_append(struct iop_adma_chan *chan)
-{
- u32 dma_chan_ctrl;
-
- dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
- dma_chan_ctrl |= 0x2;
- __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
-}
-
-static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
-{
- return __raw_readl(DMA_CSR(chan));
-}
-
-static inline void iop_chan_disable(struct iop_adma_chan *chan)
-{
- u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
- dma_chan_ctrl &= ~1;
- __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
-}
-
-static inline void iop_chan_enable(struct iop_adma_chan *chan)
-{
- u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
-
- dma_chan_ctrl |= 1;
- __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
-}
-
-static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
-{
- u32 status = __raw_readl(DMA_CSR(chan));
- status &= (1 << 9);
- __raw_writel(status, DMA_CSR(chan));
-}
-
-static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
-{
- u32 status = __raw_readl(DMA_CSR(chan));
- status &= (1 << 8);
- __raw_writel(status, DMA_CSR(chan));
-}
-
-static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
-{
- u32 status = __raw_readl(DMA_CSR(chan));
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
- break;
- case AAU_ID:
- status &= (1 << 5);
- break;
- default:
- BUG();
- }
-
- __raw_writel(status, DMA_CSR(chan));
-}
-
-static inline int
-iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
-{
- return 0;
-}
-
-static inline int
-iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
-{
- return 0;
-}
-
-static inline int
-iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
-{
- return 0;
-}
-
-static inline int
-iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
-{
- return test_bit(5, &status);
-}
-
-static inline int
-iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
-{
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return test_bit(2, &status);
- default:
- return 0;
- }
-}
-
-static inline int
-iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
-{
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return test_bit(3, &status);
- default:
- return 0;
- }
-}
-
-static inline int
-iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
-{
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return test_bit(1, &status);
- default:
- return 0;
- }
-}
-#endif /* _ADMA_H */
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
deleted file mode 100644
index 3cb6f22f510b..000000000000
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ /dev/null
@@ -1,311 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/include/asm/hardware/iop3xx.h
- *
- * Intel IOP32X and IOP33X register definitions
- *
- * Author: Rory Bolt <rorybolt@pacbell.net>
- * Copyright (C) 2002 Rory Bolt
- * Copyright (C) 2004 Intel Corp.
- */
-
-#ifndef __IOP3XX_H
-#define __IOP3XX_H
-
-/*
- * IOP3XX GPIO handling
- */
-#define IOP3XX_GPIO_LINE(x) (x)
-
-#ifndef __ASSEMBLY__
-extern int init_atu;
-extern int iop3xx_get_init_atu(void);
-#endif
-
-
-/*
- * IOP3XX processor registers
- */
-#define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000
-#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000
-#define IOP3XX_PERIPHERAL_SIZE 0x00002000
-#define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
- IOP3XX_PERIPHERAL_SIZE - 1)
-#define IOP3XX_PERIPHERAL_UPPER_VA (IOP3XX_PERIPHERAL_VIRT_BASE +\
- IOP3XX_PERIPHERAL_SIZE - 1)
-#define IOP3XX_PMMR_PHYS_TO_VIRT(addr) (u32) ((u32) (addr) -\
- (IOP3XX_PERIPHERAL_PHYS_BASE\
- - IOP3XX_PERIPHERAL_VIRT_BASE))
-#define IOP3XX_REG_ADDR(reg) (IOP3XX_PERIPHERAL_VIRT_BASE + (reg))
-
-/* Address Translation Unit */
-#define IOP3XX_ATUVID (volatile u16 *)IOP3XX_REG_ADDR(0x0100)
-#define IOP3XX_ATUDID (volatile u16 *)IOP3XX_REG_ADDR(0x0102)
-#define IOP3XX_ATUCMD (volatile u16 *)IOP3XX_REG_ADDR(0x0104)
-#define IOP3XX_ATUSR (volatile u16 *)IOP3XX_REG_ADDR(0x0106)
-#define IOP3XX_ATURID (volatile u8 *)IOP3XX_REG_ADDR(0x0108)
-#define IOP3XX_ATUCCR (volatile u32 *)IOP3XX_REG_ADDR(0x0109)
-#define IOP3XX_ATUCLSR (volatile u8 *)IOP3XX_REG_ADDR(0x010c)
-#define IOP3XX_ATULT (volatile u8 *)IOP3XX_REG_ADDR(0x010d)
-#define IOP3XX_ATUHTR (volatile u8 *)IOP3XX_REG_ADDR(0x010e)
-#define IOP3XX_ATUBIST (volatile u8 *)IOP3XX_REG_ADDR(0x010f)
-#define IOP3XX_IABAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0110)
-#define IOP3XX_IAUBAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0114)
-#define IOP3XX_IABAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0118)
-#define IOP3XX_IAUBAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x011c)
-#define IOP3XX_IABAR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0120)
-#define IOP3XX_IAUBAR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0124)
-#define IOP3XX_ASVIR (volatile u16 *)IOP3XX_REG_ADDR(0x012c)
-#define IOP3XX_ASIR (volatile u16 *)IOP3XX_REG_ADDR(0x012e)
-#define IOP3XX_ERBAR (volatile u32 *)IOP3XX_REG_ADDR(0x0130)
-#define IOP3XX_ATUILR (volatile u8 *)IOP3XX_REG_ADDR(0x013c)
-#define IOP3XX_ATUIPR (volatile u8 *)IOP3XX_REG_ADDR(0x013d)
-#define IOP3XX_ATUMGNT (volatile u8 *)IOP3XX_REG_ADDR(0x013e)
-#define IOP3XX_ATUMLAT (volatile u8 *)IOP3XX_REG_ADDR(0x013f)
-#define IOP3XX_IALR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0140)
-#define IOP3XX_IATVR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0144)
-#define IOP3XX_ERLR (volatile u32 *)IOP3XX_REG_ADDR(0x0148)
-#define IOP3XX_ERTVR (volatile u32 *)IOP3XX_REG_ADDR(0x014c)
-#define IOP3XX_IALR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0150)
-#define IOP3XX_IALR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0154)
-#define IOP3XX_IATVR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0158)
-#define IOP3XX_OIOWTVR (volatile u32 *)IOP3XX_REG_ADDR(0x015c)
-#define IOP3XX_OMWTVR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0160)
-#define IOP3XX_OUMWTVR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0164)
-#define IOP3XX_OMWTVR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0168)
-#define IOP3XX_OUMWTVR1 (volatile u32 *)IOP3XX_REG_ADDR(0x016c)
-#define IOP3XX_OUDWTVR (volatile u32 *)IOP3XX_REG_ADDR(0x0178)
-#define IOP3XX_ATUCR (volatile u32 *)IOP3XX_REG_ADDR(0x0180)
-#define IOP3XX_PCSR (volatile u32 *)IOP3XX_REG_ADDR(0x0184)
-#define IOP3XX_ATUISR (volatile u32 *)IOP3XX_REG_ADDR(0x0188)
-#define IOP3XX_ATUIMR (volatile u32 *)IOP3XX_REG_ADDR(0x018c)
-#define IOP3XX_IABAR3 (volatile u32 *)IOP3XX_REG_ADDR(0x0190)
-#define IOP3XX_IAUBAR3 (volatile u32 *)IOP3XX_REG_ADDR(0x0194)
-#define IOP3XX_IALR3 (volatile u32 *)IOP3XX_REG_ADDR(0x0198)
-#define IOP3XX_IATVR3 (volatile u32 *)IOP3XX_REG_ADDR(0x019c)
-#define IOP3XX_OCCAR (volatile u32 *)IOP3XX_REG_ADDR(0x01a4)
-#define IOP3XX_OCCDR (volatile u32 *)IOP3XX_REG_ADDR(0x01ac)
-#define IOP3XX_PDSCR (volatile u32 *)IOP3XX_REG_ADDR(0x01bc)
-#define IOP3XX_PMCAPID (volatile u8 *)IOP3XX_REG_ADDR(0x01c0)
-#define IOP3XX_PMNEXT (volatile u8 *)IOP3XX_REG_ADDR(0x01c1)
-#define IOP3XX_APMCR (volatile u16 *)IOP3XX_REG_ADDR(0x01c2)
-#define IOP3XX_APMCSR (volatile u16 *)IOP3XX_REG_ADDR(0x01c4)
-#define IOP3XX_PCIXCAPID (volatile u8 *)IOP3XX_REG_ADDR(0x01e0)
-#define IOP3XX_PCIXNEXT (volatile u8 *)IOP3XX_REG_ADDR(0x01e1)
-#define IOP3XX_PCIXCMD (volatile u16 *)IOP3XX_REG_ADDR(0x01e2)
-#define IOP3XX_PCIXSR (volatile u32 *)IOP3XX_REG_ADDR(0x01e4)
-#define IOP3XX_PCIIRSR (volatile u32 *)IOP3XX_REG_ADDR(0x01ec)
-#define IOP3XX_PCSR_OUT_Q_BUSY (1 << 15)
-#define IOP3XX_PCSR_IN_Q_BUSY (1 << 14)
-#define IOP3XX_ATUCR_OUT_EN (1 << 1)
-
-#define IOP3XX_INIT_ATU_DEFAULT 0
-#define IOP3XX_INIT_ATU_DISABLE -1
-#define IOP3XX_INIT_ATU_ENABLE 1
-
-/* Messaging Unit */
-#define IOP3XX_IMR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0310)
-#define IOP3XX_IMR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0314)
-#define IOP3XX_OMR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0318)
-#define IOP3XX_OMR1 (volatile u32 *)IOP3XX_REG_ADDR(0x031c)
-#define IOP3XX_IDR (volatile u32 *)IOP3XX_REG_ADDR(0x0320)
-#define IOP3XX_IISR (volatile u32 *)IOP3XX_REG_ADDR(0x0324)
-#define IOP3XX_IIMR (volatile u32 *)IOP3XX_REG_ADDR(0x0328)
-#define IOP3XX_ODR (volatile u32 *)IOP3XX_REG_ADDR(0x032c)
-#define IOP3XX_OISR (volatile u32 *)IOP3XX_REG_ADDR(0x0330)
-#define IOP3XX_OIMR (volatile u32 *)IOP3XX_REG_ADDR(0x0334)
-#define IOP3XX_MUCR (volatile u32 *)IOP3XX_REG_ADDR(0x0350)
-#define IOP3XX_QBAR (volatile u32 *)IOP3XX_REG_ADDR(0x0354)
-#define IOP3XX_IFHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0360)
-#define IOP3XX_IFTPR (volatile u32 *)IOP3XX_REG_ADDR(0x0364)
-#define IOP3XX_IPHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0368)
-#define IOP3XX_IPTPR (volatile u32 *)IOP3XX_REG_ADDR(0x036c)
-#define IOP3XX_OFHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0370)
-#define IOP3XX_OFTPR (volatile u32 *)IOP3XX_REG_ADDR(0x0374)
-#define IOP3XX_OPHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0378)
-#define IOP3XX_OPTPR (volatile u32 *)IOP3XX_REG_ADDR(0x037c)
-#define IOP3XX_IAR (volatile u32 *)IOP3XX_REG_ADDR(0x0380)
-
-/* DMA Controller */
-#define IOP3XX_DMA_PHYS_BASE(chan) (IOP3XX_PERIPHERAL_PHYS_BASE + \
- (0x400 + (chan << 6)))
-#define IOP3XX_DMA_UPPER_PA(chan) (IOP3XX_DMA_PHYS_BASE(chan) + 0x27)
-
-/* Peripheral bus interface */
-#define IOP3XX_PBCR (volatile u32 *)IOP3XX_REG_ADDR(0x0680)
-#define IOP3XX_PBISR (volatile u32 *)IOP3XX_REG_ADDR(0x0684)
-#define IOP3XX_PBBAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0688)
-#define IOP3XX_PBLR0 (volatile u32 *)IOP3XX_REG_ADDR(0x068c)
-#define IOP3XX_PBBAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0690)
-#define IOP3XX_PBLR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0694)
-#define IOP3XX_PBBAR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0698)
-#define IOP3XX_PBLR2 (volatile u32 *)IOP3XX_REG_ADDR(0x069c)
-#define IOP3XX_PBBAR3 (volatile u32 *)IOP3XX_REG_ADDR(0x06a0)
-#define IOP3XX_PBLR3 (volatile u32 *)IOP3XX_REG_ADDR(0x06a4)
-#define IOP3XX_PBBAR4 (volatile u32 *)IOP3XX_REG_ADDR(0x06a8)
-#define IOP3XX_PBLR4 (volatile u32 *)IOP3XX_REG_ADDR(0x06ac)
-#define IOP3XX_PBBAR5 (volatile u32 *)IOP3XX_REG_ADDR(0x06b0)
-#define IOP3XX_PBLR5 (volatile u32 *)IOP3XX_REG_ADDR(0x06b4)
-#define IOP3XX_PMBR0 (volatile u32 *)IOP3XX_REG_ADDR(0x06c0)
-#define IOP3XX_PMBR1 (volatile u32 *)IOP3XX_REG_ADDR(0x06e0)
-#define IOP3XX_PMBR2 (volatile u32 *)IOP3XX_REG_ADDR(0x06e4)
-
-/* Peripheral performance monitoring unit */
-#define IOP3XX_GTMR (volatile u32 *)IOP3XX_REG_ADDR(0x0700)
-#define IOP3XX_ESR (volatile u32 *)IOP3XX_REG_ADDR(0x0704)
-#define IOP3XX_EMISR (volatile u32 *)IOP3XX_REG_ADDR(0x0708)
-#define IOP3XX_GTSR (volatile u32 *)IOP3XX_REG_ADDR(0x0710)
-/* PERCR0 DOESN'T EXIST - index from 1! */
-#define IOP3XX_PERCR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0710)
-
-/* Timers */
-#define IOP3XX_TU_TMR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0000)
-#define IOP3XX_TU_TMR1 (volatile u32 *)IOP3XX_TIMER_REG(0x0004)
-#define IOP3XX_TU_TCR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0008)
-#define IOP3XX_TU_TCR1 (volatile u32 *)IOP3XX_TIMER_REG(0x000c)
-#define IOP3XX_TU_TRR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0010)
-#define IOP3XX_TU_TRR1 (volatile u32 *)IOP3XX_TIMER_REG(0x0014)
-#define IOP3XX_TU_TISR (volatile u32 *)IOP3XX_TIMER_REG(0x0018)
-#define IOP3XX_TU_WDTCR (volatile u32 *)IOP3XX_TIMER_REG(0x001c)
-#define IOP_TMR_EN 0x02
-#define IOP_TMR_RELOAD 0x04
-#define IOP_TMR_PRIVILEGED 0x08
-#define IOP_TMR_RATIO_1_1 0x00
-
-/* Watchdog timer definitions */
-#define IOP_WDTCR_EN_ARM 0x1e1e1e1e
-#define IOP_WDTCR_EN 0xe1e1e1e1
-/* iop3xx does not support stopping the watchdog, so we just re-arm */
-#define IOP_WDTCR_DIS_ARM (IOP_WDTCR_EN_ARM)
-#define IOP_WDTCR_DIS (IOP_WDTCR_EN)
-
-/* Application accelerator unit */
-#define IOP3XX_AAU_PHYS_BASE (IOP3XX_PERIPHERAL_PHYS_BASE + 0x800)
-#define IOP3XX_AAU_UPPER_PA (IOP3XX_AAU_PHYS_BASE + 0xa7)
-
-/* I2C bus interface unit */
-#define IOP3XX_ICR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1680)
-#define IOP3XX_ISR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1684)
-#define IOP3XX_ISAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1688)
-#define IOP3XX_IDBR0 (volatile u32 *)IOP3XX_REG_ADDR(0x168c)
-#define IOP3XX_IBMR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1694)
-#define IOP3XX_ICR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16a0)
-#define IOP3XX_ISR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16a4)
-#define IOP3XX_ISAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16a8)
-#define IOP3XX_IDBR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16ac)
-#define IOP3XX_IBMR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16b4)
-
-
-/*
- * IOP3XX I/O and Mem space regions for PCI autoconfiguration
- */
-#define IOP3XX_PCI_LOWER_MEM_PA 0x80000000
-#define IOP3XX_PCI_MEM_WINDOW_SIZE 0x08000000
-
-#define IOP3XX_PCI_LOWER_IO_PA 0x90000000
-#define IOP3XX_PCI_LOWER_IO_BA 0x00000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <linux/reboot.h>
-
-void iop3xx_map_io(void);
-void iop_init_cp6_handler(void);
-void iop_init_time(unsigned long tickrate);
-void iop3xx_restart(enum reboot_mode, const char *);
-
-static inline u32 read_tmr0(void)
-{
- u32 val;
- asm volatile("mrc p6, 0, %0, c0, c1, 0" : "=r" (val));
- return val;
-}
-
-static inline void write_tmr0(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c0, c1, 0" : : "r" (val));
-}
-
-static inline void write_tmr1(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c1, c1, 0" : : "r" (val));
-}
-
-static inline u32 read_tcr0(void)
-{
- u32 val;
- asm volatile("mrc p6, 0, %0, c2, c1, 0" : "=r" (val));
- return val;
-}
-
-static inline void write_tcr0(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c2, c1, 0" : : "r" (val));
-}
-
-static inline u32 read_tcr1(void)
-{
- u32 val;
- asm volatile("mrc p6, 0, %0, c3, c1, 0" : "=r" (val));
- return val;
-}
-
-static inline void write_tcr1(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c3, c1, 0" : : "r" (val));
-}
-
-static inline void write_trr0(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c4, c1, 0" : : "r" (val));
-}
-
-static inline void write_trr1(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c5, c1, 0" : : "r" (val));
-}
-
-static inline void write_tisr(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c6, c1, 0" : : "r" (val));
-}
-
-static inline u32 read_wdtcr(void)
-{
- u32 val;
- asm volatile("mrc p6, 0, %0, c7, c1, 0":"=r" (val));
- return val;
-}
-static inline void write_wdtcr(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c7, c1, 0"::"r" (val));
-}
-
-extern unsigned long get_iop_tick_rate(void);
-
-/* only iop13xx has these registers, we define these to present a
- * common register interface for the iop_wdt driver.
- */
-#define IOP_RCSR_WDT (0)
-static inline u32 read_rcsr(void)
-{
- return 0;
-}
-static inline void write_wdtsr(u32 val)
-{
- do { } while (0);
-}
-
-extern struct platform_device iop3xx_dma_0_channel;
-extern struct platform_device iop3xx_dma_1_channel;
-extern struct platform_device iop3xx_aau_channel;
-extern struct platform_device iop3xx_i2c0_device;
-extern struct platform_device iop3xx_i2c1_device;
-extern struct gpiod_lookup_table iop3xx_i2c0_gpio_lookup;
-extern struct gpiod_lookup_table iop3xx_i2c1_gpio_lookup;
-
-#endif
-
-
-#endif
diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h
deleted file mode 100644
index bcedbab90ac0..000000000000
--- a/arch/arm/include/asm/hardware/iop_adma.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright © 2006, Intel Corporation.
- */
-#ifndef IOP_ADMA_H
-#define IOP_ADMA_H
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-#include <linux/interrupt.h>
-
-#define IOP_ADMA_SLOT_SIZE 32
-#define IOP_ADMA_THRESHOLD 4
-#ifdef DEBUG
-#define IOP_PARANOIA 1
-#else
-#define IOP_PARANOIA 0
-#endif
-#define iop_paranoia(x) BUG_ON(IOP_PARANOIA && (x))
-
-/**
- * struct iop_adma_device - internal representation of an ADMA device
- * @pdev: Platform device
- * @id: HW ADMA Device selector
- * @dma_desc_pool: base of DMA descriptor region (DMA address)
- * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
- * @common: embedded struct dma_device
- */
-struct iop_adma_device {
- struct platform_device *pdev;
- int id;
- dma_addr_t dma_desc_pool;
- void *dma_desc_pool_virt;
- struct dma_device common;
-};
-
-/**
- * struct iop_adma_chan - internal representation of an ADMA device
- * @pending: allows batching of hardware operations
- * @lock: serializes enqueue/dequeue operations to the slot pool
- * @mmr_base: memory mapped register base
- * @chain: device chain view of the descriptors
- * @device: parent device
- * @common: common dmaengine channel object members
- * @last_used: place holder for allocation to continue from where it left off
- * @all_slots: complete domain of slots usable by the channel
- * @slots_allocated: records the actual size of the descriptor slot pool
- * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs
- */
-struct iop_adma_chan {
- int pending;
- spinlock_t lock; /* protects the descriptor slot pool */
- void __iomem *mmr_base;
- struct list_head chain;
- struct iop_adma_device *device;
- struct dma_chan common;
- struct iop_adma_desc_slot *last_used;
- struct list_head all_slots;
- int slots_allocated;
- struct tasklet_struct irq_tasklet;
-};
-
-/**
- * struct iop_adma_desc_slot - IOP-ADMA software descriptor
- * @slot_node: node on the iop_adma_chan.all_slots list
- * @chain_node: node on the op_adma_chan.chain list
- * @hw_desc: virtual address of the hardware descriptor chain
- * @phys: hardware address of the hardware descriptor chain
- * @group_head: first operation in a transaction
- * @slot_cnt: total slots used in an transaction (group of operations)
- * @slots_per_op: number of slots per operation
- * @idx: pool index
- * @tx_list: list of descriptors that are associated with one operation
- * @async_tx: support for the async_tx api
- * @group_list: list of slots that make up a multi-descriptor transaction
- * for example transfer lengths larger than the supported hw max
- * @xor_check_result: result of zero sum
- * @crc32_result: result crc calculation
- */
-struct iop_adma_desc_slot {
- struct list_head slot_node;
- struct list_head chain_node;
- void *hw_desc;
- struct iop_adma_desc_slot *group_head;
- u16 slot_cnt;
- u16 slots_per_op;
- u16 idx;
- struct list_head tx_list;
- struct dma_async_tx_descriptor async_tx;
- union {
- u32 *xor_check_result;
- u32 *crc32_result;
- u32 *pq_check_result;
- };
-};
-
-struct iop_adma_platform_data {
- int hw_id;
- dma_cap_mask_t cap_mask;
- size_t pool_size;
-};
-
-#define to_iop_sw_desc(addr_hw_desc) \
- container_of(addr_hw_desc, struct iop_adma_desc_slot, hw_desc)
-#define iop_hw_desc_slot_idx(hw_desc, idx) \
- ( (void *) (((unsigned long) hw_desc) + ((idx) << 5)) )
-#endif
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index ac54c06764e6..62358d3ca0a8 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -53,6 +53,9 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DEBUG_ARCH_V7_MM 4
#define ARM_DEBUG_ARCH_V7_1 5
#define ARM_DEBUG_ARCH_V8 6
+#define ARM_DEBUG_ARCH_V8_1 7
+#define ARM_DEBUG_ARCH_V8_2 8
+#define ARM_DEBUG_ARCH_V8_4 9
/* Breakpoint */
#define ARM_BREAKPOINT_EXECUTE 0
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 7a0596fcb2e7..ab2b654084fa 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -356,7 +356,6 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
*
* Function Memory type Cacheability Cache hint
* ioremap() Device n/a n/a
- * ioremap_nocache() Device n/a n/a
* ioremap_cache() Normal Writeback Read allocate
* ioremap_wc() Normal Non-cacheable n/a
* ioremap_wt() Normal Non-cacheable n/a
@@ -368,13 +367,6 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
* - unaligned accesses are "unpredictable"
* - writes may be delayed before they hit the endpoint device
*
- * ioremap_nocache() is the same as ioremap() as there are too many device
- * drivers using this for device registers, and documentation which tells
- * people to use it for such for this to be any different. This is not a
- * safe fallback for memory-like mappings, or memory regions where the
- * compiler may generate unaligned accesses - eg, via inlining its own
- * memcpy.
- *
* All normal memory mappings have the following properties:
* - reads can be repeated with no side effects
* - repeated reads return the last value written
@@ -392,7 +384,6 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
*/
void __iomem *ioremap(resource_size_t res_cookie, size_t size);
#define ioremap ioremap
-#define ioremap_nocache ioremap
/*
* Do not use ioremap_cache for mapping memory. Use memremap instead.
@@ -400,12 +391,6 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size);
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
#define ioremap_cache ioremap_cache
-/*
- * Do not use ioremap_cached in new code. Provided for the benefit of
- * the pxa2xx-flash MTD driver only.
- */
-void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size);
-
void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
#define ioremap_wc ioremap_wc
#define ioremap_wt ioremap_wc
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 0125aa059d5b..9c04bd810d07 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -162,6 +162,7 @@
#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT)
#define HSR_SRT_SHIFT (16)
#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
+#define HSR_CM (1 << 8)
#define HSR_FSC (0x3f)
#define HSR_FSC_TYPE (0x3c)
#define HSR_SSE (1 << 21)
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 40002416efec..3944305e81df 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -9,18 +9,29 @@
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
-#include <asm/kvm_mmio.h>
#include <asm/kvm_arm.h>
#include <asm/cputype.h>
/* arm64 compatibility macros */
+#define PSR_AA32_MODE_FIQ FIQ_MODE
+#define PSR_AA32_MODE_SVC SVC_MODE
#define PSR_AA32_MODE_ABT ABT_MODE
#define PSR_AA32_MODE_UND UND_MODE
#define PSR_AA32_T_BIT PSR_T_BIT
+#define PSR_AA32_F_BIT PSR_F_BIT
#define PSR_AA32_I_BIT PSR_I_BIT
#define PSR_AA32_A_BIT PSR_A_BIT
#define PSR_AA32_E_BIT PSR_E_BIT
#define PSR_AA32_IT_MASK PSR_IT_MASK
+#define PSR_AA32_GE_MASK 0x000f0000
+#define PSR_AA32_DIT_BIT 0x00200000
+#define PSR_AA32_PAN_BIT 0x00400000
+#define PSR_AA32_SSBS_BIT 0x00800000
+#define PSR_AA32_Q_BIT PSR_Q_BIT
+#define PSR_AA32_V_BIT PSR_V_BIT
+#define PSR_AA32_C_BIT PSR_C_BIT
+#define PSR_AA32_Z_BIT PSR_Z_BIT
+#define PSR_AA32_N_BIT PSR_N_BIT
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
@@ -41,6 +52,11 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
*__vcpu_spsr(vcpu) = v;
}
+static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
+{
+ return spsr;
+}
+
static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
u8 reg_num)
{
@@ -95,12 +111,12 @@ static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
return (unsigned long *)&vcpu->arch.hcr;
}
-static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr &= ~HCR_TWE;
}
-static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr |= HCR_TWE;
}
@@ -167,6 +183,11 @@ static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
}
+static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & (HSR_CM | HSR_WNR | HSR_FSC);
+}
+
static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
@@ -177,6 +198,11 @@ static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
}
+static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
{
return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
@@ -193,7 +219,7 @@ static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
}
/* Get Access Size from a data abort */
-static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+static inline unsigned int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
{
switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
case 0:
@@ -204,7 +230,7 @@ static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
return 4;
default:
kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
- return -EFAULT;
+ return 4;
}
}
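
Note: the syndrome helpers added above are easier to read in context. Below is a hedged sketch, not the in-tree virt/kvm/arm/mmio.c handler, of how an MMIO data-abort path might consume them; the KVM_EXIT_ARM_NISV exit reason and the run->arm_nisv fields are assumed from the matching UAPI changes in this series and are illustrative only.

/* Hedged sketch only; names and flow are illustrative, not the real handler. */
static int handle_dabt_sketch(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      phys_addr_t fault_ipa)
{
	if (!kvm_vcpu_dabt_isvalid(vcpu)) {
		/* No valid syndrome: report a sanitized ISS to user space. */
		run->exit_reason = KVM_EXIT_ARM_NISV;
		run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
		run->arm_nisv.fault_ipa = fault_ipa;
		return 0;
	}

	/* Access size is now always 1, 2 or 4; no negative error path left. */
	run->mmio.len = kvm_vcpu_dabt_get_as(vcpu);
	run->mmio.is_write = kvm_vcpu_dabt_iswrite(vcpu);
	run->mmio.phys_addr = fault_ipa;

	if (run->mmio.is_write) {
		int rt = kvm_vcpu_dabt_get_rd(vcpu);

		kvm_mmio_write_buf(run->mmio.data, run->mmio.len,
				   vcpu_get_reg(vcpu, rt));
	}

	run->exit_reason = KVM_EXIT_MMIO;
	return 0;
}
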
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 8a37c8e89777..c3314b286a61 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -7,13 +7,13 @@
#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__
+#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cputype.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
-#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>
#include <kvm/arm_arch_timer.h>
@@ -38,6 +38,7 @@
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
+#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
@@ -76,6 +77,14 @@ struct kvm_arch {
/* Mandated version of PSCI */
u32 psci_version;
+
+ /*
+ * If we encounter a data abort without valid instruction syndrome
+ * information, report this to user space. User space can (and
+ * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
+ * supported.
+ */
+ bool return_nisv_io_abort_to_user;
};
#define KVM_NR_MEM_OBJS 40
@@ -192,9 +201,6 @@ struct kvm_vcpu_arch {
/* Don't run the guest (internal implementation need) */
bool pause;
- /* IO related fields */
- struct kvm_decode mmio_decode;
-
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
@@ -274,8 +280,6 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
@@ -290,6 +294,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index) {}
+/* MMIO helpers */
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ phys_addr_t fault_ipa);
+
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
unsigned long vector_ptr)
@@ -323,6 +335,29 @@ static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int kvm_perf_init(void);
int kvm_perf_teardown(void);
+static inline long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
+{
+ return SMCCC_RET_NOT_SUPPORTED;
+}
+
+static inline gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
+{
+ return GPA_INVALID;
+}
+
+static inline void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+}
+
+static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
+{
+ return false;
+}
+
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
@@ -330,9 +365,9 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
static inline bool kvm_arch_requires_vhe(void) { return false; }
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_init_debug(void) {}
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
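
Note: the return_nisv_io_abort_to_user flag added above is controlled from user space. A minimal hedged sketch of a VMM opting in, assuming the standard KVM_CHECK_EXTENSION/KVM_ENABLE_CAP ioctls on the VM file descriptor (error handling trimmed):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hedged userspace sketch: opt in to NISV reporting on an existing VM fd. */
static int enable_nisv_to_user(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_ARM_NISV_TO_USER,
	};

	/* Only offered by kernels that carry this series. */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_NISV_TO_USER) <= 0)
		return -1;

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
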
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 40e9034db601..3c1b55ecc578 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -10,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/cp15.h>
+#include <asm/kvm_arm.h>
#include <asm/vfp.h>
#define __hyp_text __section(.hyp.text) notrace
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
deleted file mode 100644
index 7c0eddb0adb2..000000000000
--- a/arch/arm/include/asm/kvm_mmio.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_MMIO_H__
-#define __ARM_KVM_MMIO_H__
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-
-struct kvm_decode {
- unsigned long rt;
- bool sign_extend;
-};
-
-void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
-unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
-
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
- phys_addr_t fault_ipa);
-
-#endif /* __ARM_KVM_MMIO_H__ */
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 0abd389cf0ec..68e6f25784a4 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -27,5 +27,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
return channel ? 15 : 14;
}
+extern void pcibios_report_status(unsigned int status_mask, int warn);
+
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index a2a68b751971..069da393110c 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -15,8 +15,6 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-#define check_pgt_cache() do { } while (0)
-
#ifdef CONFIG_MMU
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 51beec41d48c..0d3ea35c97fe 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -189,6 +189,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
}
#define pmd_large(pmd) (pmd_val(pmd) & 2)
+#define pmd_leaf(pmd) (pmd_val(pmd) & 2)
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
#define pmd_present(pmd) (pmd_val(pmd))
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 5b18295021a0..ad55ab068dbf 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -134,6 +134,7 @@
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_SECT)
#define pmd_large(pmd) pmd_sect(pmd)
+#define pmd_leaf(pmd) pmd_sect(pmd)
#define pud_clear(pudp) \
do { \
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 0b1f6799a32e..30fb2330f57b 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -42,12 +42,6 @@
#define swapper_pg_dir ((pgd_t *) 0)
-#define __swp_type(x) (0)
-#define __swp_offset(x) (0)
-#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
typedef pte_t *pte_addr_t;
@@ -62,7 +56,6 @@ typedef pte_t *pte_addr_t;
*/
#define pgprot_noncached(prot) (prot)
#define pgprot_writecombine(prot) (prot)
-#define pgprot_dmacoherent(prot) (prot)
#define pgprot_device(prot) (prot)
@@ -72,11 +65,6 @@ typedef pte_t *pte_addr_t;
extern unsigned int kobjsize(const void *objp);
/*
- * No page table caches to initialise.
- */
-#define pgtable_cache_init() do { } while (0)
-
-/*
* All 32bit addresses are effectively valid for vmalloc...
* Sort of meaningless for non-VM targets.
*/
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index f2e990dc27e7..eabcb48a7840 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -12,7 +12,7 @@
#ifndef CONFIG_MMU
-#include <asm-generic/4level-fixup.h>
+#include <asm-generic/pgtable-nopud.h>
#include <asm/pgtable-nommu.h>
#else
@@ -368,8 +368,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#define pgtable_cache_init() do { } while (0)
-
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 20c2f42454b8..614bf829e454 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -140,8 +140,6 @@ static inline void prefetchw(const void *ptr)
#endif
#endif
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
#endif
#endif /* __ASM_ARM_PROCESSOR_H */
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index d3e937dcee4d..007d8fea7157 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -10,7 +10,7 @@
* to ensure that the maintenance completes in case we migrate to another
* CPU.
*/
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
+#if defined(CONFIG_PREEMPTION) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
#define __complete_pending_tlbi() dsb(ish)
#else
#define __complete_pending_tlbi()
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index b75ea15b85c0..4d4e7b6aabff 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -37,14 +37,10 @@ static inline void __tlb_remove_table(void *_table)
#include <asm-generic/tlb.h>
-#ifndef CONFIG_HAVE_RCU_TABLE_FREE
-#define tlb_remove_table(tlb, entry) tlb_remove_page(tlb, entry)
-#endif
-
static inline void
__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
- pgtable_page_dtor(pte);
+ pgtable_pte_page_dtor(pte);
#ifndef CONFIG_ARM_LPAE
/*
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 2a786f54d8b8..8a0fae94d45e 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -5,26 +5,6 @@
#ifdef CONFIG_ARM_CPU_TOPOLOGY
#include <linux/cpumask.h>
-
-struct cputopo_arm {
- int thread_id;
- int core_id;
- int socket_id;
- cpumask_t thread_sibling;
- cpumask_t core_sibling;
-};
-
-extern struct cputopo_arm cpu_topology[NR_CPUS];
-
-#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
-#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
-#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
-#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
-
-void init_cpu_topology(void);
-void store_cpu_topology(unsigned int cpuid);
-const struct cpumask *cpu_coregroup_mask(int cpu);
-
#include <linux/arch_topology.h>
/* Replace task scheduler's default frequency-invariant accounting */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 303248e5b990..98c6b91be4a8 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -22,7 +22,7 @@
* perform such accesses (eg, via list poison values) which could then
* be exploited for priviledge escalation.
*/
-static inline unsigned int uaccess_save_and_enable(void)
+static __always_inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
unsigned int old_domain = get_domain();
@@ -37,7 +37,7 @@ static inline unsigned int uaccess_save_and_enable(void)
#endif
}
-static inline void uaccess_restore(unsigned int flags)
+static __always_inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/* Restore the user access mask */
diff --git a/arch/arm/include/asm/vdso/gettimeofday.h b/arch/arm/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000000..fe6e1f65932d
--- /dev/null
+++ b/arch/arm/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 ARM Limited
+ */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/barrier.h>
+#include <asm/cp15.h>
+#include <asm/unistd.h>
+#include <uapi/linux/time.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+extern struct vdso_data *__get_datapage(void);
+
+static __always_inline int gettimeofday_fallback(
+ struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct timezone *tz asm("r1") = _tz;
+ register struct __kernel_old_timeval *tv asm("r0") = _tv;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_gettimeofday;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (tv), "r" (tz), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline long clock_gettime_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register struct __kernel_timespec *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_gettime64;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline long clock_gettime32_fallback(
+ clockid_t _clkid,
+ struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_gettime;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline int clock_getres_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register struct __kernel_timespec *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_getres_time64;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline int clock_getres32_fallback(
+ clockid_t _clkid,
+ struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_getres;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline u64 __arch_get_hw_counter(int clock_mode)
+{
+#ifdef CONFIG_ARM_ARCH_TIMER
+ u64 cycle_now;
+
+ if (!clock_mode)
+ return -EINVAL;
+
+ isb();
+ cycle_now = read_sysreg(CNTVCT);
+
+ return cycle_now;
+#else
+ return -EINVAL; /* use fallback */
+#endif
+}
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return __get_datapage();
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
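
Note: applications never call these fallbacks directly; the generic vDSO library drives them when the architected timer path is unusable. A simplified, hedged sketch of that dispatch (field names approximate lib/vdso/gettimeofday.c, which is not part of this diff):

/* Hedged sketch of the generic dispatch; simplified from lib/vdso. */
static __always_inline int do_hres_sketch(const struct vdso_data *vd,
					  clockid_t clk,
					  struct __kernel_timespec *ts)
{
	u64 cycles = __arch_get_hw_counter(vd->clock_mode);

	/* Negative means "no usable counter": take the swi #0 fallback. */
	if ((s64)cycles < 0)
		return clock_gettime_fallback(clk, ts);

	/* ... otherwise scale cycles with vd->mult/vd->shift and fill *ts ... */
	return 0;
}
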
diff --git a/arch/arm/include/asm/vdso/vsyscall.h b/arch/arm/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000000..cff87d8d30da
--- /dev/null
+++ b/arch/arm/include/asm/vdso/vsyscall.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+#include <asm/cacheflush.h>
+
+extern struct vdso_data *vdso_data;
+extern bool cntvct_ok;
+
+static __always_inline
+bool tk_is_cntvct(const struct timekeeper *tk)
+{
+ if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
+ return false;
+
+ if (!tk->tkr_mono.clock->archdata.vdso_direct)
+ return false;
+
+ return true;
+}
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline
+struct vdso_data *__arm_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+#define __arch_get_k_vdso_data __arm_get_k_vdso_data
+
+static __always_inline
+bool __arm_update_vdso_data(void)
+{
+ return cntvct_ok;
+}
+#define __arch_update_vdso_data __arm_update_vdso_data
+
+static __always_inline
+int __arm_get_clock_mode(struct timekeeper *tk)
+{
+ u32 __tk_is_cntvct = tk_is_cntvct(tk);
+
+ return __tk_is_cntvct;
+}
+#define __arch_get_clock_mode __arm_get_clock_mode
+
+static __always_inline
+int __arm_use_vsyscall(struct vdso_data *vdata)
+{
+ return vdata[CS_HRES_COARSE].clock_mode;
+}
+#define __arch_use_vsyscall __arm_use_vsyscall
+
+static __always_inline
+void __arm_sync_vdso_data(struct vdso_data *vdata)
+{
+ flush_dcache_page(virt_to_page(vdata));
+}
+#define __arch_sync_vdso_data __arm_sync_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/arm/include/asm/vdso_datapage.h b/arch/arm/include/asm/vdso_datapage.h
index 7910abf89b1c..bef68f59928d 100644
--- a/arch/arm/include/asm/vdso_datapage.h
+++ b/arch/arm/include/asm/vdso_datapage.h
@@ -11,35 +11,12 @@
#ifndef __ASSEMBLY__
+#include <vdso/datapage.h>
#include <asm/page.h>
-/* Try to be cache-friendly on systems that don't implement the
- * generic timer: fit the unconditionally updated fields in the first
- * 32 bytes.
- */
-struct vdso_data {
- u32 seq_count; /* sequence count - odd during updates */
- u16 tk_is_cntvct; /* fall back to syscall if false */
- u16 cs_shift; /* clocksource shift */
- u32 xtime_coarse_sec; /* coarse time */
- u32 xtime_coarse_nsec;
-
- u32 wtm_clock_sec; /* wall to monotonic offset */
- u32 wtm_clock_nsec;
- u32 xtime_clock_sec; /* CLOCK_REALTIME - seconds */
- u32 cs_mult; /* clocksource multiplier */
-
- u64 cs_cycle_last; /* last cycle value */
- u64 cs_mask; /* clocksource mask */
-
- u64 xtime_clock_snsec; /* CLOCK_REALTIME sub-ns base */
- u32 tz_minuteswest; /* timezone info for gettimeofday(2) */
- u32 tz_dsttime;
-};
-
union vdso_data_store {
- struct vdso_data data;
- u8 page[PAGE_SIZE];
+ struct vdso_data data[CS_BASES];
+ u8 page[PAGE_SIZE];
};
#endif /* !__ASSEMBLY__ */
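
Note: with the open-coded struct gone, the store now carries one struct vdso_data per clocksource base. A hedged sketch of how the exported vdso_data pointer (declared extern in vsyscall.h above) is typically wired up in arch/arm/kernel/vdso.c; the exact declaration there may differ:

/* One page backing CS_BASES vdso_data entries; the kernel writes, the vDSO reads. */
static union vdso_data_store vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;
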
diff --git a/arch/arm/include/asm/vmalloc.h b/arch/arm/include/asm/vmalloc.h
new file mode 100644
index 000000000000..a9b3718b8600
--- /dev/null
+++ b/arch/arm/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_ARM_VMALLOC_H
+#define _ASM_ARM_VMALLOC_H
+
+#endif /* _ASM_ARM_VMALLOC_H */
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 2c403e7c782d..27e984977402 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -1,95 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
-#define _ASM_ARM_XEN_PAGE_COHERENT_H
-
-#include <linux/dma-mapping.h>
-#include <asm/page.h>
#include <xen/arm/page-coherent.h>
-
-static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
-{
- if (dev && dev->archdata.dev_dma_ops)
- return dev->archdata.dev_dma_ops;
- return get_arch_dma_ops(NULL);
-}
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
- return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
- xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
- dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- unsigned long page_pfn = page_to_xen_pfn(page);
- unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
- unsigned long compound_pages =
- (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
- bool local = (page_pfn <= dev_pfn) &&
- (dev_pfn - page_pfn < compound_pages);
-
- /*
- * Dom0 is mapped 1:1, while the Linux page can span across
- * multiple Xen pages, it's not possible for it to contain a
- * mix of local and foreign Xen pages. So if the first xen_pfn
- * == mfn the page is local otherwise it's a foreign page
- * grant-mapped in dom0. If the page is local we can safely
- * call the native dma_ops function, otherwise we call the xen
- * specific function.
- */
- if (local)
- xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
- else
- __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- unsigned long pfn = PFN_DOWN(handle);
- /*
- * Dom0 is mapped 1:1, while the Linux page can be spanned accross
- * multiple Xen page, it's not possible to have a mix of local and
- * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
- * foreign mfn will always return false. If the page is local we can
- * safely call the native dma_ops function, otherwise we call the xen
- * specific function.
- */
- if (pfn_valid(pfn)) {
- if (xen_get_dma_ops(hwdev)->unmap_page)
- xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
- } else
- __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- unsigned long pfn = PFN_DOWN(handle);
- if (pfn_valid(pfn)) {
- if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
- xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
- } else
- __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- unsigned long pfn = PFN_DOWN(handle);
- if (pfn_valid(pfn)) {
- if (xen_get_dma_ops(hwdev)->sync_single_for_device)
- xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
- } else
- __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
-#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/include/asm/xen/xen-ops.h b/arch/arm/include/asm/xen/xen-ops.h
deleted file mode 100644
index ec154e719b11..000000000000
--- a/arch/arm/include/asm/xen/xen-ops.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_XEN_OPS_H
-#define _ASM_XEN_OPS_H
-
-void xen_efi_runtime_setup(void);
-
-#endif /* _ASM_XEN_OPS_H */