Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_pad.c | 1
-rw-r--r--  drivers/acpi/irq.c | 26
-rw-r--r--  drivers/acpi/pptt.c | 61
-rw-r--r--  drivers/acpi/processor_idle.c | 1
-rw-r--r--  drivers/base/arch_topology.c | 6
-rw-r--r--  drivers/base/cacheinfo.c | 5
-rw-r--r--  drivers/base/topology.c | 22
-rw-r--r--  drivers/char/agp/generic.c | 3
-rw-r--r--  drivers/char/hw_random/iproc-rng200.c | 1
-rw-r--r--  drivers/char/hw_random/meson-rng.c | 52
-rw-r--r--  drivers/char/tpm/eventlog/efi.c | 59
-rw-r--r--  drivers/char/tpm/eventlog/tpm2.c | 47
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 6
-rw-r--r--  drivers/char/tpm/tpm1-cmd.c | 7
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 7
-rw-r--r--  drivers/clk/clk.c | 2
-rw-r--r--  drivers/clk/meson/g12a.c | 4
-rw-r--r--  drivers/clk/meson/g12a.h | 2
-rw-r--r--  drivers/clk/meson/meson8b.c | 10
-rw-r--r--  drivers/clk/socfpga/clk-s10.c | 4
-rw-r--r--  drivers/clk/tegra/clk-tegra210.c | 2
-rw-r--r--  drivers/clk/ti/clkctrl.c | 7
-rw-r--r--  drivers/clocksource/Kconfig | 14
-rw-r--r--  drivers/clocksource/Makefile | 5
-rw-r--r--  drivers/clocksource/arc_timer.c | 3
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 15
-rw-r--r--  drivers/clocksource/exynos_mct.c | 4
-rw-r--r--  drivers/clocksource/hyperv_timer.c | 339
-rw-r--r--  drivers/clocksource/timer-davinci.c | 369
-rw-r--r--  drivers/clocksource/timer-imx-sysctr.c | 145
-rw-r--r--  drivers/clocksource/timer-ixp4xx.c | 16
-rw-r--r--  drivers/clocksource/timer-meson6.c | 5
-rw-r--r--  drivers/clocksource/timer-tegra.c | 416
-rw-r--r--  drivers/clocksource/timer-tegra20.c | 379
-rw-r--r--  drivers/crypto/Kconfig | 20
-rw-r--r--  drivers/crypto/Makefile | 2
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 36
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 25
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h | 10
-rw-r--r--  drivers/crypto/atmel-ecc.c | 403
-rw-r--r--  drivers/crypto/atmel-ecc.h | 116
-rw-r--r--  drivers/crypto/atmel-i2c.c | 364
-rw-r--r--  drivers/crypto/atmel-i2c.h | 197
-rw-r--r--  drivers/crypto/atmel-sha204a.c | 171
-rw-r--r--  drivers/crypto/bcm/cipher.c | 8
-rw-r--r--  drivers/crypto/bcm/spu2.c | 10
-rw-r--r--  drivers/crypto/caam/Kconfig | 46
-rw-r--r--  drivers/crypto/caam/Makefile | 18
-rw-r--r--  drivers/crypto/caam/caamalg.c | 338
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c | 147
-rw-r--r--  drivers/crypto/caam/caamalg_desc.h | 4
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 267
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 202
-rw-r--r--  drivers/crypto/caam/caamhash.c | 329
-rw-r--r--  drivers/crypto/caam/caampkc.c | 177
-rw-r--r--  drivers/crypto/caam/caampkc.h | 9
-rw-r--r--  drivers/crypto/caam/caamrng.c | 76
-rw-r--r--  drivers/crypto/caam/ctrl.c | 56
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 11
-rw-r--r--  drivers/crypto/caam/error.c | 8
-rw-r--r--  drivers/crypto/caam/error.h | 2
-rw-r--r--  drivers/crypto/caam/intern.h | 102
-rw-r--r--  drivers/crypto/caam/jr.c | 43
-rw-r--r--  drivers/crypto/caam/key_gen.c | 28
-rw-r--r--  drivers/crypto/caam/qi.c | 52
-rw-r--r--  drivers/crypto/caam/sg_sw_qm.h | 18
-rw-r--r--  drivers/crypto/caam/sg_sw_qm2.h | 18
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h | 26
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_algs.c | 1
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_debugfs.h | 2
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_mbx.h | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes.c | 7
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 96
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 2
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 20
-rw-r--r--  drivers/crypto/ccree/cc_driver.c | 70
-rw-r--r--  drivers/crypto/ccree/cc_driver.h | 6
-rw-r--r--  drivers/crypto/ccree/cc_host_regs.h | 20
-rw-r--r--  drivers/crypto/ccree/cc_pm.c | 11
-rw-r--r--  drivers/crypto/ccree/cc_pm.h | 7
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_drv.h | 2
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 13
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 17
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 116
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 92
-rw-r--r--  drivers/crypto/inside-secure/safexcel_ring.c | 3
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 15
-rw-r--r--  drivers/crypto/mxs-dcp.c | 5
-rw-r--r--  drivers/crypto/nx/nx-842-powernv.c | 8
-rw-r--r--  drivers/crypto/nx/nx-842-pseries.c | 6
-rw-r--r--  drivers/crypto/nx/nx.c | 4
-rw-r--r--  drivers/crypto/nx/nx.h | 12
-rw-r--r--  drivers/crypto/nx/nx_debugfs.c | 71
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 294
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.h | 2
-rw-r--r--  drivers/crypto/sahara.c | 4
-rw-r--r--  drivers/crypto/stm32/Makefile | 2
-rw-r--r--  drivers/crypto/stm32/stm32-crc32.c (renamed from drivers/crypto/stm32/stm32_crc32.c) | 0
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 6
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | 47
-rw-r--r--  drivers/crypto/talitos.c | 368
-rw-r--r--  drivers/crypto/talitos.h | 73
-rw-r--r--  drivers/crypto/vmx/aes_cbc.c | 183
-rw-r--r--  drivers/crypto/vmx/aes_ctr.c | 165
-rw-r--r--  drivers/crypto/vmx/aes_xts.c | 175
-rw-r--r--  drivers/crypto/vmx/aesp8-ppc.h | 2
-rw-r--r--  drivers/crypto/vmx/aesp8-ppc.pl | 22
-rw-r--r--  drivers/crypto/vmx/vmx.c | 72
-rw-r--r--  drivers/dma/dma-jz4780.c | 5
-rw-r--r--  drivers/dma/imx-sdma.c | 52
-rw-r--r--  drivers/dma/qcom/bam_dma.c | 3
-rw-r--r--  drivers/firmware/efi/efi-bgrt.c | 5
-rw-r--r--  drivers/firmware/efi/efi.c | 14
-rw-r--r--  drivers/firmware/efi/efibc.c | 12
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 15
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 2
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 27
-rw-r--r--  drivers/firmware/efi/libstub/tpm.c | 80
-rw-r--r--  drivers/firmware/efi/tpm.c | 63
-rw-r--r--  drivers/gpio/gpio-mb86s7x.c | 51
-rw-r--r--  drivers/gpio/gpiolib-of.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 6
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 6
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 2
-rw-r--r--  drivers/hid/hid-ids.h | 3
-rw-r--r--  drivers/hid/hid-logitech-dj.c | 4
-rw-r--r--  drivers/hid/hid-multitouch.c | 4
-rw-r--r--  drivers/hid/hid-quirks.c | 1
-rw-r--r--  drivers/hid/hid-uclogic-core.c | 2
-rw-r--r--  drivers/hid/hid-uclogic-params.c | 2
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp-fw-loader.c | 2
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp-hid-client.c | 4
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/bus.c | 15
-rw-r--r--  drivers/hv/Kconfig | 4
-rw-r--r--  drivers/hv/hv.c | 156
-rw-r--r--  drivers/hv/hv_util.c | 1
-rw-r--r--  drivers/hv/hyperv_vmbus.h | 3
-rw-r--r--  drivers/hv/vmbus_drv.c | 42
-rw-r--r--  drivers/hwmon/coretemp.c | 36
-rw-r--r--  drivers/i2c/i2c-core-acpi.c | 6
-rw-r--r--  drivers/iio/humidity/dht11.c | 8
-rw-r--r--  drivers/iio/industrialio-core.c | 4
-rw-r--r--  drivers/infiniband/core/device.c | 8
-rw-r--r--  drivers/infiniband/hw/hfi1/affinity.c | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx4/alias_GUID.c | 6
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c | 7
-rw-r--r--  drivers/irqchip/Kconfig | 32
-rw-r--r--  drivers/irqchip/Makefile | 2
-rw-r--r--  drivers/irqchip/irq-al-fic.c | 278
-rw-r--r--  drivers/irqchip/irq-csky-mpintc.c | 101
-rw-r--r--  drivers/irqchip/irq-gic-v2m.c | 85
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 35
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 10
-rw-r--r--  drivers/irqchip/irq-mbigen.c | 3
-rw-r--r--  drivers/irqchip/irq-meson-gpio.c | 1
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 4
-rw-r--r--  drivers/irqchip/irq-renesas-intc-irqpin.c | 3
-rw-r--r--  drivers/irqchip/irq-renesas-irqc.c | 91
-rw-r--r--  drivers/irqchip/irq-renesas-rza1.c | 283
-rw-r--r--  drivers/irqchip/irq-sni-exiu.c | 142
-rw-r--r--  drivers/irqchip/irq-ti-sci-inta.c | 4
-rw-r--r--  drivers/irqchip/qcom-irq-combiner.c | 5
-rw-r--r--  drivers/leds/trigger/ledtrig-activity.c | 2
-rw-r--r--  drivers/md/dm-crypt.c | 2
-rw-r--r--  drivers/md/dm-init.c | 10
-rw-r--r--  drivers/md/dm-log-writes.c | 23
-rw-r--r--  drivers/md/dm-table.c | 2
-rw-r--r--  drivers/md/dm-verity-target.c | 4
-rw-r--r--  drivers/mtd/nand/raw/ingenic/Kconfig | 2
-rw-r--r--  drivers/mtd/nand/raw/ingenic/Makefile | 4
-rw-r--r--  drivers/mtd/nand/raw/ingenic/ingenic_ecc.c | 9
-rw-r--r--  drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c (renamed from drivers/mtd/nand/raw/ingenic/ingenic_nand.c) | 0
-rw-r--r--  drivers/mtd/nand/raw/sunxi_nand.c | 40
-rw-r--r--  drivers/mtd/nand/spi/gigadevice.c | 2
-rw-r--r--  drivers/mtd/nand/spi/macronix.c | 4
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_filters.c | 10
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 19
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 28
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 16
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 22
-rw-r--r--  drivers/net/ppp/Kconfig | 3
-rw-r--r--  drivers/net/ppp/ppp_mppe.c | 98
-rw-r--r--  drivers/net/team/team.c | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/vrf.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 2
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 2
-rw-r--r--  drivers/net/wireless/ti/wlcore/rx.c | 2
-rw-r--r--  drivers/net/wireless/ti/wlcore/tx.c | 2
-rw-r--r--  drivers/net/wireless/virt_wifi.c | 2
-rw-r--r--  drivers/nvdimm/security.c | 2
-rw-r--r--  drivers/pci/pci-driver.c | 8
-rw-r--r--  drivers/perf/Kconfig | 8
-rw-r--r--  drivers/perf/Makefile | 1
-rw-r--r--  drivers/perf/arm_pmu_acpi.c | 72
-rw-r--r--  drivers/perf/arm_spe_pmu.c | 12
-rw-r--r--  drivers/perf/fsl_imx8_ddr_perf.c | 554
-rw-r--r--  drivers/pinctrl/mediatek/mtk-eint.c | 34
-rw-r--r--  drivers/pinctrl/pinctrl-mcp23s08.c | 8
-rw-r--r--  drivers/pinctrl/pinctrl-ocelot.c | 18
-rw-r--r--  drivers/powercap/intel_rapl.c | 75
-rw-r--r--  drivers/ras/cec.c | 132
-rw-r--r--  drivers/s390/block/Kconfig | 2
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 2
-rw-r--r--  drivers/s390/char/Kconfig | 22
-rw-r--r--  drivers/s390/char/Makefile | 1
-rw-r--r--  drivers/s390/char/sclp_async.c | 189
-rw-r--r--  drivers/s390/char/zcore.c | 2
-rw-r--r--  drivers/s390/cio/airq.c | 37
-rw-r--r--  drivers/s390/cio/ccwreq.c | 9
-rw-r--r--  drivers/s390/cio/chsc.c | 30
-rw-r--r--  drivers/s390/cio/cio.h | 3
-rw-r--r--  drivers/s390/cio/css.c | 187
-rw-r--r--  drivers/s390/cio/device.c | 68
-rw-r--r--  drivers/s390/cio/device_fsm.c | 49
-rw-r--r--  drivers/s390/cio/device_id.c | 20
-rw-r--r--  drivers/s390/cio/device_ops.c | 21
-rw-r--r--  drivers/s390/cio/device_pgid.c | 22
-rw-r--r--  drivers/s390/cio/device_status.c | 24
-rw-r--r--  drivers/s390/cio/io_sch.h | 20
-rw-r--r--  drivers/s390/cio/qdio_main.c | 1
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 2
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 6
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c | 524
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.h | 7
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c | 13
-rw-r--r--  drivers/s390/crypto/pkey_api.c | 8
-rw-r--r--  drivers/s390/crypto/vfio_ap_drv.c | 34
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c | 380
-rw-r--r--  drivers/s390/crypto/vfio_ap_private.h | 15
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.c | 4
-rw-r--r--  drivers/s390/net/Kconfig | 8
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c | 246
-rw-r--r--  drivers/scsi/vmw_pvscsi.c | 6
-rw-r--r--  drivers/soc/Makefile | 2
-rw-r--r--  drivers/soc/ti/Kconfig | 4
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c | 16
-rw-r--r--  drivers/target/target_core_iblock.c | 2
-rw-r--r--  drivers/thermal/intel/x86_pkg_temp_thermal.c | 142
-rw-r--r--  drivers/tty/tty_ldisc.c | 8
259 files changed, 7979 insertions, 4625 deletions
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 6b3f1217a237..e7dc0133f817 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -64,6 +64,7 @@ static void power_saving_mwait_init(void)
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
+ case X86_VENDOR_ZHAOXIN:
/*
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index 89690a471360..e209081d644b 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -292,3 +292,29 @@ void __init acpi_set_irq_model(enum acpi_irq_model_id model,
acpi_irq_model = model;
acpi_gsi_domain_id = fwnode;
}
+
+/**
+ * acpi_irq_create_hierarchy - Create a hierarchical IRQ domain with the default
+ * GSI domain as its parent.
+ * @flags: IRQ domain flags associated with the domain
+ * @size: Size of the domain.
+ * @fwnode: Optional fwnode of the interrupt controller
+ * @ops: Pointer to the interrupt domain callbacks
+ * @host_data: Controller private data pointer
+ */
+struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
+ unsigned int size,
+ struct fwnode_handle *fwnode,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
+ DOMAIN_BUS_ANY);
+
+ if (!d)
+ return NULL;
+
+ return irq_domain_create_hierarchy(d, flags, size, fwnode, ops,
+ host_data);
+}
+EXPORT_SYMBOL_GPL(acpi_irq_create_hierarchy);
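
A minimal usage sketch of the new helper, assuming a hypothetical ACPI irqchip driver; my_fic_domain_ops and MY_FIC_NR_IRQS are illustrative names, not part of this patch:

        #include <linux/acpi.h>
        #include <linux/irqdomain.h>

        #define MY_FIC_NR_IRQS 32                       /* hypothetical domain size */

        static const struct irq_domain_ops my_fic_domain_ops; /* hypothetical ops table */

        static struct irq_domain *my_fic_create_domain(struct fwnode_handle *fwnode,
                                                       void *priv)
        {
                /* Stack a new domain on the default GSI domain as its parent */
                return acpi_irq_create_hierarchy(0, MY_FIC_NR_IRQS, fwnode,
                                                 &my_fic_domain_ops, priv);
                /* NULL here means no GSI parent domain has been registered yet */
        }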
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index b72e6afaa8fb..1e7ac0bd0d3a 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -432,17 +432,40 @@ static void cache_setup_acpi_cpu(struct acpi_table_header *table,
}
}
+static bool flag_identical(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu)
+{
+ struct acpi_pptt_processor *next;
+
+ /* heterogeneous machines must use PPTT revision > 1 */
+ if (table_hdr->revision < 2)
+ return false;
+
+ /* Locate the last node in the tree with IDENTICAL set */
+ if (cpu->flags & ACPI_PPTT_ACPI_IDENTICAL) {
+ next = fetch_pptt_node(table_hdr, cpu->parent);
+ if (!(next && next->flags & ACPI_PPTT_ACPI_IDENTICAL))
+ return true;
+ }
+
+ return false;
+}
+
/* Passing level values greater than this will result in search termination */
#define PPTT_ABORT_PACKAGE 0xFF
-static struct acpi_pptt_processor *acpi_find_processor_package_id(struct acpi_table_header *table_hdr,
- struct acpi_pptt_processor *cpu,
- int level, int flag)
+static struct acpi_pptt_processor *acpi_find_processor_tag(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu,
+ int level, int flag)
{
struct acpi_pptt_processor *prev_node;
while (cpu && level) {
- if (cpu->flags & flag)
+ /* special case the identical flag to find last identical */
+ if (flag == ACPI_PPTT_ACPI_IDENTICAL) {
+ if (flag_identical(table_hdr, cpu))
+ break;
+ } else if (cpu->flags & flag)
break;
pr_debug("level %d\n", level);
prev_node = fetch_pptt_node(table_hdr, cpu->parent);
@@ -480,8 +503,8 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
if (cpu_node) {
- cpu_node = acpi_find_processor_package_id(table, cpu_node,
- level, flag);
+ cpu_node = acpi_find_processor_tag(table, cpu_node,
+ level, flag);
/*
* As per specification if the processor structure represents
* an actual processor, then ACPI processor ID must be valid.
@@ -660,3 +683,29 @@ int find_acpi_cpu_topology_package(unsigned int cpu)
return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
ACPI_PPTT_PHYSICAL_PACKAGE);
}
+
+/**
+ * find_acpi_cpu_topology_hetero_id() - Get a core architecture tag
+ * @cpu: Kernel logical CPU number
+ *
+ * Determine a unique heterogeneous tag for the given CPU. CPUs with the same
+ * implementation should have matching tags.
+ *
+ * The returned tag can be used to group peers with identical implementation.
+ *
+ * The search terminates when a level is found with the identical implementation
+ * flag set or we reach a root node.
+ *
+ * Due to limitations in the PPTT data structure, there may be rare situations
+ * where two cores in a heterogeneous machine may be identical, but won't have
+ * the same tag.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
+ * Otherwise returns a value which represents a group of identical cores
+ * similar to this CPU.
+ */
+int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
+{
+ return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
+ ACPI_PPTT_ACPI_IDENTICAL);
+}
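
A minimal caller sketch for the new interface; the reporting loop below is illustrative, not part of this patch:

        #include <linux/acpi.h>
        #include <linux/cpumask.h>
        #include <linux/printk.h>

        static void report_hetero_tags(void)
        {
                int cpu, tag;

                for_each_online_cpu(cpu) {
                        tag = find_acpi_cpu_topology_hetero_id(cpu);
                        if (tag < 0)
                                continue;       /* no PPTT info for this CPU */
                        /* CPUs with identical implementation share a tag */
                        pr_info("CPU%d: hetero tag %d\n", cpu, tag);
                }
        }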
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index e387a258d649..ed56c6d20b08 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -196,6 +196,7 @@ static void tsc_check_state(int state)
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
+ case X86_VENDOR_ZHAOXIN:
/*
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1739d7e1952a..9b09e31ae82f 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -43,7 +43,7 @@ static ssize_t cpu_capacity_show(struct device *dev,
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
- return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
+ return sprintf(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}
static void update_topology_flags_workfn(struct work_struct *work);
@@ -116,7 +116,7 @@ void topology_normalize_cpu_scale(void)
/ capacity_scale;
topology_set_cpu_scale(cpu, capacity);
pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
- cpu, topology_get_cpu_scale(NULL, cpu));
+ cpu, topology_get_cpu_scale(cpu));
}
}
@@ -185,7 +185,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
for_each_cpu(cpu, policy->related_cpus) {
- raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
+ raw_capacity[cpu] = topology_get_cpu_scale(cpu) *
policy->cpuinfo.max_freq / 1000UL;
capacity_scale = max(raw_capacity[cpu], capacity_scale);
}
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index a7359535caf5..8827c60f51e2 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -213,6 +213,8 @@ int __weak cache_setup_acpi(unsigned int cpu)
return -ENOTSUPP;
}
+unsigned int coherency_max_size;
+
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -251,6 +253,9 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
}
}
+ /* record the maximum cache line size */
+ if (this_leaf->coherency_line_size > coherency_max_size)
+ coherency_max_size = this_leaf->coherency_line_size;
}
return 0;
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 5fd9f167ecc1..4e033d4cc0dc 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -43,6 +43,9 @@ static ssize_t name##_list_show(struct device *dev, \
define_id_show_func(physical_package_id);
static DEVICE_ATTR_RO(physical_package_id);
+define_id_show_func(die_id);
+static DEVICE_ATTR_RO(die_id);
+
define_id_show_func(core_id);
static DEVICE_ATTR_RO(core_id);
@@ -50,10 +53,22 @@ define_siblings_show_func(thread_siblings, sibling_cpumask);
static DEVICE_ATTR_RO(thread_siblings);
static DEVICE_ATTR_RO(thread_siblings_list);
+define_siblings_show_func(core_cpus, sibling_cpumask);
+static DEVICE_ATTR_RO(core_cpus);
+static DEVICE_ATTR_RO(core_cpus_list);
+
define_siblings_show_func(core_siblings, core_cpumask);
static DEVICE_ATTR_RO(core_siblings);
static DEVICE_ATTR_RO(core_siblings_list);
+define_siblings_show_func(die_cpus, die_cpumask);
+static DEVICE_ATTR_RO(die_cpus);
+static DEVICE_ATTR_RO(die_cpus_list);
+
+define_siblings_show_func(package_cpus, core_cpumask);
+static DEVICE_ATTR_RO(package_cpus);
+static DEVICE_ATTR_RO(package_cpus_list);
+
#ifdef CONFIG_SCHED_BOOK
define_id_show_func(book_id);
static DEVICE_ATTR_RO(book_id);
@@ -72,11 +87,18 @@ static DEVICE_ATTR_RO(drawer_siblings_list);
static struct attribute *default_attrs[] = {
&dev_attr_physical_package_id.attr,
+ &dev_attr_die_id.attr,
&dev_attr_core_id.attr,
&dev_attr_thread_siblings.attr,
&dev_attr_thread_siblings_list.attr,
+ &dev_attr_core_cpus.attr,
+ &dev_attr_core_cpus_list.attr,
&dev_attr_core_siblings.attr,
&dev_attr_core_siblings_list.attr,
+ &dev_attr_die_cpus.attr,
+ &dev_attr_die_cpus_list.attr,
+ &dev_attr_package_cpus.attr,
+ &dev_attr_package_cpus_list.attr,
#ifdef CONFIG_SCHED_BOOK
&dev_attr_book_id.attr,
&dev_attr_book_siblings.attr,
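
The new attributes surface under the usual per-CPU sysfs topology directory; a minimal userspace sketch (the path is assumed from the standard layout, not stated in this patch):

        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/sys/devices/system/cpu/cpu0/topology/die_id", "r");
                int die_id;

                if (!f)
                        return 1;       /* likely a kernel without this patch */
                if (fscanf(f, "%d", &die_id) == 1)
                        printf("cpu0 die_id: %d\n", die_id);
                fclose(f);
                return 0;
        }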
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 658664a5a5aa..df1edb5ec0ad 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -1311,8 +1311,7 @@ static void ipi_handler(void *null)
void global_cache_flush(void)
{
- if (on_each_cpu(ipi_handler, NULL, 1) != 0)
- panic(PFX "timed out waiting for the other CPUs!\n");
+ on_each_cpu(ipi_handler, NULL, 1);
}
EXPORT_SYMBOL(global_cache_flush);
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 8b5a20b35293..92be1c0ab99f 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -220,6 +220,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
}
static const struct of_device_id iproc_rng200_of_match[] = {
+ { .compatible = "brcm,bcm7211-rng200", },
{ .compatible = "brcm,bcm7278-rng200", },
{ .compatible = "brcm,iproc-rng200", },
{},
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 2e23be802a62..76e693da5dde 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -1,58 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
* Copyright (C) 2014 Amlogic, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * BSD LICENSE
- *
- * Copyright (c) 2016 BayLibre, SAS.
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- * Copyright (C) 2014 Amlogic, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/err.h>
#include <linux/module.h>
diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
index 3e44362e469c..6bb023de17f1 100644
--- a/drivers/char/tpm/eventlog/efi.c
+++ b/drivers/char/tpm/eventlog/efi.c
@@ -16,10 +16,13 @@
int tpm_read_log_efi(struct tpm_chip *chip)
{
+ struct efi_tcg2_final_events_table *final_tbl = NULL;
struct linux_efi_tpm_eventlog *log_tbl;
struct tpm_bios_log *log;
u32 log_size;
u8 tpm_log_version;
+ void *tmp;
+ int ret;
if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
@@ -47,15 +50,57 @@ int tpm_read_log_efi(struct tpm_chip *chip)
/* malloc EventLog space */
log->bios_event_log = kmemdup(log_tbl->log, log_size, GFP_KERNEL);
- if (!log->bios_event_log)
- goto err_memunmap;
- log->bios_event_log_end = log->bios_event_log + log_size;
+ if (!log->bios_event_log) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ log->bios_event_log_end = log->bios_event_log + log_size;
tpm_log_version = log_tbl->version;
- memunmap(log_tbl);
- return tpm_log_version;
-err_memunmap:
+ ret = tpm_log_version;
+
+ if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
+ efi_tpm_final_log_size == 0 ||
+ tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+ goto out;
+
+ final_tbl = memremap(efi.tpm_final_log,
+ sizeof(*final_tbl) + efi_tpm_final_log_size,
+ MEMREMAP_WB);
+ if (!final_tbl) {
+ pr_err("Could not map UEFI TPM final log\n");
+ kfree(log->bios_event_log);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ efi_tpm_final_log_size -= log_tbl->final_events_preboot_size;
+
+ tmp = krealloc(log->bios_event_log,
+ log_size + efi_tpm_final_log_size,
+ GFP_KERNEL);
+ if (!tmp) {
+ kfree(log->bios_event_log);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ log->bios_event_log = tmp;
+
+ /*
+ * Copy any of the final events log that didn't also end up in the
+ * main log. Events can be logged in both if events are generated
+ * between GetEventLog() and ExitBootServices().
+ */
+ memcpy((void *)log->bios_event_log + log_size,
+ final_tbl->events + log_tbl->final_events_preboot_size,
+ efi_tpm_final_log_size);
+ log->bios_event_log_end = log->bios_event_log +
+ log_size + efi_tpm_final_log_size;
+
+out:
+ memunmap(final_tbl);
memunmap(log_tbl);
- return -ENOMEM;
+ return ret;
}
diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
index d506362e046f..b9aeda1cbcd7 100644
--- a/drivers/char/tpm/eventlog/tpm2.c
+++ b/drivers/char/tpm/eventlog/tpm2.c
@@ -36,52 +36,7 @@
static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
struct tcg_pcr_event *event_header)
{
- struct tcg_efi_specid_event_head *efispecid;
- struct tcg_event_field *event_field;
- void *marker;
- void *marker_start;
- u32 halg_size;
- size_t size;
- u16 halg;
- int i;
- int j;
-
- marker = event;
- marker_start = marker;
- marker = marker + sizeof(event->pcr_idx) + sizeof(event->event_type)
- + sizeof(event->count);
-
- efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
-
- /* Check if event is malformed. */
- if (event->count > efispecid->num_algs)
- return 0;
-
- for (i = 0; i < event->count; i++) {
- halg_size = sizeof(event->digests[i].alg_id);
- memcpy(&halg, marker, halg_size);
- marker = marker + halg_size;
- for (j = 0; j < efispecid->num_algs; j++) {
- if (halg == efispecid->digest_sizes[j].alg_id) {
- marker +=
- efispecid->digest_sizes[j].digest_size;
- break;
- }
- }
- /* Algorithm without known length. Such event is unparseable. */
- if (j == efispecid->num_algs)
- return 0;
- }
-
- event_field = (struct tcg_event_field *)marker;
- marker = marker + sizeof(event_field->event_size)
- + event_field->event_size;
- size = marker - marker_start;
-
- if ((event->event_type == 0) && (event_field->event_size == 0))
- return 0;
-
- return size;
+ return __calc_tpm2_event_size(event, event_header, false);
}
static void *tpm2_bios_measurements_start(struct seq_file *m, loff_t *pos)
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 90325e1749fb..d47ad10a35fe 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -289,15 +289,15 @@ static int tpm_class_shutdown(struct device *dev)
{
struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+ down_write(&chip->ops_sem);
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- down_write(&chip->ops_sem);
if (!tpm_chip_start(chip)) {
tpm2_shutdown(chip, TPM2_SU_CLEAR);
tpm_chip_stop(chip);
}
- chip->ops = NULL;
- up_write(&chip->ops_sem);
}
+ chip->ops = NULL;
+ up_write(&chip->ops_sem);
return 0;
}
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index 85dcf2654d11..faacbe1ffa1a 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -510,7 +510,7 @@ struct tpm1_get_random_out {
*
* Return:
* * number of bytes read
- * * -errno or a TPM return code otherwise
+ * * -errno (positive TPM return codes are masked to -EIO)
*/
int tpm1_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
{
@@ -531,8 +531,11 @@ int tpm1_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
rc = tpm_transmit_cmd(chip, &buf, sizeof(out->rng_data_len),
"attempting get random");
- if (rc)
+ if (rc) {
+ if (rc > 0)
+ rc = -EIO;
goto out;
+ }
out = (struct tpm1_get_random_out *)&buf.data[TPM_HEADER_SIZE];
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 4de49924cfc4..d103545e4055 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -297,7 +297,7 @@ struct tpm2_get_random_out {
*
* Return:
* size of the buffer on success,
- * -errno otherwise
+ * -errno otherwise (positive TPM return codes are masked to -EIO)
*/
int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
{
@@ -324,8 +324,11 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
offsetof(struct tpm2_get_random_out,
buffer),
"attempting get random");
- if (err)
+ if (err) {
+ if (err > 0)
+ err = -EIO;
goto out;
+ }
out = (struct tpm2_get_random_out *)
&buf.data[TPM_HEADER_SIZE];
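
With both get_random hunks applied, callers see a uniform contract: a byte count on success, a negative errno on failure, and raw positive TPM return codes never leak out. A hedged caller sketch (the retry wrapper is illustrative, not from this patch):

        static int fill_entropy(struct tpm_chip *chip, u8 *buf, size_t len)
        {
                size_t got = 0;
                int rc;

                while (got < len) {
                        rc = tpm2_get_random(chip, buf + got, len - got);
                        if (rc < 0)
                                return rc;      /* always a negative errno now */
                        if (rc == 0)
                                return -EIO;    /* defensive: no progress made */
                        got += rc;
                }
                return 0;
        }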
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index aa51756fd4d6..87b410d6e51d 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -368,7 +368,7 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
const char *dev_id = dev ? dev_name(dev) : NULL;
struct device_node *np = core->of_node;
- if (np && index >= 0)
+ if (np && (name || index >= 0))
hw = of_clk_get_hw(np, index, name);
/*
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index 739f64fdf1e3..206fafd299ea 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -2734,8 +2734,8 @@ static struct clk_hw_onecell_data g12a_hw_onecell_data = {
[CLKID_MALI_1_DIV] = &g12a_mali_1_div.hw,
[CLKID_MALI_1] = &g12a_mali_1.hw,
[CLKID_MALI] = &g12a_mali.hw,
- [CLKID_MPLL_5OM_DIV] = &g12a_mpll_50m_div.hw,
- [CLKID_MPLL_5OM] = &g12a_mpll_50m.hw,
+ [CLKID_MPLL_50M_DIV] = &g12a_mpll_50m_div.hw,
+ [CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
[CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
[CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
[CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
index 39c41af70804..bcc05cd9882f 100644
--- a/drivers/clk/meson/g12a.h
+++ b/drivers/clk/meson/g12a.h
@@ -166,7 +166,7 @@
#define CLKID_HDMI_DIV 167
#define CLKID_MALI_0_DIV 170
#define CLKID_MALI_1_DIV 173
-#define CLKID_MPLL_5OM_DIV 176
+#define CLKID_MPLL_50M_DIV 176
#define CLKID_SYS_PLL_DIV16_EN 178
#define CLKID_SYS_PLL_DIV16 179
#define CLKID_CPU_CLK_DYN0_SEL 180
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 37cf0f01bb5d..62cd3a7f1f65 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -1761,7 +1761,7 @@ static struct clk_regmap meson8m2_gp_pll = {
},
};
-static const char * const mmeson8b_vpu_0_1_parent_names[] = {
+static const char * const meson8b_vpu_0_1_parent_names[] = {
"fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
};
@@ -1778,8 +1778,8 @@ static struct clk_regmap meson8b_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = mmeson8b_vpu_0_1_parent_names,
- .num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names),
+ .parent_names = meson8b_vpu_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1837,8 +1837,8 @@ static struct clk_regmap meson8b_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = mmeson8b_vpu_0_1_parent_names,
- .num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names),
+ .parent_names = meson8b_vpu_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 8281dfbf38c2..5bed36e12951 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -103,9 +103,9 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
{ STRATIX10_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux),
0, 0, 0, 0x3C, 1},
{ STRATIX10_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux),
- 0, 0, 4, 0xB0, 0},
+ 0, 0, 2, 0xB0, 0},
{ STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
- 0, 0, 4, 0xB0, 1},
+ 0, 0, 2, 0xB0, 1},
{ STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux,
ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2},
{ STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index e1ba62d2b1a0..ac1d27a8c650 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3366,6 +3366,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA210_CLK_I2S3_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
{ TEGRA210_CLK_I2S4_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
{ TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA210_CLK_HDA, TEGRA210_CLK_PLL_P, 51000000, 0 },
+ { TEGRA210_CLK_HDA2CODEC_2X, TEGRA210_CLK_PLL_P, 48000000, 0 },
/* This MUST be the last entry. */
{ TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 8e834317c97d..975995eea15c 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -229,6 +229,7 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
{
struct omap_clkctrl_provider *provider = data;
struct omap_clkctrl_clk *entry;
+ bool found = false;
if (clkspec->args_count != 2)
return ERR_PTR(-EINVAL);
@@ -238,11 +239,13 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
list_for_each_entry(entry, &provider->clocks, node) {
if (entry->reg_offset == clkspec->args[0] &&
- entry->bit_offset == clkspec->args[1])
+ entry->bit_offset == clkspec->args[1]) {
+ found = true;
break;
+ }
}
- if (!entry)
+ if (!found)
return ERR_PTR(-EINVAL);
return entry->clk;
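
The hunk above fixes a classic list_for_each_entry() pitfall; a distilled sketch of why the old !entry test could never fire (generic names, not from this driver):

        #include <linux/list.h>
        #include <linux/types.h>

        struct item {
                struct list_head node;
                int key;
        };

        static struct item *find_item(struct list_head *head, int key)
        {
                struct item *it;
                bool found = false;

                list_for_each_entry(it, head, node) {
                        if (it->key == key) {
                                found = true;
                                break;
                        }
                }
                /*
                 * After a full walk, the cursor points at memory computed
                 * from the list head itself and is never NULL, so 'found'
                 * must be tested instead of 'it'.
                 */
                return found ? it : NULL;
        }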
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 3300739edce4..5e9317dc3d39 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -43,6 +43,11 @@ config BCM_KONA_TIMER
help
Enables the support for the BCM Kona mobile timer driver.
+config DAVINCI_TIMER
+ bool "Texas Instruments DaVinci timer driver" if COMPILE_TEST
+ help
+ Enables the support for the TI DaVinci timer driver.
+
config DIGICOLOR_TIMER
bool "Digicolor timer driver" if COMPILE_TEST
select CLKSRC_MMIO
@@ -140,7 +145,7 @@ config TEGRA_TIMER
bool "Tegra timer driver" if COMPILE_TEST
select CLKSRC_MMIO
select TIMER_OF
- depends on ARM || ARM64
+ depends on ARCH_TEGRA || COMPILE_TEST
help
Enables support for the Tegra driver.
@@ -617,6 +622,13 @@ config CLKSRC_IMX_TPM
Enable this option to use IMX Timer/PWM Module (TPM) timer as
clocksource.
+config TIMER_IMX_SYS_CTR
+ bool "i.MX system counter timer" if COMPILE_TEST
+ select TIMER_OF
+ help
+ Enable this option to use i.MX system counter timer as a
+ clockevent.
+
config CLKSRC_ST_LPC
bool "Low power clocksource found in the LPC" if COMPILE_TEST
select TIMER_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 236858fa7fbf..2e7936e7833f 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
obj-$(CONFIG_EM_TIMER_STI) += em_sti.o
obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
+obj-$(CONFIG_DAVINCI_TIMER) += timer-davinci.o
obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o
obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
@@ -36,7 +37,7 @@ obj-$(CONFIG_U300_TIMER) += timer-u300.o
obj-$(CONFIG_SUN4I_TIMER) += timer-sun4i.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_MESON6_TIMER) += timer-meson6.o
-obj-$(CONFIG_TEGRA_TIMER) += timer-tegra20.o
+obj-$(CONFIG_TEGRA_TIMER) += timer-tegra.o
obj-$(CONFIG_VT8500_TIMER) += timer-vt8500.o
obj-$(CONFIG_NSPIRE_TIMER) += timer-zevio.o
obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o
@@ -74,6 +75,7 @@ obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
obj-$(CONFIG_CLKSRC_TANGO_XTAL) += timer-tango-xtal.o
obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
obj-$(CONFIG_CLKSRC_IMX_TPM) += timer-imx-tpm.o
+obj-$(CONFIG_TIMER_IMX_SYS_CTR) += timer-imx-sysctr.o
obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o
obj-$(CONFIG_H8300_TMR8) += h8300_timer8.o
obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o
@@ -84,3 +86,4 @@ obj-$(CONFIG_ATCPIT100_TIMER) += timer-atcpit100.o
obj-$(CONFIG_RISCV_TIMER) += timer-riscv.o
obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o
obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o
+obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index ebfbccefc7b3..b29b5a75333e 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -13,6 +13,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
@@ -139,7 +140,7 @@ static u64 arc_read_rtc(struct clocksource *cs)
l = read_aux_reg(AUX_RTC_LOW);
h = read_aux_reg(AUX_RTC_HIGH);
status = read_aux_reg(AUX_RTC_CTRL);
- } while (!(status & _BITUL(31)));
+ } while (!(status & BIT(31)));
return (((u64)h) << 32) | l;
}
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 07e57a49d1e8..9a5464c625b4 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -801,14 +801,7 @@ static void arch_timer_evtstrm_enable(int divider)
cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
| ARCH_TIMER_VIRT_EVT_EN;
arch_timer_set_cntkctl(cntkctl);
-#ifdef CONFIG_ARM64
- cpu_set_named_feature(EVTSTRM);
-#else
- elf_hwcap |= HWCAP_EVTSTRM;
-#endif
-#ifdef CONFIG_COMPAT
- compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
-#endif
+ arch_timer_set_evtstrm_feature();
cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
@@ -1037,11 +1030,7 @@ static int arch_timer_cpu_pm_notify(struct notifier_block *self,
} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
-#ifdef CONFIG_ARM64
- if (cpu_have_named_feature(EVTSTRM))
-#else
- if (elf_hwcap & HWCAP_EVTSTRM)
-#endif
+ if (arch_timer_have_evtstrm_feature())
cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
return NOTIFY_OK;
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index e8eab16b154b..74cb299f5089 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -206,7 +206,7 @@ static void exynos4_frc_resume(struct clocksource *cs)
static struct clocksource mct_frc = {
.name = "mct-frc",
- .rating = 400,
+ .rating = 450, /* use value higher than ARM arch timer */
.read = exynos4_frc_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -461,7 +461,7 @@ static int exynos4_mct_starting_cpu(unsigned int cpu)
evt->set_state_oneshot_stopped = set_state_shutdown;
evt->tick_resume = set_state_shutdown;
evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
- evt->rating = 450;
+ evt->rating = 500; /* use value higher than ARM arch timer */
exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
new file mode 100644
index 000000000000..ba2c79e6a0ee
--- /dev/null
+++ b/drivers/clocksource/hyperv_timer.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Clocksource driver for the synthetic counter and timers
+ * provided by the Hyper-V hypervisor to guest VMs, as described
+ * in the Hyper-V Top Level Functional Spec (TLFS). This driver
+ * is instruction set architecture independent.
+ *
+ * Copyright (C) 2019, Microsoft, Inc.
+ *
+ * Author: Michael Kelley <mikelley@microsoft.com>
+ */
+
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/mm.h>
+#include <clocksource/hyperv_timer.h>
+#include <asm/hyperv-tlfs.h>
+#include <asm/mshyperv.h>
+
+static struct clock_event_device __percpu *hv_clock_event;
+
+/*
+ * If false, we're using the old mechanism for stimer0 interrupts
+ * where it sends a VMbus message when it expires. The old
+ * mechanism is used when running on older versions of Hyper-V
+ * that don't support Direct Mode. While Hyper-V provides
+ * four stimers per CPU, Linux uses only stimer0.
+ */
+static bool direct_mode_enabled;
+
+static int stimer0_irq;
+static int stimer0_vector;
+static int stimer0_message_sint;
+
+/*
+ * ISR for when stimer0 is operating in Direct Mode. Direct Mode
+ * does not use VMbus or any VMbus messages, so process here and not
+ * in the VMbus driver code.
+ */
+void hv_stimer0_isr(void)
+{
+ struct clock_event_device *ce;
+
+ ce = this_cpu_ptr(hv_clock_event);
+ ce->event_handler(ce);
+}
+EXPORT_SYMBOL_GPL(hv_stimer0_isr);
+
+static int hv_ce_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ u64 current_tick;
+
+ current_tick = hyperv_cs->read(NULL);
+ current_tick += delta;
+ hv_init_timer(0, current_tick);
+ return 0;
+}
+
+static int hv_ce_shutdown(struct clock_event_device *evt)
+{
+ hv_init_timer(0, 0);
+ hv_init_timer_config(0, 0);
+ if (direct_mode_enabled)
+ hv_disable_stimer0_percpu_irq(stimer0_irq);
+
+ return 0;
+}
+
+static int hv_ce_set_oneshot(struct clock_event_device *evt)
+{
+ union hv_stimer_config timer_cfg;
+
+ timer_cfg.as_uint64 = 0;
+ timer_cfg.enable = 1;
+ timer_cfg.auto_enable = 1;
+ if (direct_mode_enabled) {
+ /*
+ * When it expires, the timer will directly interrupt
+ * on the specified hardware vector/IRQ.
+ */
+ timer_cfg.direct_mode = 1;
+ timer_cfg.apic_vector = stimer0_vector;
+ hv_enable_stimer0_percpu_irq(stimer0_irq);
+ } else {
+ /*
+ * When it expires, the timer will generate a VMbus message,
+ * to be handled by the normal VMbus interrupt handler.
+ */
+ timer_cfg.direct_mode = 0;
+ timer_cfg.sintx = stimer0_message_sint;
+ }
+ hv_init_timer_config(0, timer_cfg.as_uint64);
+ return 0;
+}
+
+/*
+ * hv_stimer_init - Per-cpu initialization of the clockevent
+ */
+void hv_stimer_init(unsigned int cpu)
+{
+ struct clock_event_device *ce;
+
+ /*
+ * Synthetic timers are always available except on old versions of
+ * Hyper-V on x86. In that case, just return as Linux will use a
+ * clocksource based on emulated PIT or LAPIC timer hardware.
+ */
+ if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
+ return;
+
+ ce = per_cpu_ptr(hv_clock_event, cpu);
+ ce->name = "Hyper-V clockevent";
+ ce->features = CLOCK_EVT_FEAT_ONESHOT;
+ ce->cpumask = cpumask_of(cpu);
+ ce->rating = 1000;
+ ce->set_state_shutdown = hv_ce_shutdown;
+ ce->set_state_oneshot = hv_ce_set_oneshot;
+ ce->set_next_event = hv_ce_set_next_event;
+
+ clockevents_config_and_register(ce,
+ HV_CLOCK_HZ,
+ HV_MIN_DELTA_TICKS,
+ HV_MAX_MAX_DELTA_TICKS);
+}
+EXPORT_SYMBOL_GPL(hv_stimer_init);
+
+/*
+ * hv_stimer_cleanup - Per-cpu cleanup of the clockevent
+ */
+void hv_stimer_cleanup(unsigned int cpu)
+{
+ struct clock_event_device *ce;
+
+ /* Turn off clockevent device */
+ if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
+ ce = per_cpu_ptr(hv_clock_event, cpu);
+ hv_ce_shutdown(ce);
+ }
+}
+EXPORT_SYMBOL_GPL(hv_stimer_cleanup);
+
+/* hv_stimer_alloc - Global initialization of the clockevent and stimer0 */
+int hv_stimer_alloc(int sint)
+{
+ int ret;
+
+ hv_clock_event = alloc_percpu(struct clock_event_device);
+ if (!hv_clock_event)
+ return -ENOMEM;
+
+ direct_mode_enabled = ms_hyperv.misc_features &
+ HV_STIMER_DIRECT_MODE_AVAILABLE;
+ if (direct_mode_enabled) {
+ ret = hv_setup_stimer0_irq(&stimer0_irq, &stimer0_vector,
+ hv_stimer0_isr);
+ if (ret) {
+ free_percpu(hv_clock_event);
+ hv_clock_event = NULL;
+ return ret;
+ }
+ }
+
+ stimer0_message_sint = sint;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hv_stimer_alloc);
+
+/* hv_stimer_free - Free global resources allocated by hv_stimer_alloc() */
+void hv_stimer_free(void)
+{
+ if (direct_mode_enabled && (stimer0_irq != 0)) {
+ hv_remove_stimer0_irq(stimer0_irq);
+ stimer0_irq = 0;
+ }
+ free_percpu(hv_clock_event);
+ hv_clock_event = NULL;
+}
+EXPORT_SYMBOL_GPL(hv_stimer_free);
+
+/*
+ * Do a global cleanup of clockevents for the cases of kexec and
+ * vmbus exit
+ */
+void hv_stimer_global_cleanup(void)
+{
+ int cpu;
+ struct clock_event_device *ce;
+
+ if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
+ for_each_present_cpu(cpu) {
+ ce = per_cpu_ptr(hv_clock_event, cpu);
+ clockevents_unbind_device(ce, cpu);
+ }
+ }
+ hv_stimer_free();
+}
+EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
+
+/*
+ * Code and definitions for the Hyper-V clocksources. Two
+ * clocksources are defined: one that reads the Hyper-V defined MSR, and
+ * the other that uses the TSC reference page feature as defined in the
+ * TLFS. The MSR version is for compatibility with old versions of
+ * Hyper-V and 32-bit x86. The TSC reference page version is preferred.
+ */
+
+struct clocksource *hyperv_cs;
+EXPORT_SYMBOL_GPL(hyperv_cs);
+
+#ifdef CONFIG_HYPERV_TSCPAGE
+
+static struct ms_hyperv_tsc_page *tsc_pg;
+
+struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
+{
+ return tsc_pg;
+}
+EXPORT_SYMBOL_GPL(hv_get_tsc_page);
+
+static u64 notrace read_hv_sched_clock_tsc(void)
+{
+ u64 current_tick = hv_read_tsc_page(tsc_pg);
+
+ if (current_tick == U64_MAX)
+ hv_get_time_ref_count(current_tick);
+
+ return current_tick;
+}
+
+static u64 read_hv_clock_tsc(struct clocksource *arg)
+{
+ return read_hv_sched_clock_tsc();
+}
+
+static struct clocksource hyperv_cs_tsc = {
+ .name = "hyperv_clocksource_tsc_page",
+ .rating = 400,
+ .read = read_hv_clock_tsc,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+#endif
+
+static u64 notrace read_hv_sched_clock_msr(void)
+{
+ u64 current_tick;
+ /*
+ * Read the partition counter to get the current tick count. This count
+ * is set to 0 when the partition is created and is incremented in
+ * 100 nanosecond units.
+ */
+ hv_get_time_ref_count(current_tick);
+ return current_tick;
+}
+
+static u64 read_hv_clock_msr(struct clocksource *arg)
+{
+ return read_hv_sched_clock_msr();
+}
+
+static struct clocksource hyperv_cs_msr = {
+ .name = "hyperv_clocksource_msr",
+ .rating = 400,
+ .read = read_hv_clock_msr,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#ifdef CONFIG_HYPERV_TSCPAGE
+static bool __init hv_init_tsc_clocksource(void)
+{
+ u64 tsc_msr;
+ phys_addr_t phys_addr;
+
+ if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
+ return false;
+
+ tsc_pg = vmalloc(PAGE_SIZE);
+ if (!tsc_pg)
+ return false;
+
+ hyperv_cs = &hyperv_cs_tsc;
+ phys_addr = page_to_phys(vmalloc_to_page(tsc_pg));
+
+ /*
+ * The Hyper-V TLFS specifies to preserve the value of reserved
+ * bits in registers. So read the existing value, preserve the
+ * low order 12 bits, and add in the guest physical address
+ * (which already has at least the low 12 bits set to zero since
+ * it is page aligned). Also set the "enable" bit, which is bit 0.
+ */
+ hv_get_reference_tsc(tsc_msr);
+ tsc_msr &= GENMASK_ULL(11, 0);
+ tsc_msr = tsc_msr | 0x1 | (u64)phys_addr;
+ hv_set_reference_tsc(tsc_msr);
+
+ hv_set_clocksource_vdso(hyperv_cs_tsc);
+ clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
+
+ /* sched_clock_register is needed on ARM64 but is a no-op on x86 */
+ sched_clock_register(read_hv_sched_clock_tsc, 64, HV_CLOCK_HZ);
+ return true;
+}
+#else
+static bool __init hv_init_tsc_clocksource(void)
+{
+ return false;
+}
+#endif
+
+
+void __init hv_init_clocksource(void)
+{
+ /*
+ * Try to set up the TSC page clocksource. If it succeeds, we're
+ * done. Otherwise, set up the MSR clocksource. At least one of
+ * these will always be available except on very old versions of
+ * Hyper-V on x86. In that case we won't have a Hyper-V
+ * clocksource, but Linux will still run with a clocksource based
+ * on the emulated PIT or LAPIC timer.
+ */
+ if (hv_init_tsc_clocksource())
+ return;
+
+ if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE))
+ return;
+
+ hyperv_cs = &hyperv_cs_msr;
+ clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
+
+ /* sched_clock_register is needed on ARM64 but is a no-op on x86 */
+ sched_clock_register(read_hv_sched_clock_msr, 64, HV_CLOCK_HZ);
+}
+EXPORT_SYMBOL_GPL(hv_init_clocksource);
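
A worked illustration of the reference-TSC MSR composition in hv_init_tsc_clocksource() above; the input values are made up for the example:

        #include <linux/bits.h>
        #include <linux/types.h>

        /* Mirrors the code above: preserve reserved low bits, set enable,
         * install the page-aligned guest physical address. */
        static u64 compose_ref_tsc_msr(u64 old_msr, u64 gpa)
        {
                u64 msr = old_msr & GENMASK_ULL(11, 0); /* keep low 12 bits */

                return msr | 0x1 | gpa;         /* enable bit + GPA in 63:12 */
        }
        /* e.g. compose_ref_tsc_msr(0xffe, 0x12345000) == 0x12345fff */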
diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
new file mode 100644
index 000000000000..62745c962049
--- /dev/null
+++ b/drivers/clocksource/timer-davinci.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI DaVinci clocksource driver
+ *
+ * Copyright (C) 2019 Texas Instruments
+ * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ * (with tiny parts adapted from code by Kevin Hilman <khilman@baylibre.com>)
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+
+#include <clocksource/timer-davinci.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+
+#define DAVINCI_TIMER_REG_TIM12 0x10
+#define DAVINCI_TIMER_REG_TIM34 0x14
+#define DAVINCI_TIMER_REG_PRD12 0x18
+#define DAVINCI_TIMER_REG_PRD34 0x1c
+#define DAVINCI_TIMER_REG_TCR 0x20
+#define DAVINCI_TIMER_REG_TGCR 0x24
+
+#define DAVINCI_TIMER_TIMMODE_MASK GENMASK(3, 2)
+#define DAVINCI_TIMER_RESET_MASK GENMASK(1, 0)
+#define DAVINCI_TIMER_TIMMODE_32BIT_UNCHAINED BIT(2)
+#define DAVINCI_TIMER_UNRESET GENMASK(1, 0)
+
+#define DAVINCI_TIMER_ENAMODE_MASK GENMASK(1, 0)
+#define DAVINCI_TIMER_ENAMODE_DISABLED 0x00
+#define DAVINCI_TIMER_ENAMODE_ONESHOT BIT(0)
+#define DAVINCI_TIMER_ENAMODE_PERIODIC BIT(1)
+
+#define DAVINCI_TIMER_ENAMODE_SHIFT_TIM12 6
+#define DAVINCI_TIMER_ENAMODE_SHIFT_TIM34 22
+
+#define DAVINCI_TIMER_MIN_DELTA 0x01
+#define DAVINCI_TIMER_MAX_DELTA 0xfffffffe
+
+#define DAVINCI_TIMER_CLKSRC_BITS 32
+
+#define DAVINCI_TIMER_TGCR_DEFAULT \
+ (DAVINCI_TIMER_TIMMODE_32BIT_UNCHAINED | DAVINCI_TIMER_UNRESET)
+
+struct davinci_clockevent {
+ struct clock_event_device dev;
+ void __iomem *base;
+ unsigned int cmp_off;
+};
+
+/*
+ * This must be globally accessible by davinci_timer_read_sched_clock(), so
+ * let's keep it here.
+ */
+static struct {
+ struct clocksource dev;
+ void __iomem *base;
+ unsigned int tim_off;
+} davinci_clocksource;
+
+static struct davinci_clockevent *
+to_davinci_clockevent(struct clock_event_device *clockevent)
+{
+ return container_of(clockevent, struct davinci_clockevent, dev);
+}
+
+static unsigned int
+davinci_clockevent_read(struct davinci_clockevent *clockevent,
+ unsigned int reg)
+{
+ return readl_relaxed(clockevent->base + reg);
+}
+
+static void davinci_clockevent_write(struct davinci_clockevent *clockevent,
+ unsigned int reg, unsigned int val)
+{
+ writel_relaxed(val, clockevent->base + reg);
+}
+
+static void davinci_tim12_shutdown(void __iomem *base)
+{
+ unsigned int tcr;
+
+ tcr = DAVINCI_TIMER_ENAMODE_DISABLED <<
+ DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
+ /*
+ * This function is only ever called if we're using both timer
+ * halves. In this case TIM34 runs in periodic mode and we must
+ * not modify it.
+ */
+ tcr |= DAVINCI_TIMER_ENAMODE_PERIODIC <<
+ DAVINCI_TIMER_ENAMODE_SHIFT_TIM34;
+
+ writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
+}
+
+static void davinci_tim12_set_oneshot(void __iomem *base)
+{
+ unsigned int tcr;
+
+ tcr = DAVINCI_TIMER_ENAMODE_ONESHOT <<
+ DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
+ /* Same as above. */
+ tcr |= DAVINCI_TIMER_ENAMODE_PERIODIC <<
+ DAVINCI_TIMER_ENAMODE_SHIFT_TIM34;
+
+ writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
+}
+
+static int davinci_clockevent_shutdown(struct clock_event_device *dev)
+{
+ struct davinci_clockevent *clockevent;
+
+ clockevent = to_davinci_clockevent(dev);
+
+ davinci_tim12_shutdown(clockevent->base);
+
+ return 0;
+}
+
+static int davinci_clockevent_set_oneshot(struct clock_event_device *dev)
+{
+ struct davinci_clockevent *clockevent = to_davinci_clockevent(dev);
+
+ davinci_clockevent_write(clockevent, DAVINCI_TIMER_REG_TIM12, 0x0);
+
+ davinci_tim12_set_oneshot(clockevent->base);
+
+ return 0;
+}
+
+static int
+davinci_clockevent_set_next_event_std(unsigned long cycles,
+ struct clock_event_device *dev)
+{
+ struct davinci_clockevent *clockevent = to_davinci_clockevent(dev);
+
+ davinci_clockevent_shutdown(dev);
+
+ davinci_clockevent_write(clockevent, DAVINCI_TIMER_REG_TIM12, 0x0);
+ davinci_clockevent_write(clockevent, DAVINCI_TIMER_REG_PRD12, cycles);
+
+ davinci_clockevent_set_oneshot(dev);
+
+ return 0;
+}
+
+static int
+davinci_clockevent_set_next_event_cmp(unsigned long cycles,
+ struct clock_event_device *dev)
+{
+ struct davinci_clockevent *clockevent = to_davinci_clockevent(dev);
+ unsigned int curr_time;
+
+ curr_time = davinci_clockevent_read(clockevent,
+ DAVINCI_TIMER_REG_TIM12);
+ davinci_clockevent_write(clockevent,
+ clockevent->cmp_off, curr_time + cycles);
+
+ return 0;
+}
+
+static irqreturn_t davinci_timer_irq_timer(int irq, void *data)
+{
+ struct davinci_clockevent *clockevent = data;
+
+ if (!clockevent_state_oneshot(&clockevent->dev))
+ davinci_tim12_shutdown(clockevent->base);
+
+ clockevent->dev.event_handler(&clockevent->dev);
+
+ return IRQ_HANDLED;
+}
+
+static u64 notrace davinci_timer_read_sched_clock(void)
+{
+ return readl_relaxed(davinci_clocksource.base +
+ davinci_clocksource.tim_off);
+}
+
+static u64 davinci_clocksource_read(struct clocksource *dev)
+{
+ return davinci_timer_read_sched_clock();
+}
+
+/*
+ * Standard use-case: we're using tim12 for clockevent and tim34 for
+ * clocksource. The default is making the former run in oneshot mode
+ * and the latter in periodic mode.
+ */
+static void davinci_clocksource_init_tim34(void __iomem *base)
+{
+ int tcr;
+
+ tcr = DAVINCI_TIMER_ENAMODE_PERIODIC <<
+ DAVINCI_TIMER_ENAMODE_SHIFT_TIM34;
+ tcr |= DAVINCI_TIMER_ENAMODE_ONESHOT <<
+ DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
+
+ writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM34);
+ writel_relaxed(UINT_MAX, base + DAVINCI_TIMER_REG_PRD34);
+ writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
+}
+
+/*
+ * Special use-case on da830: the DSP may use tim34. We're using tim12 for
+ * both clocksource and clockevent. We set tim12 to periodic and don't touch
+ * tim34.
+ */
+static void davinci_clocksource_init_tim12(void __iomem *base)
+{
+ unsigned int tcr;
+
+ tcr = DAVINCI_TIMER_ENAMODE_PERIODIC <<
+ DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
+
+ writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM12);
+ writel_relaxed(UINT_MAX, base + DAVINCI_TIMER_REG_PRD12);
+ writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
+}
+
+static void davinci_timer_init(void __iomem *base)
+{
+ /* Set clock to internal mode and disable it. */
+ writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TCR);
+ /*
+ * Reset both 32-bit timers, set no prescaler for timer 34, set the
+ * timer to dual 32-bit unchained mode, unreset both 32-bit timers.
+ */
+ writel_relaxed(DAVINCI_TIMER_TGCR_DEFAULT,
+ base + DAVINCI_TIMER_REG_TGCR);
+ /* Init both counters to zero. */
+ writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM12);
+ writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM34);
+}
+
+int __init davinci_timer_register(struct clk *clk,
+ const struct davinci_timer_cfg *timer_cfg)
+{
+ struct davinci_clockevent *clockevent;
+ unsigned int tick_rate;
+ void __iomem *base;
+ int rv;
+
+ rv = clk_prepare_enable(clk);
+ if (rv) {
+ pr_err("Unable to prepare and enable the timer clock");
+ return rv;
+ }
+
+ if (!request_mem_region(timer_cfg->reg.start,
+ resource_size(&timer_cfg->reg),
+ "davinci-timer")) {
+ pr_err("Unable to request memory region");
+ return -EBUSY;
+ }
+
+ base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg));
+ if (!base) {
+ pr_err("Unable to map the register range");
+ return -ENOMEM;
+ }
+
+ davinci_timer_init(base);
+ tick_rate = clk_get_rate(clk);
+
+ clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL);
+ if (!clockevent) {
+ pr_err("Error allocating memory for clockevent data");
+ return -ENOMEM;
+ }
+
+ clockevent->dev.name = "tim12";
+ clockevent->dev.features = CLOCK_EVT_FEAT_ONESHOT;
+ clockevent->dev.cpumask = cpumask_of(0);
+ clockevent->base = base;
+
+ if (timer_cfg->cmp_off) {
+ clockevent->cmp_off = timer_cfg->cmp_off;
+ clockevent->dev.set_next_event =
+ davinci_clockevent_set_next_event_cmp;
+ } else {
+ clockevent->dev.set_next_event =
+ davinci_clockevent_set_next_event_std;
+ clockevent->dev.set_state_oneshot =
+ davinci_clockevent_set_oneshot;
+ clockevent->dev.set_state_shutdown =
+ davinci_clockevent_shutdown;
+ }
+
+ rv = request_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start,
+ davinci_timer_irq_timer, IRQF_TIMER,
+ "clockevent/tim12", clockevent);
+ if (rv) {
+ pr_err("Unable to request the clockevent interrupt");
+ return rv;
+ }
+
+ clockevents_config_and_register(&clockevent->dev, tick_rate,
+ DAVINCI_TIMER_MIN_DELTA,
+ DAVINCI_TIMER_MAX_DELTA);
+
+ davinci_clocksource.dev.rating = 300;
+ davinci_clocksource.dev.read = davinci_clocksource_read;
+ davinci_clocksource.dev.mask =
+ CLOCKSOURCE_MASK(DAVINCI_TIMER_CLKSRC_BITS);
+ davinci_clocksource.dev.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ davinci_clocksource.base = base;
+
+ if (timer_cfg->cmp_off) {
+ davinci_clocksource.dev.name = "tim12";
+ davinci_clocksource.tim_off = DAVINCI_TIMER_REG_TIM12;
+ davinci_clocksource_init_tim12(base);
+ } else {
+ davinci_clocksource.dev.name = "tim34";
+ davinci_clocksource.tim_off = DAVINCI_TIMER_REG_TIM34;
+ davinci_clocksource_init_tim34(base);
+ }
+
+ rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
+ if (rv) {
+ pr_err("Unable to register clocksource");
+ return rv;
+ }
+
+ sched_clock_register(davinci_timer_read_sched_clock,
+ DAVINCI_TIMER_CLKSRC_BITS, tick_rate);
+
+ return 0;
+}
+
+static int __init of_davinci_timer_register(struct device_node *np)
+{
+ struct davinci_timer_cfg timer_cfg = { };
+ struct clk *clk;
+ int rv;
+
+ rv = of_address_to_resource(np, 0, &timer_cfg.reg);
+ if (rv) {
+ pr_err("Unable to get the register range for timer");
+ return rv;
+ }
+
+ rv = of_irq_to_resource_table(np, timer_cfg.irq,
+ DAVINCI_TIMER_NUM_IRQS);
+ if (rv != DAVINCI_TIMER_NUM_IRQS) {
+ pr_err("Unable to get the interrupts for timer");
+ return rv;
+ }
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Unable to get the timer clock");
+ return PTR_ERR(clk);
+ }
+
+ rv = davinci_timer_register(clk, &timer_cfg);
+ if (rv)
+ clk_put(clk);
+
+ return rv;
+}
+TIMER_OF_DECLARE(davinci_timer, "ti,da830-timer", of_davinci_timer_register);
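
The davinci_timer_register() entry point above is also usable from legacy (non-DT) board code. A minimal board-side sketch, assuming a hypothetical timer block at 0x01c20000 with interrupts 21 and 22 (the base address, IRQ numbers and the da8xx_* names are illustrative, not taken from a real board file):

	static const struct davinci_timer_cfg da8xx_timer_cfg = {
		/* register window and the two timer interrupts (illustrative) */
		.reg = DEFINE_RES_MEM(0x01c20000, SZ_4K),
		.irq = {
			DEFINE_RES_IRQ(21),	/* tim12 - clockevent */
			DEFINE_RES_IRQ(22),	/* tim34 - clocksource */
		},
		/* .cmp_off left at zero: standard tim12/tim34 split */
	};

	void __init da8xx_timer_init(struct clk *timer_clk)
	{
		int err = davinci_timer_register(timer_clk, &da8xx_timer_cfg);

		if (err)
			pr_err("davinci-timer: registration failed: %d\n", err);
	}
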
diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c
new file mode 100644
index 000000000000..fd7d68066efb
--- /dev/null
+++ b/drivers/clocksource/timer-imx-sysctr.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2017-2019 NXP
+
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include "timer-of.h"
+
+#define CMP_OFFSET 0x10000
+
+#define CNTCV_LO 0x8
+#define CNTCV_HI 0xc
+#define CMPCV_LO (CMP_OFFSET + 0x20)
+#define CMPCV_HI (CMP_OFFSET + 0x24)
+#define CMPCR (CMP_OFFSET + 0x2c)
+
+#define SYS_CTR_EN 0x1
+#define SYS_CTR_IRQ_MASK 0x2
+
+static void __iomem *sys_ctr_base;
+static u32 cmpcr;
+
+static void sysctr_timer_enable(bool enable)
+{
+ writel(enable ? cmpcr | SYS_CTR_EN : cmpcr, sys_ctr_base + CMPCR);
+}
+
+static void sysctr_irq_acknowledge(void)
+{
+ /*
+ * Clearing the enable bit (EN = 0) also clears the status bit
+ * (ISTAT = 0), upon which the interrupt signal is negated
+ * (acknowledged).
+ */
+ sysctr_timer_enable(false);
+}
+
+static inline u64 sysctr_read_counter(void)
+{
+ u32 cnt_hi, tmp_hi, cnt_lo;
+
+ do {
+ cnt_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
+ cnt_lo = readl_relaxed(sys_ctr_base + CNTCV_LO);
+ tmp_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
+ } while (tmp_hi != cnt_hi);
+
+ return ((u64) cnt_hi << 32) | cnt_lo;
+}
+
+static int sysctr_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ u32 cmp_hi, cmp_lo;
+ u64 next;
+
+ sysctr_timer_enable(false);
+
+ next = sysctr_read_counter();
+
+ next += delta;
+
+ cmp_hi = (next >> 32) & 0x00fffff;
+ cmp_lo = next & 0xffffffff;
+
+ writel_relaxed(cmp_hi, sys_ctr_base + CMPCV_HI);
+ writel_relaxed(cmp_lo, sys_ctr_base + CMPCV_LO);
+
+ sysctr_timer_enable(true);
+
+ return 0;
+}
+
+static int sysctr_set_state_oneshot(struct clock_event_device *evt)
+{
+ return 0;
+}
+
+static int sysctr_set_state_shutdown(struct clock_event_device *evt)
+{
+ sysctr_timer_enable(false);
+
+ return 0;
+}
+
+static irqreturn_t sysctr_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ sysctr_irq_acknowledge();
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct timer_of to_sysctr = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE,
+ .clkevt = {
+ .name = "i.MX system counter timer",
+ .features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ,
+ .set_state_oneshot = sysctr_set_state_oneshot,
+ .set_next_event = sysctr_set_next_event,
+ .set_state_shutdown = sysctr_set_state_shutdown,
+ .rating = 200,
+ },
+ .of_irq = {
+ .handler = sysctr_timer_interrupt,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ },
+ .of_clk = {
+ .name = "per",
+ },
+};
+
+static void __init sysctr_clockevent_init(void)
+{
+ to_sysctr.clkevt.cpumask = cpumask_of(0);
+
+ clockevents_config_and_register(&to_sysctr.clkevt,
+ timer_of_rate(&to_sysctr),
+ 0xff, 0x7fffffff);
+}
+
+static int __init sysctr_timer_init(struct device_node *np)
+{
+ int ret = 0;
+
+ ret = timer_of_init(np, &to_sysctr);
+ if (ret)
+ return ret;
+
+ sys_ctr_base = timer_of_base(&to_sysctr);
+ cmpcr = readl(sys_ctr_base + CMPCR);
+ cmpcr &= ~SYS_CTR_EN;
+
+ sysctr_clockevent_init();
+
+ return 0;
+}
+TIMER_OF_DECLARE(sysctr_timer, "nxp,sysctr-timer", sysctr_timer_init);
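
sysctr_set_next_event() above has to update a 64-bit compare value through two 32-bit registers; disabling the comparator around the pair of writes guarantees it never observes a half-updated value, which is the write-side analogue of the hi-lo-hi retry loop in sysctr_read_counter(). A condensed sketch of that update (a simplified restatement, not additional driver code; the driver additionally masks the high compare word):

	static void sysctr_write_compare(u64 next)
	{
		sysctr_timer_enable(false);		/* comparator off */

		writel_relaxed(upper_32_bits(next), sys_ctr_base + CMPCV_HI);
		writel_relaxed(lower_32_bits(next), sys_ctr_base + CMPCV_LO);

		sysctr_timer_enable(true);		/* comparator back on */
	}
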
diff --git a/drivers/clocksource/timer-ixp4xx.c b/drivers/clocksource/timer-ixp4xx.c
index 5c2190b654cd..9396745e1c17 100644
--- a/drivers/clocksource/timer-ixp4xx.c
+++ b/drivers/clocksource/timer-ixp4xx.c
@@ -75,14 +75,19 @@ to_ixp4xx_timer(struct clock_event_device *evt)
return container_of(evt, struct ixp4xx_timer, clkevt);
}
-static u64 notrace ixp4xx_read_sched_clock(void)
+static unsigned long ixp4xx_read_timer(void)
{
return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
}
+static u64 notrace ixp4xx_read_sched_clock(void)
+{
+ return ixp4xx_read_timer();
+}
+
static u64 ixp4xx_clocksource_read(struct clocksource *c)
{
- return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
+ return ixp4xx_read_timer();
}
static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
@@ -224,6 +229,13 @@ static __init int ixp4xx_timer_register(void __iomem *base,
sched_clock_register(ixp4xx_read_sched_clock, 32, timer_freq);
+#ifdef CONFIG_ARM
+ /* Also use this timer for delays */
+ tmr->delay_timer.read_current_timer = ixp4xx_read_timer;
+ tmr->delay_timer.freq = timer_freq;
+ register_current_timer_delay(&tmr->delay_timer);
+#endif
+
return 0;
}
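
register_current_timer_delay() lets the ARM delay core implement udelay() by polling this free-running timer rather than spinning in a calibrated loop, which stays accurate across cpufreq changes. A simplified sketch of what the delay core does with the registered read_current_timer hook (modelled on arch/arm/lib/delay.c, not driver code):

	static void timer_based_delay(unsigned long cycles)
	{
		unsigned long start = ixp4xx_read_timer();

		/* busy-wait until the counter has advanced far enough */
		while ((ixp4xx_read_timer() - start) < cycles)
			cpu_relax();
	}
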
diff --git a/drivers/clocksource/timer-meson6.c b/drivers/clocksource/timer-meson6.c
index 84bd9479c3f8..9e8b467c71da 100644
--- a/drivers/clocksource/timer-meson6.c
+++ b/drivers/clocksource/timer-meson6.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Amlogic Meson6 SoCs timer handling.
*
* Copyright (C) 2014 Carlo Caione <carlo@caione.org>
*
* Based on code from Amlogic, Inc
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/bitfield.h>
diff --git a/drivers/clocksource/timer-tegra.c b/drivers/clocksource/timer-tegra.c
new file mode 100644
index 000000000000..e9635c25eef4
--- /dev/null
+++ b/drivers/clocksource/timer-tegra.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ */
+
+#define pr_fmt(fmt) "tegra-timer: " fmt
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/sched_clock.h>
+#include <linux/time.h>
+
+#include "timer-of.h"
+
+#define RTC_SECONDS 0x08
+#define RTC_SHADOW_SECONDS 0x0c
+#define RTC_MILLISECONDS 0x10
+
+#define TIMERUS_CNTR_1US 0x10
+#define TIMERUS_USEC_CFG 0x14
+#define TIMERUS_CNTR_FREEZE 0x4c
+
+#define TIMER_PTV 0x0
+#define TIMER_PTV_EN BIT(31)
+#define TIMER_PTV_PER BIT(30)
+#define TIMER_PCR 0x4
+#define TIMER_PCR_INTR_CLR BIT(30)
+
+#define TIMER1_BASE 0x00
+#define TIMER2_BASE 0x08
+#define TIMER3_BASE 0x50
+#define TIMER4_BASE 0x58
+#define TIMER10_BASE 0x90
+
+#define TIMER1_IRQ_IDX 0
+#define TIMER10_IRQ_IDX 10
+
+#define TIMER_1MHz 1000000
+
+static u32 usec_config;
+static void __iomem *timer_reg_base;
+
+static int tegra_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ /*
+ * Tegra's timer uses an n+1 scheme for the counter, i.e. the timer
+ * will fire after one tick if 0 is loaded.
+ *
+ * The minimum and maximum numbers of oneshot ticks are defined by
+ * the clockevents_config_and_register(1, 0x1fffffff + 1) invocation
+ * below, hence the cycles (ticks) can't fall outside the range
+ * supported by the hardware.
+ */
+ writel_relaxed(TIMER_PTV_EN | (cycles - 1), reg_base + TIMER_PTV);
+
+ return 0;
+}
+
+static int tegra_timer_shutdown(struct clock_event_device *evt)
+{
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ writel_relaxed(0, reg_base + TIMER_PTV);
+
+ return 0;
+}
+
+static int tegra_timer_set_periodic(struct clock_event_device *evt)
+{
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+ unsigned long period = timer_of_period(to_timer_of(evt));
+
+ writel_relaxed(TIMER_PTV_EN | TIMER_PTV_PER | (period - 1),
+ reg_base + TIMER_PTV);
+
+ return 0;
+}
+
+static irqreturn_t tegra_timer_isr(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ writel_relaxed(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static void tegra_timer_suspend(struct clock_event_device *evt)
+{
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ writel_relaxed(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
+}
+
+static void tegra_timer_resume(struct clock_event_device *evt)
+{
+ writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
+}
+
+static DEFINE_PER_CPU(struct timer_of, tegra_to) = {
+ .flags = TIMER_OF_CLOCK | TIMER_OF_BASE,
+
+ .clkevt = {
+ .name = "tegra_timer",
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .set_next_event = tegra_timer_set_next_event,
+ .set_state_shutdown = tegra_timer_shutdown,
+ .set_state_periodic = tegra_timer_set_periodic,
+ .set_state_oneshot = tegra_timer_shutdown,
+ .tick_resume = tegra_timer_shutdown,
+ .suspend = tegra_timer_suspend,
+ .resume = tegra_timer_resume,
+ },
+};
+
+static int tegra_timer_setup(unsigned int cpu)
+{
+ struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
+
+ writel_relaxed(0, timer_of_base(to) + TIMER_PTV);
+ writel_relaxed(TIMER_PCR_INTR_CLR, timer_of_base(to) + TIMER_PCR);
+
+ irq_force_affinity(to->clkevt.irq, cpumask_of(cpu));
+ enable_irq(to->clkevt.irq);
+
+ /*
+ * Tegra's timer uses an n+1 scheme for the counter, i.e. the timer
+ * will fire after one tick if 0 is loaded, so the minimum number of
+ * ticks is 1. As a result, both of the clockevent's tick limits are
+ * one higher than the minimum and maximum the hardware register can
+ * take; this is then taken into account by the set_next_event()
+ * callback.
+ */
+ clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
+ 1, /* min */
+ 0x1fffffff + 1); /* max 29 bits + 1 */
+
+ return 0;
+}
+
+static int tegra_timer_stop(unsigned int cpu)
+{
+ struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
+
+ to->clkevt.set_state_shutdown(&to->clkevt);
+ disable_irq_nosync(to->clkevt.irq);
+
+ return 0;
+}
+
+static u64 notrace tegra_read_sched_clock(void)
+{
+ return readl_relaxed(timer_reg_base + TIMERUS_CNTR_1US);
+}
+
+#ifdef CONFIG_ARM
+static unsigned long tegra_delay_timer_read_counter_long(void)
+{
+ return readl_relaxed(timer_reg_base + TIMERUS_CNTR_1US);
+}
+
+static struct delay_timer tegra_delay_timer = {
+ .read_current_timer = tegra_delay_timer_read_counter_long,
+ .freq = TIMER_1MHz,
+};
+#endif
+
+static struct timer_of suspend_rtc_to = {
+ .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
+};
+
+/*
+ * tegra_rtc_read_ms - read the Tegra RTC registers
+ * Care must be taken that this function is not called while the
+ * tegra_rtc driver could be executing, to avoid race conditions
+ * on the RTC shadow register.
+ */
+static u64 tegra_rtc_read_ms(struct clocksource *cs)
+{
+ void __iomem *reg_base = timer_of_base(&suspend_rtc_to);
+
+ u32 ms = readl_relaxed(reg_base + RTC_MILLISECONDS);
+ u32 s = readl_relaxed(reg_base + RTC_SHADOW_SECONDS);
+
+ return (u64)s * MSEC_PER_SEC + ms;
+}
+
+static struct clocksource suspend_rtc_clocksource = {
+ .name = "tegra_suspend_timer",
+ .rating = 200,
+ .read = tegra_rtc_read_ms,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+};
+
+static inline unsigned int tegra_base_for_cpu(int cpu, bool tegra20)
+{
+ if (tegra20) {
+ switch (cpu) {
+ case 0:
+ return TIMER1_BASE;
+ case 1:
+ return TIMER2_BASE;
+ case 2:
+ return TIMER3_BASE;
+ default:
+ return TIMER4_BASE;
+ }
+ }
+
+ return TIMER10_BASE + cpu * 8;
+}
+
+static inline unsigned int tegra_irq_idx_for_cpu(int cpu, bool tegra20)
+{
+ if (tegra20)
+ return TIMER1_IRQ_IDX + cpu;
+
+ return TIMER10_IRQ_IDX + cpu;
+}
+
+static inline unsigned long tegra_rate_for_timer(struct timer_of *to,
+ bool tegra20)
+{
+ /*
+ * TIMER1-9 are fixed to 1MHz, TIMER10-13 are running off the
+ * parent clock.
+ */
+ if (tegra20)
+ return TIMER_1MHz;
+
+ return timer_of_rate(to);
+}
+
+static int __init tegra_init_timer(struct device_node *np, bool tegra20,
+ int rating)
+{
+ struct timer_of *to;
+ int cpu, ret;
+
+ to = this_cpu_ptr(&tegra_to);
+ ret = timer_of_init(np, to);
+ if (ret)
+ goto out;
+
+ timer_reg_base = timer_of_base(to);
+
+ /*
+ * Configure microsecond timers to have 1MHz clock
+ * Config register is 0xqqww, where qq is "dividend", ww is "divisor"
+ * Uses n+1 scheme
+ */
+ switch (timer_of_rate(to)) {
+ case 12000000:
+ usec_config = 0x000b; /* (11+1)/(0+1) */
+ break;
+ case 12800000:
+ usec_config = 0x043f; /* (63+1)/(4+1) */
+ break;
+ case 13000000:
+ usec_config = 0x000c; /* (12+1)/(0+1) */
+ break;
+ case 16800000:
+ usec_config = 0x0453; /* (83+1)/(4+1) */
+ break;
+ case 19200000:
+ usec_config = 0x045f; /* (95+1)/(4+1) */
+ break;
+ case 26000000:
+ usec_config = 0x0019; /* (25+1)/(0+1) */
+ break;
+ case 38400000:
+ usec_config = 0x04bf; /* (191+1)/(4+1) */
+ break;
+ case 48000000:
+ usec_config = 0x002f; /* (47+1)/(0+1) */
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
+
+ for_each_possible_cpu(cpu) {
+ struct timer_of *cpu_to = per_cpu_ptr(&tegra_to, cpu);
+ unsigned long flags = IRQF_TIMER | IRQF_NOBALANCING;
+ unsigned long rate = tegra_rate_for_timer(to, tegra20);
+ unsigned int base = tegra_base_for_cpu(cpu, tegra20);
+ unsigned int idx = tegra_irq_idx_for_cpu(cpu, tegra20);
+ unsigned int irq = irq_of_parse_and_map(np, idx);
+
+ if (!irq) {
+ pr_err("failed to map irq for cpu%d\n", cpu);
+ ret = -EINVAL;
+ goto out_irq;
+ }
+
+ cpu_to->clkevt.irq = irq;
+ cpu_to->clkevt.rating = rating;
+ cpu_to->clkevt.cpumask = cpumask_of(cpu);
+ cpu_to->of_base.base = timer_reg_base + base;
+ cpu_to->of_clk.period = rate / HZ;
+ cpu_to->of_clk.rate = rate;
+
+ irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
+
+ ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr, flags,
+ cpu_to->clkevt.name, &cpu_to->clkevt);
+ if (ret) {
+ pr_err("failed to set up irq for cpu%d: %d\n",
+ cpu, ret);
+ irq_dispose_mapping(cpu_to->clkevt.irq);
+ cpu_to->clkevt.irq = 0;
+ goto out_irq;
+ }
+ }
+
+ sched_clock_register(tegra_read_sched_clock, 32, TIMER_1MHz);
+
+ ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
+ "timer_us", TIMER_1MHz, 300, 32,
+ clocksource_mmio_readl_up);
+ if (ret)
+ pr_err("failed to register clocksource: %d\n", ret);
+
+#ifdef CONFIG_ARM
+ register_current_timer_delay(&tegra_delay_timer);
+#endif
+
+ ret = cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
+ "AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
+ tegra_timer_stop);
+ if (ret)
+ pr_err("failed to set up cpu hp state: %d\n", ret);
+
+ return ret;
+
+out_irq:
+ for_each_possible_cpu(cpu) {
+ struct timer_of *cpu_to;
+
+ cpu_to = per_cpu_ptr(&tegra_to, cpu);
+ if (cpu_to->clkevt.irq) {
+ free_irq(cpu_to->clkevt.irq, &cpu_to->clkevt);
+ irq_dispose_mapping(cpu_to->clkevt.irq);
+ }
+ }
+
+ to->of_base.base = timer_reg_base;
+out:
+ timer_of_cleanup(to);
+
+ return ret;
+}
+
+static int __init tegra210_init_timer(struct device_node *np)
+{
+ /*
+ * The arch-timer can't survive a power cycle of the CPU core or
+ * the CPUPORESET signal due to a system design shortcoming,
+ * hence the tegra-timer is preferable on Tegra210.
+ */
+ return tegra_init_timer(np, false, 460);
+}
+TIMER_OF_DECLARE(tegra210_timer, "nvidia,tegra210-timer", tegra210_init_timer);
+
+static int __init tegra20_init_timer(struct device_node *np)
+{
+ int rating;
+
+ /*
+ * Tegra20 and Tegra30 have a Cortex-A9 CPU with a TWD timer. That
+ * timer runs off the CPU clock and is hence subject to jitter
+ * caused by DVFS clock rate changes, so the tegra-timer is
+ * preferable on these older Tegras. Later SoC generations have the
+ * arch-timer as the main per-CPU timer, which is not affected by
+ * DVFS changes.
+ */
+ if (of_machine_is_compatible("nvidia,tegra20") ||
+ of_machine_is_compatible("nvidia,tegra30"))
+ rating = 460;
+ else
+ rating = 330;
+
+ return tegra_init_timer(np, true, rating);
+}
+TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
+
+static int __init tegra20_init_rtc(struct device_node *np)
+{
+ int ret;
+
+ ret = timer_of_init(np, &suspend_rtc_to);
+ if (ret)
+ return ret;
+
+ return clocksource_register_hz(&suspend_rtc_clocksource, 1000);
+}
+TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
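
The usec_config switch in tegra_init_timer() hardcodes one divider pair per supported oscillator rate. The encoding follows 1 MHz = rate * (dividend + 1) / (divisor + 1), with both fields stored n+1 in the 0xqqww layout, so the table entries can also be derived arithmetically. A hypothetical helper (not part of the driver) that reproduces them, assuming both reduced terms fit in their 8-bit fields:

	#include <linux/gcd.h>

	static u32 tegra_usec_config(unsigned long rate)
	{
		unsigned long g = gcd(rate, USEC_PER_SEC);
		unsigned long dividend = USEC_PER_SEC / g;	/* "qq" + 1 */
		unsigned long divisor = rate / g;		/* "ww" + 1 */

		return ((dividend - 1) << 8) | (divisor - 1);
	}

For example, 19200000 Hz reduces to 5/96, giving ((5 - 1) << 8) | (96 - 1) = 0x045f, matching the table above.
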
diff --git a/drivers/clocksource/timer-tegra20.c b/drivers/clocksource/timer-tegra20.c
deleted file mode 100644
index 1e7ece279730..000000000000
--- a/drivers/clocksource/timer-tegra20.c
+++ /dev/null
@@ -1,379 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2010 Google, Inc.
- *
- * Author:
- * Colin Cross <ccross@google.com>
- */
-
-#include <linux/clk.h>
-#include <linux/clockchips.h>
-#include <linux/cpu.h>
-#include <linux/cpumask.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/percpu.h>
-#include <linux/sched_clock.h>
-#include <linux/time.h>
-
-#include "timer-of.h"
-
-#ifdef CONFIG_ARM
-#include <asm/mach/time.h>
-#endif
-
-#define RTC_SECONDS 0x08
-#define RTC_SHADOW_SECONDS 0x0c
-#define RTC_MILLISECONDS 0x10
-
-#define TIMERUS_CNTR_1US 0x10
-#define TIMERUS_USEC_CFG 0x14
-#define TIMERUS_CNTR_FREEZE 0x4c
-
-#define TIMER_PTV 0x0
-#define TIMER_PTV_EN BIT(31)
-#define TIMER_PTV_PER BIT(30)
-#define TIMER_PCR 0x4
-#define TIMER_PCR_INTR_CLR BIT(30)
-
-#ifdef CONFIG_ARM
-#define TIMER_CPU0 0x50 /* TIMER3 */
-#else
-#define TIMER_CPU0 0x90 /* TIMER10 */
-#define TIMER10_IRQ_IDX 10
-#define IRQ_IDX_FOR_CPU(cpu) (TIMER10_IRQ_IDX + cpu)
-#endif
-#define TIMER_BASE_FOR_CPU(cpu) (TIMER_CPU0 + (cpu) * 8)
-
-static u32 usec_config;
-static void __iomem *timer_reg_base;
-#ifdef CONFIG_ARM
-static struct delay_timer tegra_delay_timer;
-#endif
-
-static int tegra_timer_set_next_event(unsigned long cycles,
- struct clock_event_device *evt)
-{
- void __iomem *reg_base = timer_of_base(to_timer_of(evt));
-
- writel(TIMER_PTV_EN |
- ((cycles > 1) ? (cycles - 1) : 0), /* n+1 scheme */
- reg_base + TIMER_PTV);
-
- return 0;
-}
-
-static int tegra_timer_shutdown(struct clock_event_device *evt)
-{
- void __iomem *reg_base = timer_of_base(to_timer_of(evt));
-
- writel(0, reg_base + TIMER_PTV);
-
- return 0;
-}
-
-static int tegra_timer_set_periodic(struct clock_event_device *evt)
-{
- void __iomem *reg_base = timer_of_base(to_timer_of(evt));
-
- writel(TIMER_PTV_EN | TIMER_PTV_PER |
- ((timer_of_rate(to_timer_of(evt)) / HZ) - 1),
- reg_base + TIMER_PTV);
-
- return 0;
-}
-
-static irqreturn_t tegra_timer_isr(int irq, void *dev_id)
-{
- struct clock_event_device *evt = (struct clock_event_device *)dev_id;
- void __iomem *reg_base = timer_of_base(to_timer_of(evt));
-
- writel(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static void tegra_timer_suspend(struct clock_event_device *evt)
-{
- void __iomem *reg_base = timer_of_base(to_timer_of(evt));
-
- writel(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
-}
-
-static void tegra_timer_resume(struct clock_event_device *evt)
-{
- writel(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
-}
-
-#ifdef CONFIG_ARM64
-static DEFINE_PER_CPU(struct timer_of, tegra_to) = {
- .flags = TIMER_OF_CLOCK | TIMER_OF_BASE,
-
- .clkevt = {
- .name = "tegra_timer",
- .rating = 460,
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .set_next_event = tegra_timer_set_next_event,
- .set_state_shutdown = tegra_timer_shutdown,
- .set_state_periodic = tegra_timer_set_periodic,
- .set_state_oneshot = tegra_timer_shutdown,
- .tick_resume = tegra_timer_shutdown,
- .suspend = tegra_timer_suspend,
- .resume = tegra_timer_resume,
- },
-};
-
-static int tegra_timer_setup(unsigned int cpu)
-{
- struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
-
- irq_force_affinity(to->clkevt.irq, cpumask_of(cpu));
- enable_irq(to->clkevt.irq);
-
- clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
- 1, /* min */
- 0x1fffffff); /* 29 bits */
-
- return 0;
-}
-
-static int tegra_timer_stop(unsigned int cpu)
-{
- struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
-
- to->clkevt.set_state_shutdown(&to->clkevt);
- disable_irq_nosync(to->clkevt.irq);
-
- return 0;
-}
-#else /* CONFIG_ARM */
-static struct timer_of tegra_to = {
- .flags = TIMER_OF_CLOCK | TIMER_OF_BASE | TIMER_OF_IRQ,
-
- .clkevt = {
- .name = "tegra_timer",
- .rating = 300,
- .features = CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_DYNIRQ,
- .set_next_event = tegra_timer_set_next_event,
- .set_state_shutdown = tegra_timer_shutdown,
- .set_state_periodic = tegra_timer_set_periodic,
- .set_state_oneshot = tegra_timer_shutdown,
- .tick_resume = tegra_timer_shutdown,
- .suspend = tegra_timer_suspend,
- .resume = tegra_timer_resume,
- .cpumask = cpu_possible_mask,
- },
-
- .of_irq = {
- .index = 2,
- .flags = IRQF_TIMER | IRQF_TRIGGER_HIGH,
- .handler = tegra_timer_isr,
- },
-};
-
-static u64 notrace tegra_read_sched_clock(void)
-{
- return readl(timer_reg_base + TIMERUS_CNTR_1US);
-}
-
-static unsigned long tegra_delay_timer_read_counter_long(void)
-{
- return readl(timer_reg_base + TIMERUS_CNTR_1US);
-}
-
-static struct timer_of suspend_rtc_to = {
- .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
-};
-
-/*
- * tegra_rtc_read - Reads the Tegra RTC registers
- * Care must be taken that this funciton is not called while the
- * tegra_rtc driver could be executing to avoid race conditions
- * on the RTC shadow register
- */
-static u64 tegra_rtc_read_ms(struct clocksource *cs)
-{
- u32 ms = readl(timer_of_base(&suspend_rtc_to) + RTC_MILLISECONDS);
- u32 s = readl(timer_of_base(&suspend_rtc_to) + RTC_SHADOW_SECONDS);
- return (u64)s * MSEC_PER_SEC + ms;
-}
-
-static struct clocksource suspend_rtc_clocksource = {
- .name = "tegra_suspend_timer",
- .rating = 200,
- .read = tegra_rtc_read_ms,
- .mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
-};
-#endif
-
-static int tegra_timer_common_init(struct device_node *np, struct timer_of *to)
-{
- int ret = 0;
-
- ret = timer_of_init(np, to);
- if (ret < 0)
- goto out;
-
- timer_reg_base = timer_of_base(to);
-
- /*
- * Configure microsecond timers to have 1MHz clock
- * Config register is 0xqqww, where qq is "dividend", ww is "divisor"
- * Uses n+1 scheme
- */
- switch (timer_of_rate(to)) {
- case 12000000:
- usec_config = 0x000b; /* (11+1)/(0+1) */
- break;
- case 12800000:
- usec_config = 0x043f; /* (63+1)/(4+1) */
- break;
- case 13000000:
- usec_config = 0x000c; /* (12+1)/(0+1) */
- break;
- case 16800000:
- usec_config = 0x0453; /* (83+1)/(4+1) */
- break;
- case 19200000:
- usec_config = 0x045f; /* (95+1)/(4+1) */
- break;
- case 26000000:
- usec_config = 0x0019; /* (25+1)/(0+1) */
- break;
- case 38400000:
- usec_config = 0x04bf; /* (191+1)/(4+1) */
- break;
- case 48000000:
- usec_config = 0x002f; /* (47+1)/(0+1) */
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- writel(usec_config, timer_of_base(to) + TIMERUS_USEC_CFG);
-
-out:
- return ret;
-}
-
-#ifdef CONFIG_ARM64
-static int __init tegra_init_timer(struct device_node *np)
-{
- int cpu, ret = 0;
- struct timer_of *to;
-
- to = this_cpu_ptr(&tegra_to);
- ret = tegra_timer_common_init(np, to);
- if (ret < 0)
- goto out;
-
- for_each_possible_cpu(cpu) {
- struct timer_of *cpu_to;
-
- cpu_to = per_cpu_ptr(&tegra_to, cpu);
- cpu_to->of_base.base = timer_reg_base + TIMER_BASE_FOR_CPU(cpu);
- cpu_to->of_clk.rate = timer_of_rate(to);
- cpu_to->clkevt.cpumask = cpumask_of(cpu);
- cpu_to->clkevt.irq =
- irq_of_parse_and_map(np, IRQ_IDX_FOR_CPU(cpu));
- if (!cpu_to->clkevt.irq) {
- pr_err("%s: can't map IRQ for CPU%d\n",
- __func__, cpu);
- ret = -EINVAL;
- goto out;
- }
-
- irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
- ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr,
- IRQF_TIMER | IRQF_NOBALANCING,
- cpu_to->clkevt.name, &cpu_to->clkevt);
- if (ret) {
- pr_err("%s: cannot setup irq %d for CPU%d\n",
- __func__, cpu_to->clkevt.irq, cpu);
- ret = -EINVAL;
- goto out_irq;
- }
- }
-
- cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
- "AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
- tegra_timer_stop);
-
- return ret;
-out_irq:
- for_each_possible_cpu(cpu) {
- struct timer_of *cpu_to;
-
- cpu_to = per_cpu_ptr(&tegra_to, cpu);
- if (cpu_to->clkevt.irq) {
- free_irq(cpu_to->clkevt.irq, &cpu_to->clkevt);
- irq_dispose_mapping(cpu_to->clkevt.irq);
- }
- }
-out:
- timer_of_cleanup(to);
- return ret;
-}
-#else /* CONFIG_ARM */
-static int __init tegra_init_timer(struct device_node *np)
-{
- int ret = 0;
-
- ret = tegra_timer_common_init(np, &tegra_to);
- if (ret < 0)
- goto out;
-
- tegra_to.of_base.base = timer_reg_base + TIMER_BASE_FOR_CPU(0);
- tegra_to.of_clk.rate = 1000000; /* microsecond timer */
-
- sched_clock_register(tegra_read_sched_clock, 32,
- timer_of_rate(&tegra_to));
- ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
- "timer_us", timer_of_rate(&tegra_to),
- 300, 32, clocksource_mmio_readl_up);
- if (ret) {
- pr_err("Failed to register clocksource\n");
- goto out;
- }
-
- tegra_delay_timer.read_current_timer =
- tegra_delay_timer_read_counter_long;
- tegra_delay_timer.freq = timer_of_rate(&tegra_to);
- register_current_timer_delay(&tegra_delay_timer);
-
- clockevents_config_and_register(&tegra_to.clkevt,
- timer_of_rate(&tegra_to),
- 0x1,
- 0x1fffffff);
-
- return ret;
-out:
- timer_of_cleanup(&tegra_to);
-
- return ret;
-}
-
-static int __init tegra20_init_rtc(struct device_node *np)
-{
- int ret;
-
- ret = timer_of_init(np, &suspend_rtc_to);
- if (ret)
- return ret;
-
- clocksource_register_hz(&suspend_rtc_clocksource, 1000);
-
- return 0;
-}
-TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
-#endif
-TIMER_OF_DECLARE(tegra210_timer, "nvidia,tegra210-timer", tegra_init_timer);
-TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra_init_timer);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0af08081e305..603413f28fa3 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -520,10 +520,13 @@ config CRYPTO_DEV_ATMEL_SHA
To compile this driver as a module, choose M here: the module
will be called atmel-sha.
+config CRYPTO_DEV_ATMEL_I2C
+ tristate
+
config CRYPTO_DEV_ATMEL_ECC
tristate "Support for Microchip / Atmel ECC hw accelerator"
- depends on ARCH_AT91 || COMPILE_TEST
depends on I2C
+ select CRYPTO_DEV_ATMEL_I2C
select CRYPTO_ECDH
select CRC16
help
@@ -534,6 +537,21 @@ config CRYPTO_DEV_ATMEL_ECC
To compile this driver as a module, choose M here: the module
will be called atmel-ecc.
+config CRYPTO_DEV_ATMEL_SHA204A
+ tristate "Support for Microchip / Atmel SHA accelerator and RNG"
+ depends on I2C
+ select CRYPTO_DEV_ATMEL_I2C
+ select HW_RANDOM
+ select CRC16
+ help
+ Microchip / Atmel SHA accelerator and RNG.
+ Select this if you want to use the Microchip / Atmel SHA204A
+ module as a random number generator. (Other functions of the
+ chip are currently not exposed by this driver.)
+
+ To compile this driver as a module, choose M here: the module
+ will be called atmel-sha204a.
+
config CRYPTO_DEV_CCP
bool "Support for AMD Secure Processor"
depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index a23a7197fcd7..afc4753b5d28 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,7 +2,9 @@
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += atmel-i2c.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o
obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 49f3e0ce242c..cbfc607282f4 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -67,12 +67,16 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
}
static inline int crypto4xx_crypt(struct skcipher_request *req,
- const unsigned int ivlen, bool decrypt)
+ const unsigned int ivlen, bool decrypt,
+ bool check_blocksize)
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
__le32 iv[AES_IV_SIZE];
+ if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
+ return -EINVAL;
+
if (ivlen)
crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
@@ -81,24 +85,34 @@ static inline int crypto4xx_crypt(struct skcipher_request *req,
ctx->sa_len, 0, NULL);
}
-int crypto4xx_encrypt_noiv(struct skcipher_request *req)
+int crypto4xx_encrypt_noiv_block(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, 0, false, true);
+}
+
+int crypto4xx_encrypt_iv_stream(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, AES_IV_SIZE, false, false);
+}
+
+int crypto4xx_decrypt_noiv_block(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, 0, false);
+ return crypto4xx_crypt(req, 0, true, true);
}
-int crypto4xx_encrypt_iv(struct skcipher_request *req)
+int crypto4xx_decrypt_iv_stream(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, AES_IV_SIZE, false);
+ return crypto4xx_crypt(req, AES_IV_SIZE, true, false);
}
-int crypto4xx_decrypt_noiv(struct skcipher_request *req)
+int crypto4xx_encrypt_iv_block(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, 0, true);
+ return crypto4xx_crypt(req, AES_IV_SIZE, false, true);
}
-int crypto4xx_decrypt_iv(struct skcipher_request *req)
+int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, AES_IV_SIZE, true);
+ return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
}
/**
@@ -269,8 +283,8 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
return ret;
}
- return encrypt ? crypto4xx_encrypt_iv(req)
- : crypto4xx_decrypt_iv(req);
+ return encrypt ? crypto4xx_encrypt_iv_stream(req)
+ : crypto4xx_decrypt_iv_stream(req);
}
static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
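
The new check_blocksize argument splits the entry points into two families: block modes (ECB, CBC) must reject requests that are not a whole number of AES blocks, while stream-like modes (CFB, OFB, CTR) accept any length and accordingly advertise cra_blocksize = 1 below. A condensed illustration of the policy (a sketch, not driver code):

	static int crypt_len_ok(unsigned int cryptlen, bool block_mode)
	{
		/* e.g. 20 bytes is invalid for aes-cbc but fine for aes-ctr */
		if (block_mode && !IS_ALIGNED(cryptlen, AES_BLOCK_SIZE))
			return -EINVAL;

		return 0;
	}
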
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 16d911aaa508..de5e9352e920 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -182,7 +182,6 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
dev->pdr_pa);
return -ENOMEM;
}
- memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
&dev->shadow_sa_pool_pa,
@@ -1210,8 +1209,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_cbc,
- .encrypt = crypto4xx_encrypt_iv,
- .decrypt = crypto4xx_decrypt_iv,
+ .encrypt = crypto4xx_encrypt_iv_block,
+ .decrypt = crypto4xx_decrypt_iv_block,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
@@ -1222,7 +1221,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
@@ -1230,8 +1229,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_cfb,
- .encrypt = crypto4xx_encrypt_iv,
- .decrypt = crypto4xx_decrypt_iv,
+ .encrypt = crypto4xx_encrypt_iv_stream,
+ .decrypt = crypto4xx_decrypt_iv_stream,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
@@ -1243,7 +1242,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
@@ -1263,7 +1262,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
@@ -1290,8 +1289,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = crypto4xx_setkey_aes_ecb,
- .encrypt = crypto4xx_encrypt_noiv,
- .decrypt = crypto4xx_decrypt_noiv,
+ .encrypt = crypto4xx_encrypt_noiv_block,
+ .decrypt = crypto4xx_decrypt_noiv_block,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
@@ -1302,7 +1301,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
@@ -1310,8 +1309,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_ofb,
- .encrypt = crypto4xx_encrypt_iv,
- .decrypt = crypto4xx_decrypt_iv,
+ .encrypt = crypto4xx_encrypt_iv_stream,
+ .decrypt = crypto4xx_decrypt_iv_stream,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index ca1c25c40c23..6b6841359190 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -173,10 +173,12 @@ int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_encrypt_ctr(struct skcipher_request *req);
int crypto4xx_decrypt_ctr(struct skcipher_request *req);
-int crypto4xx_encrypt_iv(struct skcipher_request *req);
-int crypto4xx_decrypt_iv(struct skcipher_request *req);
-int crypto4xx_encrypt_noiv(struct skcipher_request *req);
-int crypto4xx_decrypt_noiv(struct skcipher_request *req);
+int crypto4xx_encrypt_iv_stream(struct skcipher_request *req);
+int crypto4xx_decrypt_iv_stream(struct skcipher_request *req);
+int crypto4xx_encrypt_iv_block(struct skcipher_request *req);
+int crypto4xx_decrypt_iv_block(struct skcipher_request *req);
+int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
+int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index ba00e4563ca0..ff02cc05affb 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -6,8 +6,6 @@
* Author: Tudor Ambarus <tudor.ambarus@microchip.com>
*/
-#include <linux/bitrev.h>
-#include <linux/crc16.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -23,42 +21,11 @@
#include <crypto/internal/kpp.h>
#include <crypto/ecdh.h>
#include <crypto/kpp.h>
-#include "atmel-ecc.h"
-
-/* Used for binding tfm objects to i2c clients. */
-struct atmel_ecc_driver_data {
- struct list_head i2c_client_list;
- spinlock_t i2c_list_lock;
-} ____cacheline_aligned;
+#include "atmel-i2c.h"
static struct atmel_ecc_driver_data driver_data;
/**
- * atmel_ecc_i2c_client_priv - i2c_client private data
- * @client : pointer to i2c client device
- * @i2c_client_list_node: part of i2c_client_list
- * @lock : lock for sending i2c commands
- * @wake_token : wake token array of zeros
- * @wake_token_sz : size in bytes of the wake_token
- * @tfm_count : number of active crypto transformations on i2c client
- *
- * Reads and writes from/to the i2c client are sequential. The first byte
- * transmitted to the device is treated as the byte size. Any attempt to send
- * more than this number of bytes will cause the device to not ACK those bytes.
- * After the host writes a single command byte to the input buffer, reads are
- * prohibited until after the device completes command execution. Use a mutex
- * when sending i2c commands.
- */
-struct atmel_ecc_i2c_client_priv {
- struct i2c_client *client;
- struct list_head i2c_client_list_node;
- struct mutex lock;
- u8 wake_token[WAKE_TOKEN_MAX_SIZE];
- size_t wake_token_sz;
- atomic_t tfm_count ____cacheline_aligned;
-};
-
-/**
* atmel_ecdh_ctx - transformation context
* @client : pointer to i2c client device
* @fallback : used for unsupported curves or when user wants to use its own
@@ -80,188 +47,12 @@ struct atmel_ecdh_ctx {
bool do_fallback;
};
-/**
- * atmel_ecc_work_data - data structure representing the work
- * @ctx : transformation context.
- * @cbk : pointer to a callback function to be invoked upon completion of this
- * request. This has the form:
- * callback(struct atmel_ecc_work_data *work_data, void *areq, u8 status)
- * where:
- * @work_data: data structure representing the work
- * @areq : optional pointer to an argument passed with the original
- * request.
- * @status : status returned from the i2c client device or i2c error.
- * @areq: optional pointer to a user argument for use at callback time.
- * @work: describes the task to be executed.
- * @cmd : structure used for communicating with the device.
- */
-struct atmel_ecc_work_data {
- struct atmel_ecdh_ctx *ctx;
- void (*cbk)(struct atmel_ecc_work_data *work_data, void *areq,
- int status);
- void *areq;
- struct work_struct work;
- struct atmel_ecc_cmd cmd;
-};
-
-static u16 atmel_ecc_crc16(u16 crc, const u8 *buffer, size_t len)
-{
- return cpu_to_le16(bitrev16(crc16(crc, buffer, len)));
-}
-
-/**
- * atmel_ecc_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
- * CRC16 verification of the count, opcode, param1, param2 and data bytes.
- * The checksum is saved in little-endian format in the least significant
- * two bytes of the command. CRC polynomial is 0x8005 and the initial register
- * value should be zero.
- *
- * @cmd : structure used for communicating with the device.
- */
-static void atmel_ecc_checksum(struct atmel_ecc_cmd *cmd)
-{
- u8 *data = &cmd->count;
- size_t len = cmd->count - CRC_SIZE;
- u16 *crc16 = (u16 *)(data + len);
-
- *crc16 = atmel_ecc_crc16(0, data, len);
-}
-
-static void atmel_ecc_init_read_cmd(struct atmel_ecc_cmd *cmd)
-{
- cmd->word_addr = COMMAND;
- cmd->opcode = OPCODE_READ;
- /*
- * Read the word from Configuration zone that contains the lock bytes
- * (UserExtra, Selector, LockValue, LockConfig).
- */
- cmd->param1 = CONFIG_ZONE;
- cmd->param2 = DEVICE_LOCK_ADDR;
- cmd->count = READ_COUNT;
-
- atmel_ecc_checksum(cmd);
-
- cmd->msecs = MAX_EXEC_TIME_READ;
- cmd->rxsize = READ_RSP_SIZE;
-}
-
-static void atmel_ecc_init_genkey_cmd(struct atmel_ecc_cmd *cmd, u16 keyid)
-{
- cmd->word_addr = COMMAND;
- cmd->count = GENKEY_COUNT;
- cmd->opcode = OPCODE_GENKEY;
- cmd->param1 = GENKEY_MODE_PRIVATE;
- /* a random private key will be generated and stored in slot keyID */
- cmd->param2 = cpu_to_le16(keyid);
-
- atmel_ecc_checksum(cmd);
-
- cmd->msecs = MAX_EXEC_TIME_GENKEY;
- cmd->rxsize = GENKEY_RSP_SIZE;
-}
-
-static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd,
- struct scatterlist *pubkey)
-{
- size_t copied;
-
- cmd->word_addr = COMMAND;
- cmd->count = ECDH_COUNT;
- cmd->opcode = OPCODE_ECDH;
- cmd->param1 = ECDH_PREFIX_MODE;
- /* private key slot */
- cmd->param2 = cpu_to_le16(DATA_SLOT_2);
-
- /*
- * The device only supports NIST P256 ECC keys. The public key size will
- * always be the same. Use a macro for the key size to avoid unnecessary
- * computations.
- */
- copied = sg_copy_to_buffer(pubkey,
- sg_nents_for_len(pubkey,
- ATMEL_ECC_PUBKEY_SIZE),
- cmd->data, ATMEL_ECC_PUBKEY_SIZE);
- if (copied != ATMEL_ECC_PUBKEY_SIZE)
- return -EINVAL;
-
- atmel_ecc_checksum(cmd);
-
- cmd->msecs = MAX_EXEC_TIME_ECDH;
- cmd->rxsize = ECDH_RSP_SIZE;
-
- return 0;
-}
-
-/*
- * After wake and after execution of a command, there will be error, status, or
- * result bytes in the device's output register that can be retrieved by the
- * system. When the length of that group is four bytes, the codes returned are
- * detailed in error_list.
- */
-static int atmel_ecc_status(struct device *dev, u8 *status)
-{
- size_t err_list_len = ARRAY_SIZE(error_list);
- int i;
- u8 err_id = status[1];
-
- if (*status != STATUS_SIZE)
- return 0;
-
- if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR)
- return 0;
-
- for (i = 0; i < err_list_len; i++)
- if (error_list[i].value == err_id)
- break;
-
- /* if err_id is not in the error_list then ignore it */
- if (i != err_list_len) {
- dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text);
- return err_id;
- }
-
- return 0;
-}
-
-static int atmel_ecc_wakeup(struct i2c_client *client)
-{
- struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
- u8 status[STATUS_RSP_SIZE];
- int ret;
-
- /*
- * The device ignores any levels or transitions on the SCL pin when the
- * device is idle, asleep or during waking up. Don't check for error
- * when waking up the device.
- */
- i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
-
- /*
- * Wait to wake the device. Typical execution times for ecdh and genkey
- * are around tens of milliseconds. Delta is chosen to 50 microseconds.
- */
- usleep_range(TWHI_MIN, TWHI_MAX);
-
- ret = i2c_master_recv(client, status, STATUS_SIZE);
- if (ret < 0)
- return ret;
-
- return atmel_ecc_status(&client->dev, status);
-}
-
-static int atmel_ecc_sleep(struct i2c_client *client)
-{
- u8 sleep = SLEEP_TOKEN;
-
- return i2c_master_send(client, &sleep, 1);
-}
-
-static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq,
+static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
int status)
{
struct kpp_request *req = areq;
struct atmel_ecdh_ctx *ctx = work_data->ctx;
- struct atmel_ecc_cmd *cmd = &work_data->cmd;
+ struct atmel_i2c_cmd *cmd = &work_data->cmd;
size_t copied, n_sz;
if (status)
@@ -282,82 +73,6 @@ free_work_data:
kpp_request_complete(req, status);
}
-/*
- * atmel_ecc_send_receive() - send a command to the device and receive its
- * response.
- * @client: i2c client device
- * @cmd : structure used to communicate with the device
- *
- * After the device receives a Wake token, a watchdog counter starts within the
- * device. After the watchdog timer expires, the device enters sleep mode
- * regardless of whether some I/O transmission or command execution is in
- * progress. If a command is attempted when insufficient time remains prior to
- * watchdog timer execution, the device will return the watchdog timeout error
- * code without attempting to execute the command. There is no way to reset the
- * counter other than to put the device into sleep or idle mode and then
- * wake it up again.
- */
-static int atmel_ecc_send_receive(struct i2c_client *client,
- struct atmel_ecc_cmd *cmd)
-{
- struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
- int ret;
-
- mutex_lock(&i2c_priv->lock);
-
- ret = atmel_ecc_wakeup(client);
- if (ret)
- goto err;
-
- /* send the command */
- ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE);
- if (ret < 0)
- goto err;
-
- /* delay the appropriate amount of time for command to execute */
- msleep(cmd->msecs);
-
- /* receive the response */
- ret = i2c_master_recv(client, cmd->data, cmd->rxsize);
- if (ret < 0)
- goto err;
-
- /* put the device into low-power mode */
- ret = atmel_ecc_sleep(client);
- if (ret < 0)
- goto err;
-
- mutex_unlock(&i2c_priv->lock);
- return atmel_ecc_status(&client->dev, cmd->data);
-err:
- mutex_unlock(&i2c_priv->lock);
- return ret;
-}
-
-static void atmel_ecc_work_handler(struct work_struct *work)
-{
- struct atmel_ecc_work_data *work_data =
- container_of(work, struct atmel_ecc_work_data, work);
- struct atmel_ecc_cmd *cmd = &work_data->cmd;
- struct i2c_client *client = work_data->ctx->client;
- int status;
-
- status = atmel_ecc_send_receive(client, cmd);
- work_data->cbk(work_data, work_data->areq, status);
-}
-
-static void atmel_ecc_enqueue(struct atmel_ecc_work_data *work_data,
- void (*cbk)(struct atmel_ecc_work_data *work_data,
- void *areq, int status),
- void *areq)
-{
- work_data->cbk = (void *)cbk;
- work_data->areq = areq;
-
- INIT_WORK(&work_data->work, atmel_ecc_work_handler);
- schedule_work(&work_data->work);
-}
-
static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id)
{
if (curve_id == ECC_CURVE_NIST_P256)
@@ -374,7 +89,7 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
- struct atmel_ecc_cmd *cmd;
+ struct atmel_i2c_cmd *cmd;
void *public_key;
struct ecdh params;
int ret = -ENOMEM;
@@ -412,9 +127,9 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
ctx->do_fallback = false;
ctx->curve_id = params.curve_id;
- atmel_ecc_init_genkey_cmd(cmd, DATA_SLOT_2);
+ atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2);
- ret = atmel_ecc_send_receive(ctx->client, cmd);
+ ret = atmel_i2c_send_receive(ctx->client, cmd);
if (ret)
goto free_public_key;
@@ -444,6 +159,9 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req)
return crypto_kpp_generate_public_key(req);
}
+ if (!ctx->public_key)
+ return -EINVAL;
+
/* might want less than we've got */
nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len);
@@ -461,7 +179,7 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
- struct atmel_ecc_work_data *work_data;
+ struct atmel_i2c_work_data *work_data;
gfp_t gfp;
int ret;
@@ -482,12 +200,13 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
return -ENOMEM;
work_data->ctx = ctx;
+ work_data->client = ctx->client;
- ret = atmel_ecc_init_ecdh_cmd(&work_data->cmd, req->src);
+ ret = atmel_i2c_init_ecdh_cmd(&work_data->cmd, req->src);
if (ret)
goto free_work_data;
- atmel_ecc_enqueue(work_data, atmel_ecdh_done, req);
+ atmel_i2c_enqueue(work_data, atmel_ecdh_done, req);
return -EINPROGRESS;
@@ -498,7 +217,7 @@ free_work_data:
static struct i2c_client *atmel_ecc_i2c_client_alloc(void)
{
- struct atmel_ecc_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL;
+ struct atmel_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL;
struct i2c_client *client = ERR_PTR(-ENODEV);
int min_tfm_cnt = INT_MAX;
int tfm_cnt;
@@ -533,7 +252,7 @@ static struct i2c_client *atmel_ecc_i2c_client_alloc(void)
static void atmel_ecc_i2c_client_free(struct i2c_client *client)
{
- struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+ struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
atomic_dec(&i2c_priv->tfm_count);
}
@@ -604,96 +323,18 @@ static struct kpp_alg atmel_ecdh = {
},
};
-static inline size_t atmel_ecc_wake_token_sz(u32 bus_clk_rate)
-{
- u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);
-
- /* return the size of the wake_token in bytes */
- return DIV_ROUND_UP(no_of_bits, 8);
-}
-
-static int device_sanity_check(struct i2c_client *client)
-{
- struct atmel_ecc_cmd *cmd;
- int ret;
-
- cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- atmel_ecc_init_read_cmd(cmd);
-
- ret = atmel_ecc_send_receive(client, cmd);
- if (ret)
- goto free_cmd;
-
- /*
- * It is vital that the Configuration, Data and OTP zones be locked
- * prior to release into the field of the system containing the device.
- * Failure to lock these zones may permit modification of any secret
- * keys and may lead to other security problems.
- */
- if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) {
- dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n");
- ret = -ENOTSUPP;
- }
-
- /* fall through */
-free_cmd:
- kfree(cmd);
- return ret;
-}
-
static int atmel_ecc_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct atmel_ecc_i2c_client_priv *i2c_priv;
- struct device *dev = &client->dev;
+ struct atmel_i2c_client_priv *i2c_priv;
int ret;
- u32 bus_clk_rate;
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(dev, "I2C_FUNC_I2C not supported\n");
- return -ENODEV;
- }
- ret = of_property_read_u32(client->adapter->dev.of_node,
- "clock-frequency", &bus_clk_rate);
- if (ret) {
- dev_err(dev, "of: failed to read clock-frequency property\n");
- return ret;
- }
-
- if (bus_clk_rate > 1000000L) {
- dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
- bus_clk_rate);
- return -EINVAL;
- }
-
- i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL);
- if (!i2c_priv)
- return -ENOMEM;
-
- i2c_priv->client = client;
- mutex_init(&i2c_priv->lock);
-
- /*
- * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate -
- * 1MHz. The previous bus_clk_rate check ensures us that wake_token_sz
- * will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE.
- */
- i2c_priv->wake_token_sz = atmel_ecc_wake_token_sz(bus_clk_rate);
-
- memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token));
-
- atomic_set(&i2c_priv->tfm_count, 0);
-
- i2c_set_clientdata(client, i2c_priv);
-
- ret = device_sanity_check(client);
+ ret = atmel_i2c_probe(client, id);
if (ret)
return ret;
+ i2c_priv = i2c_get_clientdata(client);
+
spin_lock(&driver_data.i2c_list_lock);
list_add_tail(&i2c_priv->i2c_client_list_node,
&driver_data.i2c_client_list);
@@ -705,10 +346,10 @@ static int atmel_ecc_probe(struct i2c_client *client,
list_del(&i2c_priv->i2c_client_list_node);
spin_unlock(&driver_data.i2c_list_lock);
- dev_err(dev, "%s alg registration failed\n",
+ dev_err(&client->dev, "%s alg registration failed\n",
atmel_ecdh.base.cra_driver_name);
} else {
- dev_info(dev, "atmel ecc algorithms registered in /proc/crypto\n");
+ dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n");
}
return ret;
@@ -716,7 +357,7 @@ static int atmel_ecc_probe(struct i2c_client *client,
static int atmel_ecc_remove(struct i2c_client *client)
{
- struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+ struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
/* Return EBUSY if i2c client already allocated. */
if (atomic_read(&i2c_priv->tfm_count)) {
diff --git a/drivers/crypto/atmel-ecc.h b/drivers/crypto/atmel-ecc.h
deleted file mode 100644
index 643a3b947338..000000000000
--- a/drivers/crypto/atmel-ecc.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2017, Microchip Technology Inc.
- * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
- */
-
-#ifndef __ATMEL_ECC_H__
-#define __ATMEL_ECC_H__
-
-#define ATMEL_ECC_PRIORITY 300
-
-#define COMMAND 0x03 /* packet function */
-#define SLEEP_TOKEN 0x01
-#define WAKE_TOKEN_MAX_SIZE 8
-
-/* Definitions of Data and Command sizes */
-#define WORD_ADDR_SIZE 1
-#define COUNT_SIZE 1
-#define CRC_SIZE 2
-#define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE)
-
-/* size in bytes of the n prime */
-#define ATMEL_ECC_NIST_P256_N_SIZE 32
-#define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE)
-
-#define STATUS_RSP_SIZE 4
-#define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE)
-#define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \
- CMD_OVERHEAD_SIZE)
-#define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE)
-#define MAX_RSP_SIZE GENKEY_RSP_SIZE
-
-/**
- * atmel_ecc_cmd - structure used for communicating with the device.
- * @word_addr: indicates the function of the packet sent to the device. This
- * byte should have a value of COMMAND for normal operation.
- * @count : number of bytes to be transferred to (or from) the device.
- * @opcode : the command code.
- * @param1 : the first parameter; always present.
- * @param2 : the second parameter; always present.
- * @data : optional remaining input data. Includes a 2-byte CRC.
- * @rxsize : size of the data received from i2c client.
- * @msecs : command execution time in milliseconds
- */
-struct atmel_ecc_cmd {
- u8 word_addr;
- u8 count;
- u8 opcode;
- u8 param1;
- u16 param2;
- u8 data[MAX_RSP_SIZE];
- u8 msecs;
- u16 rxsize;
-} __packed;
-
-/* Status/Error codes */
-#define STATUS_SIZE 0x04
-#define STATUS_NOERR 0x00
-#define STATUS_WAKE_SUCCESSFUL 0x11
-
-static const struct {
- u8 value;
- const char *error_text;
-} error_list[] = {
- { 0x01, "CheckMac or Verify miscompare" },
- { 0x03, "Parse Error" },
- { 0x05, "ECC Fault" },
- { 0x0F, "Execution Error" },
- { 0xEE, "Watchdog about to expire" },
- { 0xFF, "CRC or other communication error" },
-};
-
-/* Definitions for eeprom organization */
-#define CONFIG_ZONE 0
-
-/* Definitions for Indexes common to all commands */
-#define RSP_DATA_IDX 1 /* buffer index of data in response */
-#define DATA_SLOT_2 2 /* used for ECDH private key */
-
-/* Definitions for the device lock state */
-#define DEVICE_LOCK_ADDR 0x15
-#define LOCK_VALUE_IDX (RSP_DATA_IDX + 2)
-#define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3)
-
-/*
- * Wake High delay to data communication (microseconds). SDA should be stable
- * high for this entire duration.
- */
-#define TWHI_MIN 1500
-#define TWHI_MAX 1550
-
-/* Wake Low duration */
-#define TWLO_USEC 60
-
-/* Command execution time (milliseconds) */
-#define MAX_EXEC_TIME_ECDH 58
-#define MAX_EXEC_TIME_GENKEY 115
-#define MAX_EXEC_TIME_READ 1
-
-/* Command opcode */
-#define OPCODE_ECDH 0x43
-#define OPCODE_GENKEY 0x40
-#define OPCODE_READ 0x02
-
-/* Definitions for the READ Command */
-#define READ_COUNT 7
-
-/* Definitions for the GenKey Command */
-#define GENKEY_COUNT 7
-#define GENKEY_MODE_PRIVATE 0x04
-
-/* Definitions for the ECDH Command */
-#define ECDH_COUNT 71
-#define ECDH_PREFIX_MODE 0x00
-
-#endif /* __ATMEL_ECC_H__ */
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
new file mode 100644
index 000000000000..dc876fab2882
--- /dev/null
+++ b/drivers/crypto/atmel-i2c.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip / Atmel ECC (I2C) driver.
+ *
+ * Copyright (c) 2017, Microchip Technology Inc.
+ * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ */
+
+#include <linux/bitrev.h>
+#include <linux/crc16.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "atmel-i2c.h"
+
+/**
+ * atmel_i2c_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
+ * The CRC16 is computed over the count, opcode, param1, param2 and data bytes.
+ * The checksum is saved in little-endian format in the least significant
+ * two bytes of the command. CRC polynomial is 0x8005 and the initial register
+ * value should be zero.
+ *
+ * @cmd : structure used for communicating with the device.
+ */
+static void atmel_i2c_checksum(struct atmel_i2c_cmd *cmd)
+{
+ u8 *data = &cmd->count;
+ size_t len = cmd->count - CRC_SIZE;
+ __le16 *__crc16 = (__le16 *)(data + len);
+
+ *__crc16 = cpu_to_le16(bitrev16(crc16(0, data, len)));
+}
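For reference, the checksum can be reproduced outside the kernel. The standalone sketch below is equivalent to the bitrev16(crc16(0, data, len)) combination above, following the bit-serial algorithm from Microchip's CryptoAuthentication documentation; it is an illustration, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* CRC-16, polynomial 0x8005, init 0, data bits shifted in LSB first;
 * equivalent to bitrev16(crc16(0, data, len)) in the kernel driver. */
static uint16_t atecc_crc16(const uint8_t *data, size_t len)
{
	uint16_t crc = 0;

	for (size_t i = 0; i < len; i++) {
		for (uint8_t mask = 0x01; mask; mask <<= 1) {
			uint8_t data_bit = (data[i] & mask) ? 1 : 0;
			uint8_t crc_bit = crc >> 15;

			crc <<= 1;
			if (data_bit != crc_bit)
				crc ^= 0x8005;
		}
	}
	return crc;	/* stored little-endian right after the data */
}

int main(void)
{
	/* count..param2 bytes of the READ command built below */
	const uint8_t msg[] = { 0x07, 0x02, 0x00, 0x15, 0x00 };

	printf("crc = 0x%04x\n", atecc_crc16(msg, sizeof(msg)));
	return 0;
}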
+
+void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd)
+{
+ cmd->word_addr = COMMAND;
+ cmd->opcode = OPCODE_READ;
+ /*
+ * Read the word from Configuration zone that contains the lock bytes
+ * (UserExtra, Selector, LockValue, LockConfig).
+ */
+ cmd->param1 = CONFIG_ZONE;
+ cmd->param2 = cpu_to_le16(DEVICE_LOCK_ADDR);
+ cmd->count = READ_COUNT;
+
+ atmel_i2c_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_READ;
+ cmd->rxsize = READ_RSP_SIZE;
+}
+EXPORT_SYMBOL(atmel_i2c_init_read_cmd);
+
+void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd)
+{
+ cmd->word_addr = COMMAND;
+ cmd->opcode = OPCODE_RANDOM;
+ cmd->param1 = 0;
+ cmd->param2 = 0;
+ cmd->count = RANDOM_COUNT;
+
+ atmel_i2c_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_RANDOM;
+ cmd->rxsize = RANDOM_RSP_SIZE;
+}
+EXPORT_SYMBOL(atmel_i2c_init_random_cmd);
+
+void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid)
+{
+ cmd->word_addr = COMMAND;
+ cmd->count = GENKEY_COUNT;
+ cmd->opcode = OPCODE_GENKEY;
+ cmd->param1 = GENKEY_MODE_PRIVATE;
+ /* a random private key will be generated and stored in slot keyID */
+ cmd->param2 = cpu_to_le16(keyid);
+
+ atmel_i2c_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_GENKEY;
+ cmd->rxsize = GENKEY_RSP_SIZE;
+}
+EXPORT_SYMBOL(atmel_i2c_init_genkey_cmd);
+
+int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd,
+ struct scatterlist *pubkey)
+{
+ size_t copied;
+
+ cmd->word_addr = COMMAND;
+ cmd->count = ECDH_COUNT;
+ cmd->opcode = OPCODE_ECDH;
+ cmd->param1 = ECDH_PREFIX_MODE;
+ /* private key slot */
+ cmd->param2 = cpu_to_le16(DATA_SLOT_2);
+
+ /*
+ * The device only supports NIST P256 ECC keys. The public key size will
+ * always be the same. Use a macro for the key size to avoid unnecessary
+ * computations.
+ */
+ copied = sg_copy_to_buffer(pubkey,
+ sg_nents_for_len(pubkey,
+ ATMEL_ECC_PUBKEY_SIZE),
+ cmd->data, ATMEL_ECC_PUBKEY_SIZE);
+ if (copied != ATMEL_ECC_PUBKEY_SIZE)
+ return -EINVAL;
+
+ atmel_i2c_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_ECDH;
+ cmd->rxsize = ECDH_RSP_SIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL(atmel_i2c_init_ecdh_cmd);
+
+/*
+ * After wake and after execution of a command, there will be error, status, or
+ * result bytes in the device's output register that can be retrieved by the
+ * system. When the length of that group is four bytes, the codes returned are
+ * detailed in error_list.
+ */
+static int atmel_i2c_status(struct device *dev, u8 *status)
+{
+ size_t err_list_len = ARRAY_SIZE(error_list);
+ int i;
+ u8 err_id = status[1];
+
+ if (*status != STATUS_SIZE)
+ return 0;
+
+ if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR)
+ return 0;
+
+ for (i = 0; i < err_list_len; i++)
+ if (error_list[i].value == err_id)
+ break;
+
+ /* if err_id is not in the error_list then ignore it */
+ if (i != err_list_len) {
+ dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text);
+ return err_id;
+ }
+
+ return 0;
+}
+
+static int atmel_i2c_wakeup(struct i2c_client *client)
+{
+ struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+ u8 status[STATUS_RSP_SIZE];
+ int ret;
+
+ /*
+ * The device ignores any levels or transitions on the SCL pin when the
+ * device is idle, asleep or during waking up. Don't check for error
+ * when waking up the device.
+ */
+ i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
+
+ /*
+ * Wait for the device to wake up. Command execution times (e.g. for
+ * ecdh and genkey) are tens of milliseconds anyway, so the 50
+ * microsecond delta of the usleep_range() below is ample slack.
+ */
+ usleep_range(TWHI_MIN, TWHI_MAX);
+
+ ret = i2c_master_recv(client, status, STATUS_SIZE);
+ if (ret < 0)
+ return ret;
+
+ return atmel_i2c_status(&client->dev, status);
+}
+
+static int atmel_i2c_sleep(struct i2c_client *client)
+{
+ u8 sleep = SLEEP_TOKEN;
+
+ return i2c_master_send(client, &sleep, 1);
+}
+
+/*
+ * atmel_i2c_send_receive() - send a command to the device and receive its
+ * response.
+ * @client: i2c client device
+ * @cmd : structure used to communicate with the device
+ *
+ * After the device receives a Wake token, a watchdog counter starts within the
+ * device. After the watchdog timer expires, the device enters sleep mode
+ * regardless of whether some I/O transmission or command execution is in
+ * progress. If a command is attempted when insufficient time remains prior to
+ * watchdog timer execution, the device will return the watchdog timeout error
+ * code without attempting to execute the command. There is no way to reset the
+ * counter other than to put the device into sleep or idle mode and then
+ * wake it up again.
+ */
+int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd)
+{
+ struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+ int ret;
+
+ mutex_lock(&i2c_priv->lock);
+
+ ret = atmel_i2c_wakeup(client);
+ if (ret)
+ goto err;
+
+ /* send the command */
+ ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE);
+ if (ret < 0)
+ goto err;
+
+ /* delay the appropriate amount of time for command to execute */
+ msleep(cmd->msecs);
+
+ /* receive the response */
+ ret = i2c_master_recv(client, cmd->data, cmd->rxsize);
+ if (ret < 0)
+ goto err;
+
+ /* put the device into low-power mode */
+ ret = atmel_i2c_sleep(client);
+ if (ret < 0)
+ goto err;
+
+ mutex_unlock(&i2c_priv->lock);
+ return atmel_i2c_status(&client->dev, cmd->data);
+err:
+ mutex_unlock(&i2c_priv->lock);
+ return ret;
+}
+EXPORT_SYMBOL(atmel_i2c_send_receive);
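A minimal sketch of a synchronous caller (device_sanity_check() below is the in-tree example; the client pointer and surrounding driver context are assumed):

	struct atmel_i2c_cmd cmd;
	int ret;

	atmel_i2c_init_read_cmd(&cmd);

	/* wakes the device, sends the packet, sleeps cmd.msecs, reads back */
	ret = atmel_i2c_send_receive(client, &cmd);
	if (ret)
		return ret;

	/* the response payload starts at RSP_DATA_IDX within cmd.data */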
+
+static void atmel_i2c_work_handler(struct work_struct *work)
+{
+ struct atmel_i2c_work_data *work_data =
+ container_of(work, struct atmel_i2c_work_data, work);
+ struct atmel_i2c_cmd *cmd = &work_data->cmd;
+ struct i2c_client *client = work_data->client;
+ int status;
+
+ status = atmel_i2c_send_receive(client, cmd);
+ work_data->cbk(work_data, work_data->areq, status);
+}
+
+void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
+ void (*cbk)(struct atmel_i2c_work_data *work_data,
+ void *areq, int status),
+ void *areq)
+{
+ work_data->cbk = (void *)cbk;
+ work_data->areq = areq;
+
+ INIT_WORK(&work_data->work, atmel_i2c_work_handler);
+ schedule_work(&work_data->work);
+}
+EXPORT_SYMBOL(atmel_i2c_enqueue);
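A sketch of the asynchronous path; the handler name and the areq argument are illustrative, not part of the API:

static void my_cmd_done(struct atmel_i2c_work_data *work_data, void *areq,
			int status)
{
	/* runs in workqueue context once send/receive has finished */
	kfree(work_data);
}

	work_data = kmalloc(sizeof(*work_data), GFP_KERNEL);
	work_data->ctx = my_ctx;
	work_data->client = client;
	atmel_i2c_init_genkey_cmd(&work_data->cmd, DATA_SLOT_2);
	atmel_i2c_enqueue(work_data, my_cmd_done, my_areq);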
+
+static inline size_t atmel_i2c_wake_token_sz(u32 bus_clk_rate)
+{
+ u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);
+
+ /* return the size of the wake_token in bytes */
+ return DIV_ROUND_UP(no_of_bits, 8);
+}
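Worked example: the token must hold SDA low for TWLO_USEC = 60 microseconds, i.e. TWLO_USEC * bus_clk_rate / USEC_PER_SEC bit times of zeros. At 100 kHz that is 60 * 100000 / 1000000 = 6 bits, rounded up to one zero byte; at the 1 MHz maximum it is 60 bits, i.e. 8 bytes, which is exactly WAKE_TOKEN_MAX_SIZE.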
+
+static int device_sanity_check(struct i2c_client *client)
+{
+ struct atmel_i2c_cmd *cmd;
+ int ret;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ atmel_i2c_init_read_cmd(cmd);
+
+ ret = atmel_i2c_send_receive(client, cmd);
+ if (ret)
+ goto free_cmd;
+
+ /*
+ * It is vital that the Configuration, Data and OTP zones be locked
+ * before the system containing the device is released into the field.
+ * Failure to lock these zones may permit modification of any secret
+ * keys and may lead to other security problems.
+ */
+ if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) {
+ dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n");
+ ret = -ENOTSUPP;
+ }
+
+ /* fall through */
+free_cmd:
+ kfree(cmd);
+ return ret;
+}
+
+int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct atmel_i2c_client_priv *i2c_priv;
+ struct device *dev = &client->dev;
+ int ret;
+ u32 bus_clk_rate;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "I2C_FUNC_I2C not supported\n");
+ return -ENODEV;
+ }
+
+ bus_clk_rate = i2c_acpi_find_bus_speed(&client->adapter->dev);
+ if (!bus_clk_rate) {
+ ret = device_property_read_u32(&client->adapter->dev,
+ "clock-frequency", &bus_clk_rate);
+ if (ret) {
+ dev_err(dev, "failed to read clock-frequency property\n");
+ return ret;
+ }
+ }
+
+ if (bus_clk_rate > 1000000L) {
+ dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
+ bus_clk_rate);
+ return -EINVAL;
+ }
+
+ i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL);
+ if (!i2c_priv)
+ return -ENOMEM;
+
+ i2c_priv->client = client;
+ mutex_init(&i2c_priv->lock);
+
+ /*
+ * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate of
+ * 1 MHz. The bus_clk_rate check above guarantees that wake_token_sz
+ * will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE.
+ */
+ i2c_priv->wake_token_sz = atmel_i2c_wake_token_sz(bus_clk_rate);
+
+ memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token));
+
+ atomic_set(&i2c_priv->tfm_count, 0);
+
+ i2c_set_clientdata(client, i2c_priv);
+
+ ret = device_sanity_check(client);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(atmel_i2c_probe);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
+MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h
new file mode 100644
index 000000000000..21860b99c3e3
--- /dev/null
+++ b/drivers/crypto/atmel-i2c.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, Microchip Technology Inc.
+ * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ */
+
+#ifndef __ATMEL_I2C_H__
+#define __ATMEL_I2C_H__
+
+#include <linux/hw_random.h>
+#include <linux/types.h>
+
+#define ATMEL_ECC_PRIORITY 300
+
+#define COMMAND 0x03 /* packet function */
+#define SLEEP_TOKEN 0x01
+#define WAKE_TOKEN_MAX_SIZE 8
+
+/* Definitions of Data and Command sizes */
+#define WORD_ADDR_SIZE 1
+#define COUNT_SIZE 1
+#define CRC_SIZE 2
+#define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE)
+
+/* size in bytes of the n prime */
+#define ATMEL_ECC_NIST_P256_N_SIZE 32
+#define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE)
+
+#define STATUS_RSP_SIZE 4
+#define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE)
+#define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \
+ CMD_OVERHEAD_SIZE)
+#define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE)
+#define RANDOM_RSP_SIZE (32 + CMD_OVERHEAD_SIZE)
+#define MAX_RSP_SIZE GENKEY_RSP_SIZE
+
+/**
+ * atmel_i2c_cmd - structure used for communicating with the device.
+ * @word_addr: indicates the function of the packet sent to the device. This
+ * byte should have a value of COMMAND for normal operation.
+ * @count : number of bytes to be transferred to (or from) the device.
+ * @opcode : the command code.
+ * @param1 : the first parameter; always present.
+ * @param2 : the second parameter; always present.
+ * @data : optional remaining input data. Includes a 2-byte CRC.
+ * @rxsize : size of the data received from i2c client.
+ * @msecs : command execution time in milliseconds
+ */
+struct atmel_i2c_cmd {
+ u8 word_addr;
+ u8 count;
+ u8 opcode;
+ u8 param1;
+ __le16 param2;
+ u8 data[MAX_RSP_SIZE];
+ u8 msecs;
+ u16 rxsize;
+} __packed;
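As an illustration, the READ command built by atmel_i2c_init_read_cmd() lays out on the wire as follows (the CRC bytes are computed over count..param2 and left symbolic here):

	0x03	word_addr = COMMAND
	0x07	count     = READ_COUNT (count, opcode, params, CRC)
	0x02	opcode    = OPCODE_READ
	0x00	param1    = CONFIG_ZONE
	0x15	param2 lo = DEVICE_LOCK_ADDR (little-endian)
	0x00	param2 hi
	crc0	CRC-16, low byte
	crc1	CRC-16, high byte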
+
+/* Status/Error codes */
+#define STATUS_SIZE 0x04
+#define STATUS_NOERR 0x00
+#define STATUS_WAKE_SUCCESSFUL 0x11
+
+static const struct {
+ u8 value;
+ const char *error_text;
+} error_list[] = {
+ { 0x01, "CheckMac or Verify miscompare" },
+ { 0x03, "Parse Error" },
+ { 0x05, "ECC Fault" },
+ { 0x0F, "Execution Error" },
+ { 0xEE, "Watchdog about to expire" },
+ { 0xFF, "CRC or other communication error" },
+};
+
+/* Definitions for eeprom organization */
+#define CONFIG_ZONE 0
+
+/* Definitions for Indexes common to all commands */
+#define RSP_DATA_IDX 1 /* buffer index of data in response */
+#define DATA_SLOT_2 2 /* used for ECDH private key */
+
+/* Definitions for the device lock state */
+#define DEVICE_LOCK_ADDR 0x15
+#define LOCK_VALUE_IDX (RSP_DATA_IDX + 2)
+#define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3)
+
+/*
+ * Wake High delay to data communication (microseconds). SDA should be stable
+ * high for this entire duration.
+ */
+#define TWHI_MIN 1500
+#define TWHI_MAX 1550
+
+/* Wake Low duration */
+#define TWLO_USEC 60
+
+/* Command execution time (milliseconds) */
+#define MAX_EXEC_TIME_ECDH 58
+#define MAX_EXEC_TIME_GENKEY 115
+#define MAX_EXEC_TIME_READ 1
+#define MAX_EXEC_TIME_RANDOM 50
+
+/* Command opcode */
+#define OPCODE_ECDH 0x43
+#define OPCODE_GENKEY 0x40
+#define OPCODE_READ 0x02
+#define OPCODE_RANDOM 0x1b
+
+/* Definitions for the READ Command */
+#define READ_COUNT 7
+
+/* Definitions for the RANDOM Command */
+#define RANDOM_COUNT 7
+
+/* Definitions for the GenKey Command */
+#define GENKEY_COUNT 7
+#define GENKEY_MODE_PRIVATE 0x04
+
+/* Definitions for the ECDH Command */
+#define ECDH_COUNT 71
+#define ECDH_PREFIX_MODE 0x00
+
+/* Used for binding tfm objects to i2c clients. */
+struct atmel_ecc_driver_data {
+ struct list_head i2c_client_list;
+ spinlock_t i2c_list_lock;
+} ____cacheline_aligned;
+
+/**
+ * atmel_i2c_client_priv - i2c_client private data
+ * @client : pointer to i2c client device
+ * @i2c_client_list_node: part of i2c_client_list
+ * @lock : lock for sending i2c commands
+ * @wake_token : wake token array of zeros
+ * @wake_token_sz : size in bytes of the wake_token
+ * @tfm_count : number of active crypto transformations on i2c client
+ *
+ * Reads and writes from/to the i2c client are sequential. The first byte
+ * transmitted to the device is treated as the byte size. Any attempt to send
+ * more than this number of bytes will cause the device to not ACK those bytes.
+ * After the host writes a single command byte to the input buffer, reads are
+ * prohibited until after the device completes command execution. Use a mutex
+ * when sending i2c commands.
+ */
+struct atmel_i2c_client_priv {
+ struct i2c_client *client;
+ struct list_head i2c_client_list_node;
+ struct mutex lock;
+ u8 wake_token[WAKE_TOKEN_MAX_SIZE];
+ size_t wake_token_sz;
+ atomic_t tfm_count ____cacheline_aligned;
+ struct hwrng hwrng;
+};
+
+/**
+ * atmel_i2c_work_data - data structure representing the work
+ * @ctx : transformation context.
+ * @cbk : pointer to a callback function to be invoked upon completion of this
+ * request. This has the form:
+ * callback(struct atmel_i2c_work_data *work_data, void *areq, int status)
+ * where:
+ * @work_data: data structure representing the work
+ * @areq : optional pointer to an argument passed with the original
+ * request.
+ * @status : status returned from the i2c client device or i2c error.
+ * @areq: optional pointer to a user argument for use at callback time.
+ * @work: describes the task to be executed.
+ * @cmd : structure used for communicating with the device.
+ */
+struct atmel_i2c_work_data {
+ void *ctx;
+ struct i2c_client *client;
+ void (*cbk)(struct atmel_i2c_work_data *work_data, void *areq,
+ int status);
+ void *areq;
+ struct work_struct work;
+ struct atmel_i2c_cmd cmd;
+};
+
+int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id);
+
+void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
+ void (*cbk)(struct atmel_i2c_work_data *work_data,
+ void *areq, int status),
+ void *areq);
+
+int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd);
+
+void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd);
+void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd);
+void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid);
+int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd,
+ struct scatterlist *pubkey);
+
+#endif /* __ATMEL_I2C_H__ */
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
new file mode 100644
index 000000000000..ea0d2068ea4f
--- /dev/null
+++ b/drivers/crypto/atmel-sha204a.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip / Atmel SHA204A (I2C) driver.
+ *
+ * Copyright (c) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "atmel-i2c.h"
+
+static void atmel_sha204a_rng_done(struct atmel_i2c_work_data *work_data,
+ void *areq, int status)
+{
+ struct atmel_i2c_client_priv *i2c_priv = work_data->ctx;
+ struct hwrng *rng = areq;
+
+ if (status)
+ dev_warn_ratelimited(&i2c_priv->client->dev,
+ "i2c transaction failed (%d)\n",
+ status);
+
+ rng->priv = (unsigned long)work_data;
+ atomic_dec(&i2c_priv->tfm_count);
+}
+
+static int atmel_sha204a_rng_read_nonblocking(struct hwrng *rng, void *data,
+ size_t max)
+{
+ struct atmel_i2c_client_priv *i2c_priv;
+ struct atmel_i2c_work_data *work_data;
+
+ i2c_priv = container_of(rng, struct atmel_i2c_client_priv, hwrng);
+
+ /* keep maximum 1 asynchronous read in flight at any time */
+ if (!atomic_add_unless(&i2c_priv->tfm_count, 1, 1))
+ return 0;
+
+ if (rng->priv) {
+ work_data = (struct atmel_i2c_work_data *)rng->priv;
+ max = min(sizeof(work_data->cmd.data), max);
+ memcpy(data, &work_data->cmd.data, max);
+ rng->priv = 0;
+ } else {
+ work_data = kmalloc(sizeof(*work_data), GFP_ATOMIC);
+ if (!work_data)
+ return -ENOMEM;
+
+ work_data->ctx = i2c_priv;
+ work_data->client = i2c_priv->client;
+
+ max = 0;
+ }
+
+ atmel_i2c_init_random_cmd(&work_data->cmd);
+ atmel_i2c_enqueue(work_data, atmel_sha204a_rng_done, rng);
+
+ return max;
+}
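The non-blocking path is a one-deep pipeline: the first call finds no cached bytes, schedules a RANDOM command and returns 0; atmel_sha204a_rng_done() then parks the completed work_data in rng->priv; the next call hands the cached bytes to the caller and reuses the same work_data for the next command. An illustrative timeline:

	call 1: rng->priv == NULL -> alloc work_data, enqueue RANDOM, return 0
	done 1: rng->priv = work_data (cmd.data now holds the response)
	call 2: rng->priv != NULL -> copy cached bytes, enqueue RANDOM, return max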
+
+static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max,
+ bool wait)
+{
+ struct atmel_i2c_client_priv *i2c_priv;
+ struct atmel_i2c_cmd cmd;
+ int ret;
+
+ if (!wait)
+ return atmel_sha204a_rng_read_nonblocking(rng, data, max);
+
+ i2c_priv = container_of(rng, struct atmel_i2c_client_priv, hwrng);
+
+ atmel_i2c_init_random_cmd(&cmd);
+
+ ret = atmel_i2c_send_receive(i2c_priv->client, &cmd);
+ if (ret)
+ return ret;
+
+ max = min(sizeof(cmd.data), max);
+ memcpy(data, cmd.data, max);
+
+ return max;
+}
+
+static int atmel_sha204a_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct atmel_i2c_client_priv *i2c_priv;
+ int ret;
+
+ ret = atmel_i2c_probe(client, id);
+ if (ret)
+ return ret;
+
+ i2c_priv = i2c_get_clientdata(client);
+
+ memset(&i2c_priv->hwrng, 0, sizeof(i2c_priv->hwrng));
+
+ i2c_priv->hwrng.name = dev_name(&client->dev);
+ i2c_priv->hwrng.read = atmel_sha204a_rng_read;
+ i2c_priv->hwrng.quality = 1024;
+
+ ret = hwrng_register(&i2c_priv->hwrng);
+ if (ret)
+ dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
+
+ return ret;
+}
+
+static int atmel_sha204a_remove(struct i2c_client *client)
+{
+ struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+
+ if (atomic_read(&i2c_priv->tfm_count)) {
+ dev_err(&client->dev, "Device is busy\n");
+ return -EBUSY;
+ }
+
+ if (i2c_priv->hwrng.priv)
+ kfree((void *)i2c_priv->hwrng.priv);
+ hwrng_unregister(&i2c_priv->hwrng);
+
+ return 0;
+}
+
+static const struct of_device_id atmel_sha204a_dt_ids[] = {
+ { .compatible = "atmel,atsha204a", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_sha204a_dt_ids);
+
+static const struct i2c_device_id atmel_sha204a_id[] = {
+ { "atsha204a", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id);
+
+static struct i2c_driver atmel_sha204a_driver = {
+ .probe = atmel_sha204a_probe,
+ .remove = atmel_sha204a_remove,
+ .id_table = atmel_sha204a_id,
+
+ .driver.name = "atmel-sha204a",
+ .driver.of_match_table = of_match_ptr(atmel_sha204a_dt_ids),
+};
+
+static int __init atmel_sha204a_init(void)
+{
+ return i2c_add_driver(&atmel_sha204a_driver);
+}
+
+static void __exit atmel_sha204a_exit(void)
+{
+ flush_scheduled_work();
+ i2c_del_driver(&atmel_sha204a_driver);
+}
+
+module_init(atmel_sha204a_init);
+module_exit(atmel_sha204a_exit);
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 18410c9e7b29..869602fcfd96 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -85,7 +85,7 @@ MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
* 0x70 - ring 2
* 0x78 - ring 3
*/
-char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
+static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
/*
* Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
* is set dynamically after reading SPU type from device tree.
@@ -2083,7 +2083,7 @@ static int __ahash_init(struct ahash_request *req)
* Return: true if incremental hashing is not supported
* false otherwise
*/
-bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
+static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
{
struct spu_hw *spu = &iproc_priv.spu;
@@ -4809,7 +4809,7 @@ static int spu_dt_read(struct platform_device *pdev)
return 0;
}
-int bcm_spu_probe(struct platform_device *pdev)
+static int bcm_spu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spu_hw *spu = &iproc_priv.spu;
@@ -4853,7 +4853,7 @@ failure:
return err;
}
-int bcm_spu_remove(struct platform_device *pdev)
+static int bcm_spu_remove(struct platform_device *pdev)
{
int i;
struct device *dev = &pdev->dev;
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index cb477259a2e2..2add51024575 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -38,21 +38,21 @@ enum spu2_proto_sel {
SPU2_DTLS_AEAD = 10
};
-char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
+static char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
"DES", "3DES"
};
-char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB", "XTS",
- "CCM", "GCM"
+static char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB",
+ "XTS", "CCM", "GCM"
};
-char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
+static char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
"Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384",
"SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256",
"SHA3-384", "SHA3-512"
};
-char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
+static char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
"Rabin", "CCM", "GCM", "Reserved"
};
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 577c9844b322..3720ddabb507 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -2,6 +2,12 @@
config CRYPTO_DEV_FSL_CAAM_COMMON
tristate
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+ tristate
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
+ tristate
+
config CRYPTO_DEV_FSL_CAAM
tristate "Freescale CAAM-Multicore platform driver backend"
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
@@ -25,7 +31,7 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
Selecting this will enable printing of various debug
information in the CAAM driver.
-config CRYPTO_DEV_FSL_CAAM_JR
+menuconfig CRYPTO_DEV_FSL_CAAM_JR
tristate "Freescale CAAM Job Ring driver backend"
default y
help
@@ -86,8 +92,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
threshold. Range is 1-65535.
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
- tristate "Register algorithm implementations with the Crypto API"
+ bool "Register algorithm implementations with the Crypto API"
default y
+ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
@@ -97,13 +104,11 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
scatterlist crypto API (such as the linux native IPSec
stack) to the SEC4 via job ring.
- To compile this as a module, choose M here: the module
- will be called caamalg.
-
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
- tristate "Queue Interface as Crypto API backend"
+ bool "Queue Interface as Crypto API backend"
depends on FSL_DPAA && NET
default y
+ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
help
@@ -114,33 +119,26 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
assigned to the kernel should also be more than the number of
job rings.
- To compile this as a module, choose M here: the module
- will be called caamalg_qi.
-
config CRYPTO_DEV_FSL_CAAM_AHASH_API
- tristate "Register hash algorithm implementations with Crypto API"
+ bool "Register hash algorithm implementations with Crypto API"
default y
+ select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
select CRYPTO_HASH
help
Selecting this will offload ahash for users of the
scatterlist crypto API to the SEC4 via job ring.
- To compile this as a module, choose M here: the module
- will be called caamhash.
-
config CRYPTO_DEV_FSL_CAAM_PKC_API
- tristate "Register public key cryptography implementations with Crypto API"
+ bool "Register public key cryptography implementations with Crypto API"
default y
select CRYPTO_RSA
help
Selecting this will allow SEC Public key support for RSA.
Supported cryptographic primitives: encryption, decryption,
signature and verification.
- To compile this as a module, choose M here: the module
- will be called caam_pkc.
config CRYPTO_DEV_FSL_CAAM_RNG_API
- tristate "Register caam device for hwrng API"
+ bool "Register caam device for hwrng API"
default y
select CRYPTO_RNG
select HW_RANDOM
@@ -148,9 +146,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
Selecting this will register the SEC4 hardware rng to
the hw_random API for supplying the kernel entropy pool.
- To compile this as a module, choose M here: the module
- will be called caamrng.
-
endif # CRYPTO_DEV_FSL_CAAM_JR
endif # CRYPTO_DEV_FSL_CAAM
@@ -160,6 +155,8 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
depends on FSL_MC_DPIO
depends on NETDEVICES
select CRYPTO_DEV_FSL_CAAM_COMMON
+ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+ select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
select CRYPTO_BLKCIPHER
select CRYPTO_AUTHENC
select CRYPTO_AEAD
@@ -171,12 +168,3 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
To compile this as a module, choose M here: the module
will be called dpaa2_caam.
-
-config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
- def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
- CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
- CRYPTO_DEV_FSL_DPAA2_CAAM)
-
-config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
- def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
- CRYPTO_DEV_FSL_DPAA2_CAAM)
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 7bbfd06a11ff..9ab4e81ea21e 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -11,20 +11,20 @@ ccflags-y += -DVERSION=\"\"
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
-caam-objs := ctrl.o
-caam_jr-objs := jr.o key_gen.o
-caam_pkc-y := caampkc.o pkc_desc.o
+caam-y := ctrl.o
+caam_jr-y := jr.o key_gen.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
+
+caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
ccflags-y += -DCONFIG_CAAM_QI
- caam-objs += qi.o
endif
obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
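With the API backends demoted to bool, caamalg.o, caamalg_qi.o, caamhash.o, caamrng.o and caampkc.o are linked straight into caam_jr.ko instead of being standalone modules, so their module_init()/module_exit() hooks disappear (see the caamalg.c hunks below) and the job ring driver must invoke the renamed entry points itself. A sketch of what such a call site might look like (the actual one lives in jr.c, outside this excerpt):

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API))
		err = caam_algapi_init(ctrldev);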
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index c0ece44f303b..43f18253e5b6 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -77,13 +77,6 @@
#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
-#ifdef DEBUG
-/* for print_hex_dumps with line references */
-#define debug(format, arg...) printk(format, arg)
-#else
-#define debug(format, arg...)
-#endif
-
struct caam_alg_entry {
int class1_alg_type;
int class2_alg_type;
@@ -583,13 +576,11 @@ static int aead_setkey(struct crypto_aead *aead,
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
-#ifdef DEBUG
- printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
+ dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
keys.authkeylen + keys.enckeylen, keys.enckeylen,
keys.authkeylen);
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
/*
* If DKP is supported, use it in the shared descriptor to generate
@@ -623,11 +614,10 @@ static int aead_setkey(struct crypto_aead *aead,
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
keys.enckeylen, ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->adata.keylen_pad + keys.enckeylen, 1);
-#endif
+
+ print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
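With the #ifdef DEBUG guards gone, these dumps are compiled in whenever CONFIG_DYNAMIC_DEBUG is enabled and can be toggled per file at run time, e.g. by writing "file caamalg.c +p" to /sys/kernel/debug/dynamic_debug/control; in builds without dynamic debug (and without DEBUG defined) they still compile out to nothing.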
@@ -678,10 +668,8 @@ static int gcm_setkey(struct crypto_aead *aead,
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
@@ -699,10 +687,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
if (keylen < 4)
return -EINVAL;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
@@ -725,10 +711,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
if (keylen < 4)
return -EINVAL;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
@@ -757,10 +741,8 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
/*
* AES-CTR needs to load IV in CONTEXT1 reg
* at an offset of 128bits (16bytes)
@@ -916,7 +898,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
if (sec4_sg_bytes)
dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
DMA_TO_DEVICE);
@@ -949,9 +931,7 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
struct aead_request *req = context;
struct aead_edesc *edesc;
-#ifdef DEBUG
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
@@ -971,9 +951,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
struct aead_request *req = context;
struct aead_edesc *edesc;
-#ifdef DEBUG
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
@@ -1001,33 +979,32 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
int ivsize = crypto_skcipher_ivsize(skcipher);
-#ifdef DEBUG
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
- edesc->src_nents > 1 ? 100 : ivsize, 1);
-#endif
- caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
-
skcipher_unmap(jrdev, edesc, req);
/*
* The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block. This is used e.g. by the CTS mode.
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
*/
- if (ivsize)
- scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
- ivsize, ivsize, 0);
+ if (ivsize) {
+ memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
+ ivsize);
+
+ print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ }
+
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
kfree(edesc);
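This write-back matters to callers that chain requests: the skcipher API guarantees that req->iv holds the next IV once a request completes, so two CBC segments can be chained as in this sketch (request allocation, the crypto_wait setup and error handling are assumed):

	skcipher_request_set_crypt(req, sg_a, sg_a, len_a, iv);
	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	/* iv now holds the last ciphertext block of the first segment */
	skcipher_request_set_crypt(req, sg_b, sg_b, len_b, iv);
	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);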
@@ -1039,26 +1016,35 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
struct skcipher_request *req = context;
struct skcipher_edesc *edesc;
-#ifdef DEBUG
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
int ivsize = crypto_skcipher_ivsize(skcipher);
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
-#endif
- caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ skcipher_unmap(jrdev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
+ */
+ if (ivsize) {
+ memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
+ ivsize);
+
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ ivsize, 1);
+ }
+
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
- skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
skcipher_request_complete(req, err);
@@ -1106,6 +1092,7 @@ static void init_aead_job(struct aead_request *req,
if (unlikely(req->src != req->dst)) {
if (!edesc->mapped_dst_nents) {
dst_dma = 0;
+ out_options = 0;
} else if (edesc->mapped_dst_nents == 1) {
dst_dma = sg_dma_address(req->dst);
out_options = 0;
@@ -1249,6 +1236,7 @@ static void init_skcipher_job(struct skcipher_request *req,
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct device *jrdev = ctx->jrdev;
int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc = edesc->hw_desc;
u32 *sh_desc;
@@ -1256,13 +1244,12 @@ static void init_skcipher_job(struct skcipher_request *req,
dma_addr_t src_dma, dst_dma, ptr;
int len, sec4_sg_index = 0;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
- pr_err("asked=%d, cryptlen%d\n",
+ print_hex_dump_debug("presciv@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+ dev_dbg(jrdev, "asked=%d, cryptlen%d\n",
(int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
-#endif
- caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
+
+ caam_dump_sg("src @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
@@ -1285,7 +1272,7 @@ static void init_skcipher_job(struct skcipher_request *req,
if (likely(req->src == req->dst)) {
dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
out_options = in_options;
- } else if (edesc->mapped_dst_nents == 1) {
+ } else if (!ivsize && edesc->mapped_dst_nents == 1) {
dst_dma = sg_dma_address(req->dst);
} else {
dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
@@ -1293,7 +1280,7 @@ static void init_skcipher_job(struct skcipher_request *req,
out_options = LDST_SGF;
}
- append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
}
/*
@@ -1309,37 +1296,36 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ int src_len, dst_len = 0;
struct aead_edesc *edesc;
int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
unsigned int authsize = ctx->authsize;
if (unlikely(req->dst != req->src)) {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen);
+ src_len = req->assoclen + req->cryptlen;
+ dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen);
+ src_len);
return ERR_PTR(src_nents);
}
- dst_nents = sg_nents_for_len(req->dst, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize :
- (-authsize)));
+ dst_nents = sg_nents_for_len(req->dst, dst_len);
if (unlikely(dst_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : (-authsize)));
+ dst_len);
return ERR_PTR(dst_nents);
}
} else {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len = req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0);
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len);
return ERR_PTR(src_nents);
}
}
@@ -1380,8 +1366,16 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
}
}
+ /*
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries.
+ */
sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
- sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ if (mapped_dst_nents > 1)
+ sec4_sg_len += pad_sg_nents(mapped_dst_nents);
+ else
+ sec4_sg_len = pad_sg_nents(sec4_sg_len);
+
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
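pad_sg_nents() is introduced elsewhere in this series; a minimal sketch consistent with the comment above, assuming it just rounds the entry count up to the hardware's prefetch granularity:

static inline int pad_sg_nents(int sg_nents)
{
	/* the CAAM DMA engine reads S/G entries four at a time */
	return ALIGN(sg_nents, 4);
}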
@@ -1403,12 +1397,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_index = 0;
if (mapped_src_nents > 1) {
- sg_to_sec4_sg_last(req->src, mapped_src_nents,
+ sg_to_sec4_sg_last(req->src, src_len,
edesc->sec4_sg + sec4_sg_index, 0);
sec4_sg_index += mapped_src_nents;
}
if (mapped_dst_nents > 1) {
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+ sg_to_sec4_sg_last(req->dst, dst_len,
edesc->sec4_sg + sec4_sg_index, 0);
}
@@ -1446,11 +1440,10 @@ static int gcm_encrypt(struct aead_request *req)
/* Create and submit job descriptor */
init_gcm_job(req, edesc, all_contig, true);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
desc = edesc->hw_desc;
ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
@@ -1556,11 +1549,10 @@ static int aead_encrypt(struct aead_request *req)
/* Create and submit job descriptor */
init_authenc_job(req, edesc, all_contig, true);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
desc = edesc->hw_desc;
ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
@@ -1591,11 +1583,10 @@ static int gcm_decrypt(struct aead_request *req)
/* Create and submit job descriptor*/
init_gcm_job(req, edesc, all_contig, false);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
desc = edesc->hw_desc;
ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
@@ -1627,7 +1618,7 @@ static int aead_decrypt(struct aead_request *req)
u32 *desc;
int ret = 0;
- caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
+ caam_dump_sg("dec src@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
req->assoclen + req->cryptlen, 1);
@@ -1639,11 +1630,10 @@ static int aead_decrypt(struct aead_request *req)
/* Create and submit job descriptor*/
init_authenc_job(req, edesc, all_contig, false);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
desc = edesc->hw_desc;
ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
@@ -1719,7 +1709,29 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
else
sec4_sg_ents = mapped_src_nents + !!ivsize;
dst_sg_idx = sec4_sg_ents;
- sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+
+ /*
+ * Input, output HW S/G tables: [IV, src][dst, IV]
+ * IV entries point to the same buffer
+ * If src == dst, S/G entries are reused (S/G tables overlap)
+ *
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries. Logic:
+ * if (output S/G)
+ * pad output S/G, if needed
+ * else if (input S/G) ...
+ * pad input S/G, if needed
+ */
+ if (ivsize || mapped_dst_nents > 1) {
+ if (req->src == req->dst)
+ sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
+ else
+ sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
+ !!ivsize);
+ } else {
+ sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
+ }
+
sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
/*
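Worked example for the src != dst branch: with a 16-byte IV, a source mapped to 3 S/G entries and a destination mapped to 1, the input table takes 1 (IV) + 3 (src) = 4 entries and dst_sg_idx = 4; the output side needs 1 (dst) + 1 (IV) = 2 entries, padded to 4, so sec4_sg_ents ends up as 8.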
@@ -1744,10 +1756,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
/* Make sure IV is located in a DMAable area */
if (ivsize) {
- iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+ iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
memcpy(iv, req->iv, ivsize);
- iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+ iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
caam_unmap(jrdev, req->src, req->dst, src_nents,
@@ -1759,13 +1771,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
}
if (dst_sg_idx)
- sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg +
- !!ivsize, 0);
+ sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
+ !!ivsize, 0);
- if (mapped_dst_nents > 1) {
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
- edesc->sec4_sg + dst_sg_idx, 0);
- }
+ if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
+ sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
+ dst_sg_idx, 0);
+
+ if (ivsize)
+ dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
+ mapped_dst_nents, iv_dma, ivsize, 0);
+
+ if (ivsize || mapped_dst_nents > 1)
+ sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
+ mapped_dst_nents);
if (sec4_sg_bytes) {
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1782,11 +1801,9 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
edesc->iv_dma = iv_dma;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
- sec4_sg_bytes, 1);
-#endif
+ print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+ sec4_sg_bytes, 1);
return edesc;
}
@@ -1807,11 +1824,11 @@ static int skcipher_encrypt(struct skcipher_request *req)
/* Create and submit job descriptor*/
init_skcipher_job(req, edesc, true);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+
desc = edesc->hw_desc;
ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
@@ -1830,7 +1847,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- int ivsize = crypto_skcipher_ivsize(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int ret = 0;
@@ -1840,22 +1856,13 @@ static int skcipher_decrypt(struct skcipher_request *req)
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- /*
- * The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block.
- */
- if (ivsize)
- scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
- ivsize, ivsize, 0);
-
/* Create and submit job descriptor*/
init_skcipher_job(req, edesc, false);
desc = edesc->hw_desc;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
if (!ret) {
@@ -3444,7 +3451,7 @@ static void caam_aead_exit(struct crypto_aead *tfm)
caam_exit_common(crypto_aead_ctx(tfm));
}
-static void __exit caam_algapi_exit(void)
+void caam_algapi_exit(void)
{
int i;
@@ -3489,43 +3496,15 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->exit = caam_aead_exit;
}
-static int __init caam_algapi_init(void)
+int caam_algapi_init(struct device *ctrldev)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct caam_drv_private *priv;
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
int i = 0, err = 0;
u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
u32 arc4_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false, gcm_support;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev) {
- of_node_put(dev_node);
- return -ENODEV;
- }
-
- priv = dev_get_drvdata(&pdev->dev);
- of_node_put(dev_node);
-
- /*
- * If priv is NULL, it's probably because the caam driver wasn't
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
- */
- if (!priv) {
- err = -ENODEV;
- goto out_put_dev;
- }
-
-
/*
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
@@ -3668,14 +3647,5 @@ static int __init caam_algapi_init(void)
if (registered)
pr_info("caam algorithms registered in /proc/crypto\n");
-out_put_dev:
- put_device(&pdev->dev);
return err;
}
-
-module_init(caam_algapi_init);
-module_exit(caam_algapi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("FSL CAAM support for crypto API");
-MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 1e1a376edc2f..72531837571e 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -33,12 +33,11 @@ static inline void append_dec_op1(u32 *desc, u32 type)
}
jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT);
+ append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT);
uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
set_jump_tgt_here(desc, jump_cmd);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+ append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT |
+ OP_ALG_AAI_DK);
set_jump_tgt_here(desc, uncond_jump_cmd);
}
@@ -115,11 +114,9 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("aead null enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
@@ -204,11 +201,9 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("aead null dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
@@ -358,10 +353,9 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("aead enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
@@ -475,10 +469,9 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("aead dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
@@ -613,11 +606,9 @@ copy_iv:
append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead givenc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("aead givenc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
@@ -742,10 +733,9 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("gcm enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
@@ -838,10 +828,9 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("gcm dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
@@ -933,11 +922,9 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("rfc4106 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
@@ -1030,11 +1017,9 @@ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("rfc4106 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
@@ -1115,11 +1100,9 @@ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("rfc4543 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
@@ -1205,11 +1188,9 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("rfc4543 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
@@ -1410,17 +1391,21 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
LDST_OFFSET_SHIFT));
/* Load operation */
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
OP_ALG_ENCRYPT);
/* Perform operation */
skcipher_append_src_dst(desc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "skcipher enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ /* Store IV */
+ if (ivsize)
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off <<
+ LDST_OFFSET_SHIFT));
+
+ print_hex_dump_debug("skcipher enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap);
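/*
 * Review note: the OP_ALG_AS_INITFINAL -> OP_ALG_AS_INIT change above goes
 * hand in hand with the new "Store IV" SEQ STORE. Leaving the algorithm state
 * non-finalized keeps the updated IV in the class 1 context register (at
 * ctx1_iv_off), where the added store command can copy it out behind the
 * ciphertext - letting the engine, rather than the driver, produce the IV for
 * the next call.
 */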
@@ -1479,7 +1464,7 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
/* Choose operation */
if (ctx1_iv_off)
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
OP_ALG_DECRYPT);
else
append_dec_op1(desc, cdata->algtype);
@@ -1487,11 +1472,15 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
/* Perform operation */
skcipher_append_src_dst(desc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "skcipher dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ /* Store IV */
+ if (ivsize)
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off <<
+ LDST_OFFSET_SHIFT));
+
+ print_hex_dump_debug("skcipher dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);
@@ -1538,11 +1527,13 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
/* Perform operation */
skcipher_append_src_dst(desc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts skcipher enc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ /* Store upper 8B of IV */
+ append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+
+ print_hex_dump_debug("xts skcipher enc shdesc@" __stringify(__LINE__)
+ ": ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
}
EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);
@@ -1588,11 +1579,13 @@ void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
/* Perform operation */
skcipher_append_src_dst(desc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts skcipher dec shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ /* Store upper 8B of IV */
+ append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+
+ print_hex_dump_debug("xts skcipher dec shdesc@" __stringify(__LINE__)
+ ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
}
EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap);
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index d5ca42ff961a..da4a4ee60c80 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -44,9 +44,9 @@
#define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ)
#define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \
- 20 * CAAM_CMD_SZ)
+ 21 * CAAM_CMD_SZ)
#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \
- 15 * CAAM_CMD_SZ)
+ 16 * CAAM_CMD_SZ)
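/*
 * Review sketch (illustrative, userspace-style; not driver code): the length
 * bounds grow by exactly one command word because the new "Store IV"
 * SEQ STORE is a single CAAM command. CAAM_CMD_SZ is the per-command word
 * size (4 bytes in the driver headers).
 */
#include <assert.h>

#define CAAM_CMD_SZ		4
#define DESC_SKCIPHER_BASE	(3 * CAAM_CMD_SZ)

int main(void)
{
	int enc_old = DESC_SKCIPHER_BASE + 20 * CAAM_CMD_SZ;
	int enc_new = DESC_SKCIPHER_BASE + 21 * CAAM_CMD_SZ;
	int dec_old = DESC_SKCIPHER_BASE + 15 * CAAM_CMD_SZ;
	int dec_new = DESC_SKCIPHER_BASE + 16 * CAAM_CMD_SZ;

	/* one extra command word each, for the added "Store IV" */
	assert(enc_new - enc_old == CAAM_CMD_SZ);
	assert(dec_new - dec_old == CAAM_CMD_SZ);
	return 0;
}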
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize, int era);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index d290d6b41825..32f0f8a72067 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -4,7 +4,7 @@
* Based on caamalg.c
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2019 NXP
*/
#include "compat.h"
@@ -214,13 +214,11 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
-#ifdef DEBUG
- dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+ dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
keys.authkeylen + keys.enckeylen, keys.enckeylen,
keys.authkeylen);
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
/*
* If DKP is supported, use it in the shared descriptor to generate
@@ -237,7 +235,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
memcpy(ctx->key, keys.authkey, keys.authkeylen);
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
keys.enckeylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma,
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
ctx->adata.keylen_pad +
keys.enckeylen, ctx->dir);
goto skip_split_key;
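/*
 * Review note: ctx->key is mapped in caam_init_common() on the controller
 * device (ctx->jrdev->parent), so every dma_sync_single_for_device() and the
 * final dma_unmap_single() on ctx->key_dma - here and in the other setkey
 * paths and caam_exit_common() below - must name that same parent device,
 * not the job ring device.
 */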
@@ -251,8 +249,9 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, ctx->dir);
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
+ ctx->adata.keylen_pad + keys.enckeylen,
+ ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
@@ -386,13 +385,12 @@ static int gcm_setkey(struct crypto_aead *aead,
struct device *jrdev = ctx->jrdev;
int ret;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen,
+ ctx->dir);
ctx->cdata.keylen = keylen;
ret = gcm_set_sh_desc(aead);
@@ -485,10 +483,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
if (keylen < 4)
return -EINVAL;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
/*
@@ -496,8 +492,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
* in the nonce. Update the AES key length.
*/
ctx->cdata.keylen = keylen - 4;
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- ctx->dir);
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
+ ctx->cdata.keylen, ctx->dir);
ret = rfc4106_set_sh_desc(aead);
if (ret)
@@ -589,10 +585,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
if (keylen < 4)
return -EINVAL;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
/*
@@ -600,8 +594,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
* in the nonce. Update the AES key length.
*/
ctx->cdata.keylen = keylen - 4;
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- ctx->dir);
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
+ ctx->cdata.keylen, ctx->dir);
ret = rfc4543_set_sh_desc(aead);
if (ret)
@@ -644,10 +638,9 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
const bool is_rfc3686 = alg->caam.rfc3686;
int ret = 0;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
/*
* AES-CTR needs to load IV in CONTEXT1 reg
* at an offset of 128bits (16bytes)
@@ -838,7 +831,8 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents, dma_addr_t iv_dma, int ivsize,
- dma_addr_t qm_sg_dma, int qm_sg_bytes)
+ enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
+ int qm_sg_bytes)
{
if (dst != src) {
if (src_nents)
@@ -850,7 +844,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
if (qm_sg_bytes)
dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
@@ -863,7 +857,8 @@ static void aead_unmap(struct device *dev,
int ivsize = crypto_aead_ivsize(aead);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
@@ -874,7 +869,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
}
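/*
 * Review note: caam_unmap() now takes the IV's DMA direction explicitly.
 * AEAD IVs stay DMA_TO_DEVICE (the engine only reads them), while skcipher
 * IVs become DMA_BIDIRECTIONAL because the reworked descriptors write the
 * updated IV back into the same buffer; the map sites below switch to
 * DMA_BIDIRECTIONAL to match, and DMA_NONE marks error paths where no IV
 * was mapped.
 */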
static void aead_done(struct caam_drv_req *drv_req, u32 status)
@@ -924,6 +920,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ int src_len, dst_len = 0;
struct aead_edesc *edesc;
dma_addr_t qm_sg_dma, iv_dma = 0;
int ivsize = 0;
@@ -945,13 +942,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
}
if (likely(req->src == req->dst)) {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len = req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0);
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len);
qi_cache_free(edesc);
return ERR_PTR(src_nents);
}
@@ -964,23 +961,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
return ERR_PTR(-ENOMEM);
}
} else {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen);
+ src_len = req->assoclen + req->cryptlen;
+ dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen);
+ src_len);
qi_cache_free(edesc);
return ERR_PTR(src_nents);
}
- dst_nents = sg_nents_for_len(req->dst, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize :
- (-authsize)));
+ dst_nents = sg_nents_for_len(req->dst, dst_len);
if (unlikely(dst_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : (-authsize)));
+ dst_len);
qi_cache_free(edesc);
return ERR_PTR(dst_nents);
}
@@ -1019,9 +1014,24 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
/*
* Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
* Input is not contiguous.
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries. Logic:
+ * if (src != dst && output S/G)
+ * pad output S/G, if needed
+ * else if (src == dst && S/G)
+ * overlapping S/Gs; pad one of them
+ * else if (input S/G) ...
+ * pad input S/G, if needed
*/
- qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
- (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
+ if (mapped_dst_nents > 1)
+ qm_sg_ents += pad_sg_nents(mapped_dst_nents);
+ else if ((req->src == req->dst) && (mapped_src_nents > 1))
+ qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
+ 1 + !!ivsize + pad_sg_nents(mapped_src_nents));
+ else
+ qm_sg_ents = pad_sg_nents(qm_sg_ents);
+
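/*
 * Review sketch: pad_sg_nents() itself is not shown in this diff; a minimal
 * stand-in consistent with the "HW reads 4 S/G entries at a time" comment
 * would round the entry count up to a multiple of 4 (name and granularity
 * here are assumptions for illustration):
 */
static inline int pad_sg_nents_sketch(int nents)
{
	return (nents + 3) & ~3;	/* e.g. 5 -> 8, 8 -> 8 */
}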
sg_table = &edesc->sgt[0];
qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
@@ -1029,7 +1039,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1044,7 +1054,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(qidev, iv_dma)) {
dev_err(qidev, "unable to map IV\n");
caam_unmap(qidev, req->src, req->dst, src_nents,
- dst_nents, 0, 0, 0, 0);
+ dst_nents, 0, 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1063,7 +1073,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
dev_err(qidev, "unable to map assoclen\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1074,19 +1084,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
qm_sg_index++;
}
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
qm_sg_index += mapped_src_nents;
if (mapped_dst_nents > 1)
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- qm_sg_index, 0);
+ sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
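/*
 * Review note: sg_to_qm_sg_last() - and the sg_to_qm_sg()/sg_to_sec4_sg*
 * helpers in later hunks - switch from taking an S/G entry count to taking a
 * byte length, which is why callers now pass src_len/dst_len, req->cryptlen,
 * req->nbytes or to_hash where they used to pass mapped_*_nents.
 */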
qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(qidev, qm_sg_dma)) {
dev_err(qidev, "unable to map S/G table\n");
dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1109,7 +1118,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
(1 + !!ivsize) * sizeof(*sg_table),
out_len, 0);
- } else if (mapped_dst_nents == 1) {
+ } else if (mapped_dst_nents <= 1) {
dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
0);
} else {
@@ -1182,33 +1191,28 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
struct device *qidev = caam_ctx->qidev;
int ivsize = crypto_skcipher_ivsize(skcipher);
-#ifdef DEBUG
- dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
-#endif
+ dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
edesc = container_of(drv_req, typeof(*edesc), drv_req);
if (status)
caam_jr_strstatus(qidev, status);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
- edesc->src_nents > 1 ? 100 : ivsize, 1);
- caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
-#endif
skcipher_unmap(qidev, edesc, req);
/*
* The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block. This is used e.g. by the CTS mode.
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
*/
- if (edesc->drv_req.drv_ctx->op_type == ENCRYPT)
- scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
- ivsize, ivsize, 0);
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
qi_cache_free(edesc);
skcipher_request_complete(req, status);
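/*
 * Review sketch of the completion-side IV read-back: the extended descriptor
 * lays the IV buffer directly behind the HW S/G table,
 *
 *   edesc->sgt[0 .. qm_sg_ents-1]                  S/G table (qm_sg_bytes)
 *   (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes      IV, updated by the engine
 *
 * so a plain memcpy() into req->iv replaces the old scatterwalk copy of the
 * last ciphertext block and works for CTR (counter) as well as CBC.
 */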
@@ -1276,14 +1280,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
qm_sg_ents = 1 + mapped_src_nents;
dst_sg_idx = qm_sg_ents;
- qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ /*
+ * Input, output HW S/G tables: [IV, src][dst, IV]
+ * IV entries point to the same buffer
+ * If src == dst, S/G entries are reused (S/G tables overlap)
+ *
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries.
+ */
+ if (req->src != req->dst)
+ qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
+ else
+ qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
+
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
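/*
 * Review worked example (assuming a 4-entry padding granularity): with
 * mapped_src_nents = 2, mapped_dst_nents = 3 and req->src != req->dst, the
 * input table is [IV, src0, src1] = 3 entries and the output table is
 * pad_sg_nents(3 + 1) = 4 entries for [dst0, dst1, dst2, IV], giving
 * qm_sg_ents = 7. For req->src == req->dst the tables overlap and
 * qm_sg_ents = 1 + pad_sg_nents(1 + 2) = 5.
 */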
if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
ivsize > CAAM_QI_MEMCACHE_SIZE)) {
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1292,7 +1308,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1301,11 +1317,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
iv = (u8 *)(sg_table + qm_sg_ents);
memcpy(iv, req->iv, ivsize);
- iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(qidev, iv_dma)) {
dev_err(qidev, "unable to map IV\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1319,18 +1335,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
edesc->drv_req.drv_ctx = drv_ctx;
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+ sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
- if (mapped_dst_nents > 1)
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- dst_sg_idx, 0);
+ if (req->src != req->dst)
+ sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
+
+ dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
+ ivsize, 0);
edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
DMA_TO_DEVICE);
if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
dev_err(qidev, "unable to map S/G table\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1340,16 +1358,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
ivsize + req->cryptlen, 0);
- if (req->src == req->dst) {
+ if (req->src == req->dst)
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
- sizeof(*sg_table), req->cryptlen, 0);
- } else if (mapped_dst_nents > 1) {
+ sizeof(*sg_table), req->cryptlen + ivsize,
+ 0);
+ else
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
- sizeof(*sg_table), req->cryptlen, 0);
- } else {
- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
- req->cryptlen, 0);
- }
+ sizeof(*sg_table), req->cryptlen + ivsize,
+ 0);
return edesc;
}
@@ -1359,7 +1375,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- int ivsize = crypto_skcipher_ivsize(skcipher);
int ret;
if (unlikely(caam_congested))
@@ -1370,14 +1385,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- /*
- * The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block.
- */
- if (!encrypt)
- scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
- ivsize, ivsize, 0);
-
ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
if (!ret) {
ret = -EINPROGRESS;
@@ -2382,6 +2389,7 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
bool uses_dkp)
{
struct caam_drv_private *priv;
+ struct device *dev;
/*
* distribute tfms across job rings to ensure in-order
@@ -2393,16 +2401,17 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
return PTR_ERR(ctx->jrdev);
}
- priv = dev_get_drvdata(ctx->jrdev->parent);
+ dev = ctx->jrdev->parent;
+ priv = dev_get_drvdata(dev);
if (priv->era >= 6 && uses_dkp)
ctx->dir = DMA_BIDIRECTIONAL;
else
ctx->dir = DMA_TO_DEVICE;
- ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
+ ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
ctx->dir);
- if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
- dev_err(ctx->jrdev, "unable to map key\n");
+ if (dma_mapping_error(dev, ctx->key_dma)) {
+ dev_err(dev, "unable to map key\n");
caam_jr_free(ctx->jrdev);
return -ENOMEM;
}
@@ -2411,7 +2420,7 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- ctx->qidev = priv->qidev;
+ ctx->qidev = dev;
spin_lock_init(&ctx->lock);
ctx->drv_ctx[ENCRYPT] = NULL;
@@ -2445,7 +2454,8 @@ static void caam_exit_common(struct caam_ctx *ctx)
caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
- dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
+ dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key),
+ ctx->dir);
caam_jr_free(ctx->jrdev);
}
@@ -2460,7 +2470,7 @@ static void caam_aead_exit(struct crypto_aead *tfm)
caam_exit_common(crypto_aead_ctx(tfm));
}
-static void __exit caam_qi_algapi_exit(void)
+void caam_qi_algapi_exit(void)
{
int i;
@@ -2505,45 +2515,17 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->exit = caam_aead_exit;
}
-static int __init caam_qi_algapi_init(void)
+int caam_qi_algapi_init(struct device *ctrldev)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
int i = 0, err = 0;
u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- of_node_put(dev_node);
- if (!pdev)
- return -ENODEV;
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
-
- /*
- * If priv is NULL, it's probably because the caam driver wasn't
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
- */
- if (!priv || !priv->qi_present) {
- err = -ENODEV;
- goto out_put_dev;
- }
-
if (caam_dpaa2) {
dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
- err = -ENODEV;
- goto out_put_dev;
+ return -ENODEV;
}
/*
@@ -2598,7 +2580,7 @@ static int __init caam_qi_algapi_init(void)
err = crypto_register_skcipher(&t_alg->skcipher);
if (err) {
- dev_warn(priv->qidev, "%s alg registration failed\n",
+ dev_warn(ctrldev, "%s alg registration failed\n",
t_alg->skcipher.base.cra_driver_name);
continue;
}
@@ -2654,16 +2636,7 @@ static int __init caam_qi_algapi_init(void)
}
if (registered)
- dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
+ dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
-out_put_dev:
- put_device(ctrldev);
return err;
}
-
-module_init(caam_qi_algapi_init);
-module_exit(caam_qi_algapi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
-MODULE_AUTHOR("Freescale Semiconductor");
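/*
 * Review note: caam_qi_algapi_init()/caam_qi_algapi_exit() stop being a
 * self-registering module. The controller driver now calls them directly
 * with its own struct device, so the of_find_compatible_node()/
 * of_find_device_by_node() lookup, the priv->qi_present probing and the
 * module_init()/module_exit() plus MODULE_* boilerplate above are all
 * dropped.
 */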
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 2b2980a8a9b9..06bf32c32cbd 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2015-2016 Freescale Semiconductor Inc.
- * Copyright 2017-2018 NXP
+ * Copyright 2017-2019 NXP
*/
#include "compat.h"
@@ -140,7 +140,8 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq)
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents, dma_addr_t iv_dma, int ivsize,
- dma_addr_t qm_sg_dma, int qm_sg_bytes)
+ enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
+ int qm_sg_bytes)
{
if (dst != src) {
if (src_nents)
@@ -152,7 +153,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
if (qm_sg_bytes)
dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
@@ -371,6 +372,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ int src_len, dst_len = 0;
struct aead_edesc *edesc;
dma_addr_t qm_sg_dma, iv_dma = 0;
int ivsize = 0;
@@ -387,23 +389,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
}
if (unlikely(req->dst != req->src)) {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen);
+ src_len = req->assoclen + req->cryptlen;
+ dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen);
+ src_len);
qi_cache_free(edesc);
return ERR_PTR(src_nents);
}
- dst_nents = sg_nents_for_len(req->dst, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize :
- (-authsize)));
+ dst_nents = sg_nents_for_len(req->dst, dst_len);
if (unlikely(dst_nents < 0)) {
dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : (-authsize)));
+ dst_len);
qi_cache_free(edesc);
return ERR_PTR(dst_nents);
}
@@ -434,13 +434,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
mapped_dst_nents = 0;
}
} else {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len = req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0);
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len);
qi_cache_free(edesc);
return ERR_PTR(src_nents);
}
@@ -460,9 +460,25 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
/*
* Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
* Input is not contiguous.
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries. Logic:
+ * if (src != dst && output S/G)
+ * pad output S/G, if needed
+ * else if (src == dst && S/G)
+ * overlapping S/Gs; pad one of them
+ * else if (input S/G) ...
+ * pad input S/G, if needed
*/
- qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
- (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
+ if (mapped_dst_nents > 1)
+ qm_sg_nents += pad_sg_nents(mapped_dst_nents);
+ else if ((req->src == req->dst) && (mapped_src_nents > 1))
+ qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
+ 1 + !!ivsize +
+ pad_sg_nents(mapped_src_nents));
+ else
+ qm_sg_nents = pad_sg_nents(qm_sg_nents);
+
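/*
 * Review worked example for the overlapping (src == dst) AEAD case, again
 * assuming 4-entry padding: with ivsize != 0 and mapped_src_nents = 5,
 * qm_sg_nents starts at 1 + 1 + 5 = 7 and becomes
 * max(pad_sg_nents(7), 1 + 1 + pad_sg_nents(5)) = max(8, 10) = 10, so the
 * output table that starts two entries into the buffer can also be
 * prefetched in full.
 */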
sg_table = &edesc->sgt[0];
qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
@@ -470,7 +486,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_nents, ivsize);
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -485,7 +501,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(dev, iv_dma)) {
dev_err(dev, "unable to map IV\n");
caam_unmap(dev, req->src, req->dst, src_nents,
- dst_nents, 0, 0, 0, 0);
+ dst_nents, 0, 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -509,7 +525,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(dev, edesc->assoclen_dma)) {
dev_err(dev, "unable to map assoclen\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -520,19 +536,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
qm_sg_index++;
}
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
qm_sg_index += mapped_src_nents;
if (mapped_dst_nents > 1)
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- qm_sg_index, 0);
+ sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(dev, qm_sg_dma)) {
dev_err(dev, "unable to map S/G table\n");
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -559,6 +574,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dpaa2_fl_set_addr(out_fle, qm_sg_dma +
(1 + !!ivsize) * sizeof(*sg_table));
}
+ } else if (!mapped_dst_nents) {
+ /*
+ * crypto engine requires the output entry to be present when
+ * "frame list" FD is used.
+ * Since engine does not support FMT=2'b11 (unused entry type),
+ * leaving out_fle zeroized is the best option.
+ */
+ goto skip_out_fle;
} else if (mapped_dst_nents == 1) {
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
@@ -570,6 +593,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dpaa2_fl_set_len(out_fle, out_len);
+skip_out_fle:
return edesc;
}
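/*
 * Review note: the !mapped_dst_nents branch covers requests with a
 * zero-length output - for instance (an assumption consistent with the
 * length math above) an AEAD decrypt of an empty plaintext, where the input
 * is nothing but the ICV. The frame-list format has no "unused entry"
 * encoding, so the zeroized out_fle is left in place instead of being given
 * a format and address.
 */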
@@ -1077,14 +1101,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
qm_sg_ents = 1 + mapped_src_nents;
dst_sg_idx = qm_sg_ents;
- qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ /*
+ * Input, output HW S/G tables: [IV, src][dst, IV]
+ * IV entries point to the same buffer
+ * If src == dst, S/G entries are reused (S/G tables overlap)
+ *
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries.
+ */
+ if (req->src != req->dst)
+ qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
+ else
+ qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
+
qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
ivsize > CAAM_QI_MEMCACHE_SIZE)) {
dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1093,7 +1129,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
if (unlikely(!edesc)) {
dev_err(dev, "could not allocate extended descriptor\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1102,11 +1138,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
iv = (u8 *)(sg_table + qm_sg_ents);
memcpy(iv, req->iv, ivsize);
- iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, iv_dma)) {
dev_err(dev, "unable to map IV\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1117,18 +1153,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
edesc->qm_sg_bytes = qm_sg_bytes;
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+ sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
- if (mapped_dst_nents > 1)
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- dst_sg_idx, 0);
+ if (req->src != req->dst)
+ sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
+
+ dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
+ ivsize, 0);
edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
dev_err(dev, "unable to map S/G table\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1136,23 +1174,19 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
- dpaa2_fl_set_len(out_fle, req->cryptlen);
+ dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
- if (req->src == req->dst) {
- dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+
+ if (req->src == req->dst)
dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
sizeof(*sg_table));
- } else if (mapped_dst_nents > 1) {
- dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ else
dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
sizeof(*sg_table));
- } else {
- dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
- }
return edesc;
}
@@ -1164,7 +1198,8 @@ static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
int ivsize = crypto_aead_ivsize(aead);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
@@ -1175,7 +1210,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
}
static void aead_encrypt_done(void *cbk_ctx, u32 status)
@@ -1324,7 +1360,7 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
@@ -1332,10 +1368,10 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
/*
* The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block. This is used e.g. by the CTS mode.
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
*/
- scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
- ivsize, 0);
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
qi_cache_free(edesc);
skcipher_request_complete(req, ecode);
@@ -1362,11 +1398,19 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
skcipher_unmap(ctx->dev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
+ */
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
+
qi_cache_free(edesc);
skcipher_request_complete(req, ecode);
}
@@ -1405,7 +1449,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct caam_request *caam_req = skcipher_request_ctx(req);
- int ivsize = crypto_skcipher_ivsize(skcipher);
int ret;
/* allocate extended descriptor */
@@ -1413,13 +1456,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- /*
- * The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block.
- */
- scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
- ivsize, 0);
-
caam_req->flc = &ctx->flc[DECRYPT];
caam_req->flc_dma = ctx->flc_dma[DECRYPT];
caam_req->cbk = skcipher_decrypt_done;
@@ -3380,9 +3416,9 @@ static int ahash_update_ctx(struct ahash_request *req)
if (to_hash) {
struct dpaa2_sg_entry *sg_table;
+ int src_len = req->nbytes - *next_buflen;
- src_nents = sg_nents_for_len(req->src,
- req->nbytes - (*next_buflen));
+ src_nents = sg_nents_for_len(req->src, src_len);
if (src_nents < 0) {
dev_err(ctx->dev, "Invalid number of src SG.\n");
return src_nents;
@@ -3409,7 +3445,7 @@ static int ahash_update_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
qm_sg_src_index = 1 + (*buflen ? 1 : 0);
- qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
+ qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
sizeof(*sg_table);
sg_table = &edesc->sgt[0];
@@ -3423,7 +3459,7 @@ static int ahash_update_ctx(struct ahash_request *req)
goto unmap_ctx;
if (mapped_nents) {
- sg_to_qm_sg_last(req->src, mapped_nents,
+ sg_to_qm_sg_last(req->src, src_len,
sg_table + qm_sg_src_index, 0);
if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src,
@@ -3494,7 +3530,7 @@ static int ahash_final_ctx(struct ahash_request *req)
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int buflen = *current_buflen(state);
- int qm_sg_bytes, qm_sg_src_index;
+ int qm_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
struct dpaa2_sg_entry *sg_table;
@@ -3505,8 +3541,7 @@ static int ahash_final_ctx(struct ahash_request *req)
if (!edesc)
return -ENOMEM;
- qm_sg_src_index = 1 + (buflen ? 1 : 0);
- qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
+ qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
sg_table = &edesc->sgt[0];
ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
@@ -3518,7 +3553,7 @@ static int ahash_final_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
+ dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
DMA_TO_DEVICE);
@@ -3599,7 +3634,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
qm_sg_src_index = 1 + (buflen ? 1 : 0);
- qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
+ qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
+ sizeof(*sg_table);
sg_table = &edesc->sgt[0];
ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
@@ -3611,7 +3647,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
+ sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
DMA_TO_DEVICE);
@@ -3696,8 +3732,8 @@ static int ahash_digest(struct ahash_request *req)
int qm_sg_bytes;
struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
- qm_sg_bytes = mapped_nents * sizeof(*sg_table);
- sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+ qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
+ sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
qm_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
@@ -3840,9 +3876,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
if (to_hash) {
struct dpaa2_sg_entry *sg_table;
+ int src_len = req->nbytes - *next_buflen;
- src_nents = sg_nents_for_len(req->src,
- req->nbytes - *next_buflen);
+ src_nents = sg_nents_for_len(req->src, src_len);
if (src_nents < 0) {
dev_err(ctx->dev, "Invalid number of src SG.\n");
return src_nents;
@@ -3868,14 +3904,15 @@ static int ahash_update_no_ctx(struct ahash_request *req)
}
edesc->src_nents = src_nents;
- qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
+ qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
+ sizeof(*sg_table);
sg_table = &edesc->sgt[0];
ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
if (ret)
goto unmap_ctx;
- sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+ sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src,
@@ -3987,14 +4024,14 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
}
edesc->src_nents = src_nents;
- qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
+ qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
sg_table = &edesc->sgt[0];
ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
if (ret)
goto unmap;
- sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+ sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
DMA_TO_DEVICE);
@@ -4064,9 +4101,9 @@ static int ahash_update_first(struct ahash_request *req)
if (to_hash) {
struct dpaa2_sg_entry *sg_table;
+ int src_len = req->nbytes - *next_buflen;
- src_nents = sg_nents_for_len(req->src,
- req->nbytes - (*next_buflen));
+ src_nents = sg_nents_for_len(req->src, src_len);
if (src_nents < 0) {
dev_err(ctx->dev, "Invalid number of src SG.\n");
return src_nents;
@@ -4101,8 +4138,9 @@ static int ahash_update_first(struct ahash_request *req)
if (mapped_nents > 1) {
int qm_sg_bytes;
- sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
- qm_sg_bytes = mapped_nents * sizeof(*sg_table);
+ sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
+ qm_sg_bytes = pad_sg_nents(mapped_nents) *
+ sizeof(*sg_table);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
qm_sg_bytes,
DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 7205d9f4029e..e4ac5d591ad6 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -82,14 +82,6 @@
#define HASH_MSG_LEN 8
#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
-#ifdef DEBUG
-/* for print_hex_dumps with line references */
-#define debug(format, arg...) printk(format, arg)
-#else
-#define debug(format, arg...)
-#endif
-
-
static struct list_head hash_list;
/* ahash per-session context */
@@ -243,11 +235,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
ctx->ctx_len, true, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ahash update shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+
+ print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
@@ -255,11 +246,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
ctx->ctx_len, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ahash update first shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
+ ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
@@ -267,11 +256,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
ctx->ctx_len, true, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+
+ print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
@@ -279,12 +267,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
ctx->ctx_len, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ahash digest shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+
+ print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
return 0;
}
@@ -328,9 +314,9 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
ctx->ctx_len, ctx->key_dma);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(desc), ctx->dir);
- print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
- 1);
+ print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
+ " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
/* shared descriptor for ahash_digest */
desc = ctx->sh_desc_digest;
@@ -377,8 +363,8 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash)
ctx->ctx_len, 0);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(desc), ctx->dir);
- print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
+ " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
/* shared descriptor for ahash_digest */
@@ -429,12 +415,11 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
result.err = 0;
init_completion(&result.completion);
@@ -444,11 +429,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
/* in progress */
wait_for_completion(&result.completion);
ret = result.err;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "digested key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1);
-#endif
+
+ print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key,
+ digestsize, 1);
}
dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
@@ -463,15 +447,14 @@ static int ahash_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret;
u8 *hashed_key = NULL;
-#ifdef DEBUG
- printk(KERN_ERR "keylen %d\n", keylen);
-#endif
+ dev_dbg(jrdev, "keylen %d\n", keylen);
if (keylen > blocksize) {
hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
@@ -600,11 +583,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
@@ -614,11 +595,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
memcpy(req->result, state->caam_ctx, digestsize);
kfree(edesc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
-#endif
+ print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
req->base.complete(&req->base, err);
}
@@ -631,11 +610,9 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
int digestsize = crypto_ahash_digestsize(ahash);
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
@@ -645,15 +622,13 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
switch_buf(state);
kfree(edesc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
+ print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
if (req->result)
- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
-#endif
+ print_hex_dump_debug("result@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
req->base.complete(&req->base, err);
}
@@ -666,11 +641,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
@@ -680,11 +653,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
memcpy(req->result, state->caam_ctx, digestsize);
kfree(edesc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
-#endif
+ print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
req->base.complete(&req->base, err);
}
@@ -697,11 +668,9 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
int digestsize = crypto_ahash_digestsize(ahash);
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
@@ -711,15 +680,13 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
switch_buf(state);
kfree(edesc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
+ print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
if (req->result)
- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
-#endif
+ print_hex_dump_debug("result@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
req->base.complete(&req->base, err);
}
@@ -759,9 +726,10 @@ static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
if (nents > 1 || first_sg) {
struct sec4_sg_entry *sg = edesc->sec4_sg;
- unsigned int sgsize = sizeof(*sg) * (first_sg + nents);
+ unsigned int sgsize = sizeof(*sg) *
+ pad_sg_nents(first_sg + nents);
- sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
+ sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
if (dma_mapping_error(ctx->jrdev, src_dma)) {
@@ -819,8 +787,10 @@ static int ahash_update_ctx(struct ahash_request *req)
}
if (to_hash) {
- src_nents = sg_nents_for_len(req->src,
- req->nbytes - (*next_buflen));
+ int pad_nents;
+ int src_len = req->nbytes - *next_buflen;
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
@@ -838,15 +808,14 @@ static int ahash_update_ctx(struct ahash_request *req)
}
sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
- sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
- sizeof(struct sec4_sg_entry);
+ pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
+ sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
- ctx->sh_desc_update,
+ edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
ctx->sh_desc_update_dma, flags);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
@@ -866,7 +835,7 @@ static int ahash_update_ctx(struct ahash_request *req)
goto unmap_ctx;
if (mapped_nents)
- sg_to_sec4_sg_last(req->src, mapped_nents,
+ sg_to_sec4_sg_last(req->src, src_len,
edesc->sec4_sg + sec4_sg_src_index,
0);
else
@@ -893,11 +862,9 @@ static int ahash_update_ctx(struct ahash_request *req)
append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
if (ret)
@@ -910,13 +877,12 @@ static int ahash_update_ctx(struct ahash_request *req)
*buflen = *next_buflen;
*next_buflen = last_buflen;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- *next_buflen, 1);
-#endif
+
+ print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+ *next_buflen, 1);
return ret;
unmap_ctx:
@@ -935,18 +901,17 @@ static int ahash_final_ctx(struct ahash_request *req)
GFP_KERNEL : GFP_ATOMIC;
int buflen = *current_buflen(state);
u32 *desc;
- int sec4_sg_bytes, sec4_sg_src_index;
+ int sec4_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret;
- sec4_sg_src_index = 1 + (buflen ? 1 : 0);
- sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
+ sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
+ sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
- ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
- flags);
+ edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
+ ctx->sh_desc_fin_dma, flags);
if (!edesc)
return -ENOMEM;
@@ -963,7 +928,7 @@ static int ahash_final_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
+ sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
@@ -977,10 +942,9 @@ static int ahash_final_ctx(struct ahash_request *req)
LDST_SGF);
append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
if (ret)
@@ -1058,10 +1022,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
if (ret)
@@ -1135,10 +1098,9 @@ static int ahash_digest(struct ahash_request *req)
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
if (!ret) {
@@ -1190,10 +1152,9 @@ static int ahash_final_no_ctx(struct ahash_request *req)
if (ret)
goto unmap;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
if (!ret) {
@@ -1246,8 +1207,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
}
if (to_hash) {
- src_nents = sg_nents_for_len(req->src,
- req->nbytes - *next_buflen);
+ int pad_nents;
+ int src_len = req->nbytes - *next_buflen;
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
@@ -1264,14 +1227,14 @@ static int ahash_update_no_ctx(struct ahash_request *req)
mapped_nents = 0;
}
- sec4_sg_bytes = (1 + mapped_nents) *
- sizeof(struct sec4_sg_entry);
+ pad_nents = pad_sg_nents(1 + mapped_nents);
+ sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
+ edesc = ahash_edesc_alloc(ctx, pad_nents,
ctx->sh_desc_update_first,
ctx->sh_desc_update_first_dma,
flags);
@@ -1287,8 +1250,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- sg_to_sec4_sg_last(req->src, mapped_nents,
- edesc->sec4_sg + 1, 0);
+ sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
if (*next_buflen) {
scatterwalk_map_and_copy(next_buf, req->src,
@@ -1313,11 +1275,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
if (ret)
@@ -1333,13 +1293,12 @@ static int ahash_update_no_ctx(struct ahash_request *req)
*buflen = *next_buflen;
*next_buflen = 0;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- *next_buflen, 1);
-#endif
+
+ print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
return ret;
unmap_ctx:
@@ -1414,10 +1373,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
if (ret)
goto unmap;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
if (!ret) {
@@ -1517,11 +1475,9 @@ static int ahash_update_first(struct ahash_request *req)
if (ret)
goto unmap_ctx;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
if (ret)
@@ -1539,11 +1495,10 @@ static int ahash_update_first(struct ahash_request *req)
req->nbytes, 0);
switch_buf(state);
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- *next_buflen, 1);
-#endif
+
+ print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
return ret;
unmap_ctx:
@@ -1930,7 +1885,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
caam_jr_free(ctx->jrdev);
}
-static void __exit caam_algapi_hash_exit(void)
+void caam_algapi_hash_exit(void)
{
struct caam_hash_alg *t_alg, *n;
@@ -1988,40 +1943,13 @@ caam_hash_alloc(struct caam_hash_template *template,
return t_alg;
}
-static int __init caam_algapi_hash_init(void)
+int caam_algapi_hash_init(struct device *ctrldev)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
int i = 0, err = 0;
- struct caam_drv_private *priv;
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
unsigned int md_limit = SHA512_DIGEST_SIZE;
u32 md_inst, md_vid;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev) {
- of_node_put(dev_node);
- return -ENODEV;
- }
-
- priv = dev_get_drvdata(&pdev->dev);
- of_node_put(dev_node);
-
- /*
- * If priv is NULL, it's probably because the caam driver wasn't
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
- */
- if (!priv) {
- err = -ENODEV;
- goto out_put_dev;
- }
-
/*
* Register crypto algorithms the device supports. First, identify
* presence and attributes of MD block.
@@ -2042,10 +1970,8 @@ static int __init caam_algapi_hash_init(void)
* Skip registration of any hashing algorithms if MD block
* is not present.
*/
- if (!md_inst) {
- err = -ENODEV;
- goto out_put_dev;
- }
+ if (!md_inst)
+ return -ENODEV;
/* Limit digest size based on LP256 */
if (md_vid == CHA_VER_VID_MD_LP256)
@@ -2102,14 +2028,5 @@ static int __init caam_algapi_hash_init(void)
list_add_tail(&t_alg->entry, &hash_list);
}
-out_put_dev:
- put_device(&pdev->dev);
return err;
}
-
-module_init(caam_algapi_hash_init);
-module_exit(caam_algapi_hash_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
-MODULE_AUTHOR("Freescale Semiconductor - NMG");
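Editor's sketch: the recurring change through caamhash.c (and the other CAAM files below) swaps compile-time #ifdef DEBUG dump blocks for print_hex_dump_debug(), which is always compiled in and gated at runtime by dynamic debug. A minimal user-space sketch of that trade-off; hex_dump_debug() and debug_enabled are made-up stand-ins for the kernel API and its dynamic-debug control:

#include <stdio.h>

/* runtime switch, like the dynamic debug control the kernel helper honours */
static int debug_enabled;

static void hex_dump_debug(const char *prefix, const void *buf, size_t len)
{
	const unsigned char *p = buf;

	if (!debug_enabled)	/* call site stays; output is gated at runtime */
		return;

	printf("%s", prefix);
	for (size_t i = 0; i < len; i++)
		printf("%02x%c", p[i], (i % 16 == 15 || i == len - 1) ? '\n' : ' ');
}

int main(void)
{
	unsigned char desc[8] = { 0xb0, 0x86, 0x00, 0x08, 0xde, 0xad, 0xbe, 0xef };

	hex_dump_debug("jobdesc: ", desc, sizeof(desc));	/* silent */
	debug_enabled = 1;
	hex_dump_debug("jobdesc: ", desc, sizeof(desc));	/* prints one line */
	return 0;
}

Unlike the old #ifdef blocks, the dump code is exercised by every build, so it cannot bit-rot while DEBUG is off.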
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index fe24485274e1..80574106af29 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for Public Key Cryptography
*
* Copyright 2016 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
*
* There is no Shared Descriptor for PKC so that the Job Descriptor must carry
* all the desired key parameters, input and output pointers.
@@ -24,12 +24,18 @@
sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
sizeof(struct rsa_priv_f3_pdb))
+#define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */
+
+/* buffer filled with zeros, used for padding */
+static u8 *zero_buffer;
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
{
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
- dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);
if (edesc->sec4_sg_bytes)
dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
@@ -168,6 +174,13 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
akcipher_request_complete(req, err);
}
+/**
+ * Count the leading zeros, which need to be stripped, from a given scatterlist
+ *
+ * @sgl : scatterlist to count zeros from
+ * @nbytes: maximum number of leading zero bytes to strip
+ * @flags : operation flags
+ */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
unsigned int nbytes,
unsigned int flags)
@@ -187,7 +200,8 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
lzeros = 0;
len = 0;
while (nbytes > 0) {
- while (len && !*buff) {
+ /* do not strip more than given bytes */
+ while (len && !*buff && lzeros < nbytes) {
lzeros++;
len--;
buff++;
@@ -218,6 +232,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct device *dev = ctx->dev;
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+ struct caam_rsa_key *key = &ctx->key;
struct rsa_edesc *edesc;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
@@ -225,22 +240,45 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
int sgc;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
int src_nents, dst_nents;
+ unsigned int diff_size = 0;
int lzeros;
- lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
- if (lzeros < 0)
- return ERR_PTR(lzeros);
-
- req->src_len -= lzeros;
- req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
+ if (req->src_len > key->n_sz) {
+ /*
+ * strip leading zeros and
+ * return the number of zeros to skip
+ */
+ lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
+ key->n_sz, sg_flags);
+ if (lzeros < 0)
+ return ERR_PTR(lzeros);
+
+ req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
+ lzeros);
+ req_ctx->fixup_src_len = req->src_len - lzeros;
+ } else {
+ /*
+		 * input src is less than the key modulus (n),
+ * so there will be zero padding
+ */
+ diff_size = key->n_sz - req->src_len;
+ req_ctx->fixup_src = req->src;
+ req_ctx->fixup_src_len = req->src_len;
+ }
- src_nents = sg_nents_for_len(req->src, req->src_len);
+ src_nents = sg_nents_for_len(req_ctx->fixup_src,
+ req_ctx->fixup_src_len);
dst_nents = sg_nents_for_len(req->dst, req->dst_len);
- if (src_nents > 1)
- sec4_sg_len = src_nents;
+ if (!diff_size && src_nents == 1)
+ sec4_sg_len = 0; /* no need for an input hw s/g table */
+ else
+ sec4_sg_len = src_nents + !!diff_size;
+ sec4_sg_index = sec4_sg_len;
if (dst_nents > 1)
- sec4_sg_len += dst_nents;
+ sec4_sg_len += pad_sg_nents(dst_nents);
+ else
+ sec4_sg_len = pad_sg_nents(sec4_sg_len);
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
@@ -250,7 +288,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
if (!edesc)
return ERR_PTR(-ENOMEM);
- sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
if (unlikely(!sgc)) {
dev_err(dev, "unable to map source\n");
goto src_fail;
@@ -263,14 +301,16 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
}
edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
+ if (diff_size)
+ dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
+ 0);
+
+ if (sec4_sg_index)
+ sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
+ edesc->sec4_sg + !!diff_size, 0);
- sec4_sg_index = 0;
- if (src_nents > 1) {
- sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
- sec4_sg_index += src_nents;
- }
if (dst_nents > 1)
- sg_to_sec4_sg_last(req->dst, dst_nents,
+ sg_to_sec4_sg_last(req->dst, req->dst_len,
edesc->sec4_sg + sec4_sg_index, 0);
/* Save nents for later use in Job Descriptor */
@@ -289,12 +329,16 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
edesc->sec4_sg_bytes = sec4_sg_bytes;
+ print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+ edesc->sec4_sg_bytes, 1);
+
return edesc;
sec4_sg_fail:
dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
- dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
src_fail:
kfree(edesc);
return ERR_PTR(-ENOMEM);
@@ -304,6 +348,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
struct rsa_edesc *edesc)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *dev = ctx->dev;
@@ -328,7 +373,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
pdb->f_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->f_dma = sg_dma_address(req->src);
+ pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
}
if (edesc->dst_nents > 1) {
@@ -340,7 +385,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
}
pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
- pdb->f_len = req->src_len;
+ pdb->f_len = req_ctx->fixup_src_len;
return 0;
}
@@ -373,7 +418,9 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->g_dma = sg_dma_address(req->src);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+ pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
if (edesc->dst_nents > 1) {
@@ -436,7 +483,9 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->g_dma = sg_dma_address(req->src);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+ pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
if (edesc->dst_nents > 1) {
@@ -523,7 +572,9 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->g_dma = sg_dma_address(req->src);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+ pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
if (edesc->dst_nents > 1) {
@@ -978,6 +1029,15 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(ctx->dev);
}
+ ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
+ CAAM_RSA_MAX_INPUT_SIZE - 1,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
+ dev_err(ctx->dev, "unable to map padding\n");
+ caam_jr_free(ctx->dev);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -987,6 +1047,8 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct caam_rsa_key *key = &ctx->key;
+ dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
+ 1, DMA_TO_DEVICE);
caam_rsa_free_key(key);
caam_jr_free(ctx->dev);
}
@@ -1010,41 +1072,12 @@ static struct akcipher_alg caam_rsa = {
};
/* Public Key Cryptography module initialization handler */
-static int __init caam_pkc_init(void)
+int caam_pkc_init(struct device *ctrldev)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
u32 pk_inst;
int err;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev) {
- of_node_put(dev_node);
- return -ENODEV;
- }
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
- /*
- * If priv is NULL, it's probably because the caam driver wasn't
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
- */
- if (!priv) {
- err = -ENODEV;
- goto out_put_dev;
- }
-
/* Determine public key hardware accelerator presence. */
if (priv->era < 10)
pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
@@ -1053,31 +1086,29 @@ static int __init caam_pkc_init(void)
pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
/* Do not register algorithms if PKHA is not present. */
- if (!pk_inst) {
- err = -ENODEV;
- goto out_put_dev;
- }
+ if (!pk_inst)
+ return 0;
+
+ /* allocate zero buffer, used for padding input */
+ zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
+ GFP_KERNEL);
+ if (!zero_buffer)
+ return -ENOMEM;
err = crypto_register_akcipher(&caam_rsa);
- if (err)
+ if (err) {
+ kfree(zero_buffer);
dev_warn(ctrldev, "%s alg registration failed\n",
caam_rsa.base.cra_driver_name);
- else
+ } else {
dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
+ }
-out_put_dev:
- put_device(ctrldev);
return err;
}
-static void __exit caam_pkc_exit(void)
+void caam_pkc_exit(void)
{
+ kfree(zero_buffer);
crypto_unregister_akcipher(&caam_rsa);
}
-
-module_init(caam_pkc_init);
-module_exit(caam_pkc_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
-MODULE_AUTHOR("Freescale Semiconductor");
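Editor's sketch: the caampkc.c hunks above fix up the RSA input in two directions. Inputs longer than the modulus get their leading zeros stripped (at most src_len - n_sz of them), while shorter inputs record how much zero padding the hardware S/G table must prepend from the shared zero_buffer. rsa_fixup_input() below is a made-up helper that walks a flat buffer instead of a scatterlist:

#include <stdio.h>

/* illustrative only: operates on a flat buffer instead of a scatterlist */
static void rsa_fixup_input(const unsigned char *src, size_t src_len,
			    size_t n_sz, size_t *skip, size_t *pad)
{
	*skip = 0;
	*pad = 0;

	if (src_len > n_sz) {
		/* strip at most src_len - n_sz leading zeros */
		while (*skip < src_len - n_sz && src[*skip] == 0)
			(*skip)++;
	} else {
		/* input shorter than the modulus: zero-pad up front */
		*pad = n_sz - src_len;
	}
}

int main(void)
{
	unsigned char msg[6] = { 0x00, 0x00, 0x12, 0x34, 0x56, 0x78 };
	size_t skip, pad;

	rsa_fixup_input(msg, sizeof(msg), 4, &skip, &pad);
	printf("skip=%zu pad=%zu\n", skip, pad);	/* skip=2 pad=0 */
	return 0;
}

Either way the value handed to PKHA is exactly n_sz bytes, which is what the hardware expects.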
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index 82645bcf8b27..2c488c9a3812 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -89,18 +89,25 @@ struct caam_rsa_key {
* caam_rsa_ctx - per session context.
* @key : RSA key in DMA zone
* @dev : device structure
+ * @padding_dma : DMA address of the padding to be prepended to the input
*/
struct caam_rsa_ctx {
struct caam_rsa_key key;
struct device *dev;
+ dma_addr_t padding_dma;
};
/**
* caam_rsa_req_ctx - per request context.
- * @src: input scatterlist (stripped of leading zeros)
+ * @src : input scatterlist (stripped of leading zeros)
+ * @fixup_src : input scatterlist (that might be stripped of leading zeros)
+ * @fixup_src_len : length of the fixup_src input scatterlist
*/
struct caam_rsa_req_ctx {
struct scatterlist src[2];
+ struct scatterlist *fixup_src;
+ unsigned int fixup_src_len;
};
/**
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 95eb5402c59f..561bcb535184 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for hw_random
*
* Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -113,10 +113,8 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
/* Buffer refilled, invalidate cache */
dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
- DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
-#endif
+ print_hex_dump_debug("rng refreshed buf@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ bd->buf, RN_BUF_SIZE, 1);
}
static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
@@ -209,10 +207,10 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
- desc, desc_bytes(desc), 1);
-#endif
+
+ print_hex_dump_debug("rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
+
return 0;
}
@@ -233,10 +231,10 @@ static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
}
append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
- desc, desc_bytes(desc), 1);
-#endif
+
+ print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
+
return 0;
}
@@ -296,47 +294,20 @@ static struct hwrng caam_rng = {
.read = caam_read,
};
-static void __exit caam_rng_exit(void)
+void caam_rng_exit(void)
{
caam_jr_free(rng_ctx->jrdev);
hwrng_unregister(&caam_rng);
kfree(rng_ctx);
}
-static int __init caam_rng_init(void)
+int caam_rng_init(struct device *ctrldev)
{
struct device *dev;
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct caam_drv_private *priv;
u32 rng_inst;
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
int err;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev) {
- of_node_put(dev_node);
- return -ENODEV;
- }
-
- priv = dev_get_drvdata(&pdev->dev);
- of_node_put(dev_node);
-
- /*
- * If priv is NULL, it's probably because the caam driver wasn't
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
- */
- if (!priv) {
- err = -ENODEV;
- goto out_put_dev;
- }
-
/* Check for an instantiated RNG before registration */
if (priv->era < 10)
rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
@@ -344,16 +315,13 @@ static int __init caam_rng_init(void)
else
rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
- if (!rng_inst) {
- err = -ENODEV;
- goto out_put_dev;
- }
+ if (!rng_inst)
+ return 0;
dev = caam_jr_alloc();
if (IS_ERR(dev)) {
pr_err("Job Ring Device allocation for transform failed\n");
- err = PTR_ERR(dev);
- goto out_put_dev;
+ return PTR_ERR(dev);
}
rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
if (!rng_ctx) {
@@ -364,7 +332,6 @@ static int __init caam_rng_init(void)
if (err)
goto free_rng_ctx;
- put_device(&pdev->dev);
dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);
@@ -372,14 +339,5 @@ free_rng_ctx:
kfree(rng_ctx);
free_caam_alloc:
caam_jr_free(dev);
-out_put_dev:
- put_device(&pdev->dev);
return err;
}
-
-module_init(caam_rng_init);
-module_exit(caam_rng_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
-MODULE_AUTHOR("Freescale Semiconductor - NMG");
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index fec39c35c877..4e43ca4d3656 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -3,7 +3,7 @@
* Controller-level driver, kernel property detection, initialization
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
*/
#include <linux/device.h>
@@ -323,8 +323,8 @@ static int caam_remove(struct platform_device *pdev)
of_platform_depopulate(ctrldev);
#ifdef CONFIG_CAAM_QI
- if (ctrlpriv->qidev)
- caam_qi_shutdown(ctrlpriv->qidev);
+ if (ctrlpriv->qi_init)
+ caam_qi_shutdown(ctrldev);
#endif
/*
@@ -540,7 +540,8 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->caam_ipg = clk;
if (!of_machine_is_compatible("fsl,imx7d") &&
- !of_machine_is_compatible("fsl,imx7s")) {
+ !of_machine_is_compatible("fsl,imx7s") &&
+ !of_machine_is_compatible("fsl,imx7ulp")) {
clk = caam_drv_identify_clk(&pdev->dev, "mem");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
@@ -562,7 +563,8 @@ static int caam_probe(struct platform_device *pdev)
if (!of_machine_is_compatible("fsl,imx6ul") &&
!of_machine_is_compatible("fsl,imx7d") &&
- !of_machine_is_compatible("fsl,imx7s")) {
+ !of_machine_is_compatible("fsl,imx7s") &&
+ !of_machine_is_compatible("fsl,imx7ulp")) {
clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
@@ -702,12 +704,7 @@ static int caam_probe(struct platform_device *pdev)
}
ctrlpriv->era = caam_get_era(ctrl);
-
- ret = of_platform_populate(nprop, caam_match, NULL, dev);
- if (ret) {
- dev_err(dev, "JR platform devices creation error\n");
- goto iounmap_ctrl;
- }
+ ctrlpriv->domain = iommu_get_domain_for_dev(dev);
#ifdef CONFIG_DEBUG_FS
/*
@@ -721,19 +718,6 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
#endif
- ring = 0;
- for_each_available_child_of_node(nprop, np)
- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
- of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
- ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
- ((__force uint8_t *)ctrl +
- (ring + JR_BLOCK_NUMBER) *
- BLOCK_OFFSET
- );
- ctrlpriv->total_jobrs++;
- ring++;
- }
-
/* Check to see if (DPAA 1.x) QI present. If so, enable */
ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
if (ctrlpriv->qi_present && !caam_dpaa2) {
@@ -752,6 +736,25 @@ static int caam_probe(struct platform_device *pdev)
#endif
}
+ ret = of_platform_populate(nprop, caam_match, NULL, dev);
+ if (ret) {
+ dev_err(dev, "JR platform devices creation error\n");
+ goto shutdown_qi;
+ }
+
+ ring = 0;
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+ ((__force uint8_t *)ctrl +
+ (ring + JR_BLOCK_NUMBER) *
+ BLOCK_OFFSET
+ );
+ ctrlpriv->total_jobrs++;
+ ring++;
+ }
+
/* If no QI and no rings specified, quit and go home */
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
dev_err(dev, "no queues configured, terminating\n");
@@ -898,6 +901,11 @@ caam_remove:
caam_remove(pdev);
return ret;
+shutdown_qi:
+#ifdef CONFIG_CAAM_QI
+ if (ctrlpriv->qi_init)
+ caam_qi_shutdown(dev);
+#endif
iounmap_ctrl:
iounmap(ctrl);
disable_caam_emi_slow:
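Editor's sketch: the probe reordering above moves of_platform_populate() after QI setup, so the error path gains a matching shutdown_qi label. A minimal sketch of the goto-unwind idiom this relies on, with stub init/teardown functions standing in for the real probe steps:

#include <stdbool.h>
#include <stdio.h>

static bool init_qi(void)      { puts("qi up");   return true;  }
static void shutdown_qi(void)  { puts("qi down"); }
static bool populate_jrs(void) { puts("jr fail"); return false; }

/* later steps unwind the earlier ones in reverse order on failure */
static int probe(void)
{
	if (!init_qi())
		return -1;

	if (!populate_jrs())
		goto err_shutdown_qi;

	return 0;

err_shutdown_qi:
	shutdown_qi();
	return -1;
}

int main(void)
{
	return probe() ? 1 : 0;
}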
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 2980b8ef1fb1..5988a26a2441 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -3,6 +3,7 @@
* caam descriptor construction helper functions
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2019 NXP
*/
#ifndef DESC_CONSTR_H
@@ -37,6 +38,16 @@
extern bool caam_little_end;
+/*
+ * HW fetches 4 S/G table entries at a time, irrespective of how many entries
+ * are in the table. It's SW's responsibility to make sure these accesses
+ * do not have side effects.
+ */
+static inline int pad_sg_nents(int sg_nents)
+{
+ return ALIGN(sg_nents, 4);
+}
+
static inline int desc_len(u32 * const desc)
{
return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
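Editor's sketch: pad_sg_nents() is the helper all the padded S/G allocations above are built on. A self-contained sketch of its behaviour, assuming the usual ALIGN() round-up-to-multiple definition from include/linux/kernel.h:

#include <stdio.h>

/* usual kernel round-up macro (power-of-two alignment) */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* HW fetches S/G entries in bursts of 4, so tables are padded to that size */
static inline int pad_sg_nents(int sg_nents)
{
	return ALIGN(sg_nents, 4);
}

int main(void)
{
	/* 1..4 -> 4, 5..8 -> 8, 9 -> 12 */
	for (int n = 1; n <= 9; n++)
		printf("%d -> %d\n", n, pad_sg_nents(n));
	return 0;
}

Over-allocating to the burst size guarantees the hardware's speculative fetches never read past the table.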
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 4da844e4b61d..4f0d45865aa2 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -13,7 +13,7 @@
#ifdef DEBUG
#include <linux/highmem.h>
-void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+void caam_dump_sg(const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
size_t tlen, bool ascii)
{
@@ -35,15 +35,15 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
buf = it_page + it->offset;
len = min_t(size_t, tlen, it->length);
- print_hex_dump(level, prefix_str, prefix_type, rowsize,
- groupsize, buf, len, ascii);
+ print_hex_dump_debug(prefix_str, prefix_type, rowsize,
+ groupsize, buf, len, ascii);
tlen -= len;
kunmap_atomic(it_page);
}
}
#else
-void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+void caam_dump_sg(const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
size_t tlen, bool ascii)
{}
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 8c6b83e02a70..d9726e66edbf 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -17,7 +17,7 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
-void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+void caam_dump_sg(const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
size_t tlen, bool ascii);
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 3392615dc91b..6af84bbc612c 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -4,7 +4,7 @@
* Private/internal definitions between modules
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- *
+ * Copyright 2019 NXP
*/
#ifndef INTERN_H
@@ -63,10 +63,6 @@ struct caam_drv_private_jr {
* Driver-private storage for a single CAAM block instance
*/
struct caam_drv_private {
-#ifdef CONFIG_CAAM_QI
- struct device *qidev;
-#endif
-
/* Physical-presence section */
struct caam_ctrl __iomem *ctrl; /* controller region */
struct caam_deco __iomem *deco; /* DECO/CCB views */
@@ -74,12 +70,17 @@ struct caam_drv_private {
struct caam_queue_if __iomem *qi; /* QI control region */
struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
+ struct iommu_domain *domain;
+
/*
* Detected geometry block. Filled in from device tree if powerpc,
* or from register-based version detection code
*/
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
+#ifdef CONFIG_CAAM_QI
+ u8 qi_init; /* Nonzero if QI has been initialized */
+#endif
u8 mc_en; /* Nonzero if MC f/w is active */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
@@ -107,8 +108,95 @@ struct caam_drv_private {
#endif
};
-void caam_jr_algapi_init(struct device *dev);
-void caam_jr_algapi_remove(struct device *dev);
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+
+int caam_algapi_init(struct device *dev);
+void caam_algapi_exit(void);
+
+#else
+
+static inline int caam_algapi_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_algapi_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
+
+int caam_algapi_hash_init(struct device *dev);
+void caam_algapi_hash_exit(void);
+
+#else
+
+static inline int caam_algapi_hash_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_algapi_hash_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API
+
+int caam_pkc_init(struct device *dev);
+void caam_pkc_exit(void);
+
+#else
+
+static inline int caam_pkc_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_pkc_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
+
+int caam_rng_init(struct device *dev);
+void caam_rng_exit(void);
+
+#else
+
+static inline int caam_rng_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_rng_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
+
+#ifdef CONFIG_CAAM_QI
+
+int caam_qi_algapi_init(struct device *dev);
+void caam_qi_algapi_exit(void);
+
+#else
+
+static inline int caam_qi_algapi_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_qi_algapi_exit(void)
+{
+}
+
+#endif /* CONFIG_CAAM_QI */
#ifdef CONFIG_DEBUG_FS
static int caam_debugfs_u64_get(void *data, u64 *val)
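Editor's sketch: intern.h now follows the standard config-gated stub pattern: when a sub-module is compiled out, inline no-ops keep the callers in jr.c free of #ifdefs. A compilable sketch with a made-up CONFIG_DEMO_FEATURE option:

#include <stdio.h>

/* #define CONFIG_DEMO_FEATURE */	/* made-up option; define it to link the real module */

#ifdef CONFIG_DEMO_FEATURE
int demo_init(void);			/* provided by the optional sub-module */
void demo_exit(void);
#else
static inline int demo_init(void) { return 0; }	/* stub: pretend success */
static inline void demo_exit(void) { }
#endif

int main(void)
{
	if (demo_init())
		return 1;
	puts("caller needs no #ifdef of its own");
	demo_exit();
	return 0;
}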
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 1de2562d0982..cea811fed320 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -4,6 +4,7 @@
* JobR backend functionality
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2019 NXP
*/
#include <linux/of_irq.h>
@@ -23,6 +24,43 @@ struct jr_driver_data {
} ____cacheline_aligned;
static struct jr_driver_data driver_data;
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+static void register_algs(struct device *dev)
+{
+ mutex_lock(&algs_lock);
+
+ if (++active_devs != 1)
+ goto algs_unlock;
+
+ caam_algapi_init(dev);
+ caam_algapi_hash_init(dev);
+ caam_pkc_init(dev);
+ caam_rng_init(dev);
+ caam_qi_algapi_init(dev);
+
+algs_unlock:
+ mutex_unlock(&algs_lock);
+}
+
+static void unregister_algs(void)
+{
+ mutex_lock(&algs_lock);
+
+ if (--active_devs != 0)
+ goto algs_unlock;
+
+ caam_qi_algapi_exit();
+
+ caam_rng_exit();
+ caam_pkc_exit();
+ caam_algapi_hash_exit();
+ caam_algapi_exit();
+
+algs_unlock:
+ mutex_unlock(&algs_lock);
+}
static int caam_reset_hw_jr(struct device *dev)
{
@@ -109,6 +147,9 @@ static int caam_jr_remove(struct platform_device *pdev)
return -EBUSY;
}
+ /* Unregister JR-based RNG & crypto algorithms */
+ unregister_algs();
+
/* Remove the node from Physical JobR list maintained by driver */
spin_lock(&driver_data.jr_alloc_lock);
list_del(&jrpriv->list_node);
@@ -541,6 +582,8 @@ static int caam_jr_probe(struct platform_device *pdev)
atomic_set(&jrpriv->tfm_count, 0);
+ register_algs(jrdev->parent);
+
return 0;
}
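Editor's sketch: register_algs()/unregister_algs() above implement a mutex-protected refcount so the algorithms are registered exactly once, by the first job ring probed, and unregistered by the last one removed. A user-space sketch of the same pattern, with pthreads standing in for the kernel mutex and printf for the caam_*_init/exit calls:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t algs_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int active_devs;

static void register_algs(void)
{
	pthread_mutex_lock(&algs_lock);
	if (++active_devs == 1)		/* first job ring: register once */
		printf("registering algorithms\n");
	pthread_mutex_unlock(&algs_lock);
}

static void unregister_algs(void)
{
	pthread_mutex_lock(&algs_lock);
	if (--active_devs == 0)		/* last job ring gone: tear down */
		printf("unregistering algorithms\n");
	pthread_mutex_unlock(&algs_lock);
}

int main(void)
{
	register_algs();	/* prints */
	register_algs();	/* silent */
	unregister_algs();	/* silent */
	unregister_algs();	/* prints */
	return 0;
}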
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 8d0713fae6ac..48dd3536060d 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -16,9 +16,7 @@ void split_key_done(struct device *dev, u32 *desc, u32 err,
{
struct split_key_result *res = context;
-#ifdef DEBUG
- dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
if (err)
caam_jr_strstatus(dev, err);
@@ -55,12 +53,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
adata->keylen_pad = split_key_pad_len(adata->algtype &
OP_ALG_ALGSEL_MASK);
-#ifdef DEBUG
- dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+ dev_dbg(jrdev, "split keylen %d split keylen padded %d\n",
adata->keylen, adata->keylen_pad);
- print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
-#endif
+ print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
if (adata->keylen_pad > max_keylen)
return -EINVAL;
@@ -102,10 +98,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
append_fifo_store(desc, dma_addr, adata->keylen,
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
result.err = 0;
init_completion(&result.completion);
@@ -115,11 +110,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
/* in progress */
wait_for_completion(&result.completion);
ret = result.err;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_out,
- adata->keylen_pad, 1);
-#endif
+
+ print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_out,
+ adata->keylen_pad, 1);
}
dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL);
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 9f08f84cca59..0fe618e3804a 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -4,7 +4,7 @@
* Queue Interface backend functionality
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2017, 2019 NXP
*/
#include <linux/cpumask.h>
@@ -18,6 +18,7 @@
#include "desc_constr.h"
#define PREHDR_RSLS_SHIFT 31
+#define PREHDR_ABS BIT(25)
/*
* Use a reasonable backlog of frames (per CPU) as congestion threshold,
@@ -58,11 +59,9 @@ static DEFINE_PER_CPU(int, last_cpu);
/*
* caam_qi_priv - CAAM QI backend private params
* @cgr: QMan congestion group
- * @qi_pdev: platform device for QI backend
*/
struct caam_qi_priv {
struct qman_cgr cgr;
- struct platform_device *qi_pdev;
};
static struct caam_qi_priv qipriv ____cacheline_aligned;
@@ -95,6 +94,16 @@ static u64 times_congested;
*/
static struct kmem_cache *qi_cache;
+static void *caam_iova_to_virt(struct iommu_domain *domain,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
+
int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
struct qm_fd fd;
@@ -135,6 +144,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
const struct qm_fd *fd;
struct caam_drv_req *drv_req;
struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+ struct caam_drv_private *priv = dev_get_drvdata(qidev);
fd = &msg->ern.fd;
@@ -143,7 +153,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
return;
}
- drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
+ drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
if (!drv_req) {
dev_err(qidev,
"Can't find original request for CAAM response\n");
@@ -346,6 +356,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
*/
drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
num_words);
+ drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
dma_sync_single_for_device(qidev, drv_ctx->context_a,
sizeof(drv_ctx->sh_desc) +
@@ -401,6 +412,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
*/
drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
num_words);
+ drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
@@ -488,7 +500,7 @@ EXPORT_SYMBOL(caam_drv_ctx_rel);
void caam_qi_shutdown(struct device *qidev)
{
int i;
- struct caam_qi_priv *priv = dev_get_drvdata(qidev);
+ struct caam_qi_priv *priv = &qipriv;
const cpumask_t *cpus = qman_affine_cpus();
for_each_cpu(i, cpus) {
@@ -506,8 +518,6 @@ void caam_qi_shutdown(struct device *qidev)
qman_release_cgrid(priv->cgr.cgrid);
kmem_cache_destroy(qi_cache);
-
- platform_device_unregister(priv->qi_pdev);
}
static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
@@ -550,6 +560,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
struct caam_drv_req *drv_req;
const struct qm_fd *fd;
struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+ struct caam_drv_private *priv = dev_get_drvdata(qidev);
u32 status;
if (caam_qi_napi_schedule(p, caam_napi))
@@ -572,7 +583,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
return qman_cb_dqrr_consume;
}
- drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
+ drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
if (unlikely(!drv_req)) {
dev_err(qidev,
"Can't find original request for caam response\n");
@@ -692,33 +703,17 @@ static void free_rsp_fqs(void)
int caam_qi_init(struct platform_device *caam_pdev)
{
int err, i;
- struct platform_device *qi_pdev;
struct device *ctrldev = &caam_pdev->dev, *qidev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
- static struct platform_device_info qi_pdev_info = {
- .name = "caam_qi",
- .id = PLATFORM_DEVID_NONE
- };
-
- qi_pdev_info.parent = ctrldev;
- qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
- qi_pdev = platform_device_register_full(&qi_pdev_info);
- if (IS_ERR(qi_pdev))
- return PTR_ERR(qi_pdev);
- set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
ctrlpriv = dev_get_drvdata(ctrldev);
- qidev = &qi_pdev->dev;
-
- qipriv.qi_pdev = qi_pdev;
- dev_set_drvdata(qidev, &qipriv);
+ qidev = ctrldev;
/* Initialize the congestion detection */
err = init_cgr(qidev);
if (err) {
dev_err(qidev, "CGR initialization failed: %d\n", err);
- platform_device_unregister(qi_pdev);
return err;
}
@@ -727,7 +722,6 @@ int caam_qi_init(struct platform_device *caam_pdev)
if (err) {
dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
free_rsp_fqs();
- platform_device_unregister(qi_pdev);
return err;
}
@@ -750,15 +744,11 @@ int caam_qi_init(struct platform_device *caam_pdev)
napi_enable(irqtask);
}
- /* Hook up QI device to parent controlling caam device */
- ctrlpriv->qidev = qidev;
-
qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
SLAB_CACHE_DMA, NULL);
if (!qi_cache) {
dev_err(qidev, "Can't allocate CAAM cache\n");
free_rsp_fqs();
- platform_device_unregister(qi_pdev);
return -ENOMEM;
}
@@ -766,6 +756,8 @@ int caam_qi_init(struct platform_device *caam_pdev)
debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
&times_congested, &caam_fops_u64_ro);
#endif
+
+ ctrlpriv->qi_init = 1;
dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
return 0;
}
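Editor's sketch: caam_iova_to_virt() above handles the case where an IOMMU domain is active: the frame descriptor then carries an I/O virtual address that must be translated to a physical address before phys_to_virt(). A toy sketch with linear-offset stand-ins for the kernel's iommu_iova_to_phys() and phys_to_virt():

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;
typedef uint64_t phys_addr_t;

struct iommu_domain { uint64_t offset; };	/* toy domain: linear remap */

/* stand-in for the kernel's iommu_iova_to_phys() */
static phys_addr_t iommu_iova_to_phys(struct iommu_domain *d, dma_addr_t iova)
{
	return iova + d->offset;
}

/* stand-in for phys_to_virt() */
static void *phys_to_virt(phys_addr_t pa)
{
	return (void *)(uintptr_t)pa;
}

/* without a domain (no IOMMU), the frame address is already physical */
static void *caam_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t phys_addr = domain ? iommu_iova_to_phys(domain, iova) : iova;

	return phys_to_virt(phys_addr);
}

int main(void)
{
	struct iommu_domain dom = { .offset = 0x1000 };

	printf("%p\n", caam_iova_to_virt(&dom, 0x2000));	/* translated */
	printf("%p\n", caam_iova_to_virt(NULL, 0x2000));	/* passthrough */
	return 0;
}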
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
index b3e1aaaeffea..d56cc7efbc13 100644
--- a/drivers/crypto/caam/sg_sw_qm.h
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -54,15 +54,19 @@ static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
* but does not have final bit; instead, returns last entry
*/
static inline struct qm_sg_entry *
-sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+sg_to_qm_sg(struct scatterlist *sg, int len,
struct qm_sg_entry *qm_sg_ptr, u16 offset)
{
- while (sg_count && sg) {
- dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
- sg_dma_len(sg), offset);
+ int ent_len;
+
+ while (len) {
+ ent_len = min_t(int, sg_dma_len(sg), len);
+
+ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
+ offset);
qm_sg_ptr++;
sg = sg_next(sg);
- sg_count--;
+ len -= ent_len;
}
return qm_sg_ptr - 1;
}
@@ -71,10 +75,10 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count,
* convert scatterlist to h/w link table format
* scatterlist must have been previously dma mapped
*/
-static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
struct qm_sg_entry *qm_sg_ptr, u16 offset)
{
- qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+ qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
}
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
index c9378402a5f8..b8b737d2b0ea 100644
--- a/drivers/crypto/caam/sg_sw_qm2.h
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -25,15 +25,19 @@ static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
* but does not have final bit; instead, returns last entry
*/
static inline struct dpaa2_sg_entry *
-sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+sg_to_qm_sg(struct scatterlist *sg, int len,
struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
{
- while (sg_count && sg) {
- dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
- sg_dma_len(sg), offset);
+ int ent_len;
+
+ while (len) {
+ ent_len = min_t(int, sg_dma_len(sg), len);
+
+ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
+ offset);
qm_sg_ptr++;
sg = sg_next(sg);
- sg_count--;
+ len -= ent_len;
}
return qm_sg_ptr - 1;
}
@@ -42,11 +46,11 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count,
* convert scatterlist to h/w link table format
* scatterlist must have been previously dma mapped
*/
-static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
struct dpaa2_sg_entry *qm_sg_ptr,
u16 offset)
{
- qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+ qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
dpaa2_sg_set_final(qm_sg_ptr, true);
}
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index dbfa9fce33e0..07e1ee99273b 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -35,11 +35,9 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
SEC4_SG_OFFSET_MASK);
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
- DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
- sizeof(struct sec4_sg_entry), 1);
-#endif
+
+ print_hex_dump_debug("sec4_sg_ptr@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ sec4_sg_ptr, sizeof(struct sec4_sg_entry), 1);
}
/*
@@ -47,15 +45,19 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
* but does not have final bit; instead, returns last entry
*/
static inline struct sec4_sg_entry *
-sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
+sg_to_sec4_sg(struct scatterlist *sg, int len,
struct sec4_sg_entry *sec4_sg_ptr, u16 offset)
{
- while (sg_count) {
- dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
- sg_dma_len(sg), offset);
+ int ent_len;
+
+ while (len) {
+ ent_len = min_t(int, sg_dma_len(sg), len);
+
+ dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), ent_len,
+ offset);
sec4_sg_ptr++;
sg = sg_next(sg);
- sg_count--;
+ len -= ent_len;
}
return sec4_sg_ptr - 1;
}
@@ -72,11 +74,11 @@ static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
* convert scatterlist to h/w link table format
* scatterlist must have been previously dma mapped
*/
-static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int len,
struct sec4_sg_entry *sec4_sg_ptr,
u16 offset)
{
- sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
+ sec4_sg_ptr = sg_to_sec4_sg(sg, len, sec4_sg_ptr, offset);
sg_to_sec4_set_last(sec4_sg_ptr);
}
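Editor's sketch: the three sg_sw_*.h headers above switch the table-building loops from counting entries to consuming a byte length, clamping the last entry so the hardware table never describes more data than the request asked for. A buffer-based sketch of that loop, using a toy struct seg in place of DMA-mapped scatterlist entries:

#include <stdio.h>

struct seg { unsigned long addr; int len; };	/* toy scatterlist segment */

/*
 * Consume 'len' bytes from the segment list, clamping the final entry so the
 * table never covers more data than requested: the core change in
 * sg_to_sec4_sg()/sg_to_qm_sg().
 */
static int build_hw_table(const struct seg *sg, int len, struct seg *tbl)
{
	int n = 0;

	while (len) {
		int ent_len = sg->len < len ? sg->len : len;

		tbl[n].addr = sg->addr;
		tbl[n].len = ent_len;
		n++;
		sg++;
		len -= ent_len;
	}
	return n;	/* entries written */
}

int main(void)
{
	struct seg src[] = { { 0x1000, 64 }, { 0x2000, 64 } };
	struct seg tbl[2];
	int n = build_hw_table(src, 100, tbl);

	printf("%d entries, last len %d\n", n, tbl[n - 1].len);	/* 2, 36 */
	return 0;
}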
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index e9f4704494fb..ff3cb1f8f2b6 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -7,7 +7,6 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
-#include <crypto/crypto_wq.h>
#include <crypto/des.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
index f177b79bbab0..09c4cf2513fb 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NITROX_DEBUGFS_H
#define __NITROX_DEBUGFS_H
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.h b/drivers/crypto/cavium/nitrox/nitrox_mbx.h
index 5008399775a9..7c93d0282174 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NITROX_MBX_H
#define __NITROX_MBX_H
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index ea3d6de55ff6..58c6dddfc5e1 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -2,7 +2,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES crypto API support
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
@@ -76,8 +76,7 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
return -EINVAL;
if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
- (ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
- (ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
+ (ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
(req->nbytes & (AES_BLOCK_SIZE - 1)))
return -EINVAL;
@@ -288,7 +287,7 @@ static struct ccp_aes_def aes_algs[] = {
.version = CCP_VERSION(3, 0),
.name = "cfb(aes)",
.driver_name = "cfb-aes-ccp",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
.ivsize = AES_BLOCK_SIZE,
.alg_defaults = &ccp_aes_defaults,
},
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index cc3e96c4f5fb..f79eede71c62 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -32,56 +32,62 @@ struct ccp_tasklet_data {
};
/* Human-readable error strings */
+#define CCP_MAX_ERROR_CODE 64
static char *ccp_error_codes[] = {
"",
- "ERR 01: ILLEGAL_ENGINE",
- "ERR 02: ILLEGAL_KEY_ID",
- "ERR 03: ILLEGAL_FUNCTION_TYPE",
- "ERR 04: ILLEGAL_FUNCTION_MODE",
- "ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
- "ERR 06: ILLEGAL_FUNCTION_SIZE",
- "ERR 07: Zlib_MISSING_INIT_EOM",
- "ERR 08: ILLEGAL_FUNCTION_RSVD",
- "ERR 09: ILLEGAL_BUFFER_LENGTH",
- "ERR 10: VLSB_FAULT",
- "ERR 11: ILLEGAL_MEM_ADDR",
- "ERR 12: ILLEGAL_MEM_SEL",
- "ERR 13: ILLEGAL_CONTEXT_ID",
- "ERR 14: ILLEGAL_KEY_ADDR",
- "ERR 15: 0xF Reserved",
- "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
- "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
- "ERR 18: CMD_TIMEOUT",
- "ERR 19: IDMA0_AXI_SLVERR",
- "ERR 20: IDMA0_AXI_DECERR",
- "ERR 21: 0x15 Reserved",
- "ERR 22: IDMA1_AXI_SLAVE_FAULT",
- "ERR 23: IDMA1_AIXI_DECERR",
- "ERR 24: 0x18 Reserved",
- "ERR 25: ZLIBVHB_AXI_SLVERR",
- "ERR 26: ZLIBVHB_AXI_DECERR",
- "ERR 27: 0x1B Reserved",
- "ERR 27: ZLIB_UNEXPECTED_EOM",
- "ERR 27: ZLIB_EXTRA_DATA",
- "ERR 30: ZLIB_BTYPE",
- "ERR 31: ZLIB_UNDEFINED_SYMBOL",
- "ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
- "ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
- "ERR 34: ZLIB _VHB_ILLEGAL_FETCH",
- "ERR 35: ZLIB_UNCOMPRESSED_LEN",
- "ERR 36: ZLIB_LIMIT_REACHED",
- "ERR 37: ZLIB_CHECKSUM_MISMATCH0",
- "ERR 38: ODMA0_AXI_SLVERR",
- "ERR 39: ODMA0_AXI_DECERR",
- "ERR 40: 0x28 Reserved",
- "ERR 41: ODMA1_AXI_SLVERR",
- "ERR 42: ODMA1_AXI_DECERR",
- "ERR 43: LSB_PARITY_ERR",
+ "ILLEGAL_ENGINE",
+ "ILLEGAL_KEY_ID",
+ "ILLEGAL_FUNCTION_TYPE",
+ "ILLEGAL_FUNCTION_MODE",
+ "ILLEGAL_FUNCTION_ENCRYPT",
+ "ILLEGAL_FUNCTION_SIZE",
+ "Zlib_MISSING_INIT_EOM",
+ "ILLEGAL_FUNCTION_RSVD",
+ "ILLEGAL_BUFFER_LENGTH",
+ "VLSB_FAULT",
+ "ILLEGAL_MEM_ADDR",
+ "ILLEGAL_MEM_SEL",
+ "ILLEGAL_CONTEXT_ID",
+ "ILLEGAL_KEY_ADDR",
+ "0xF Reserved",
+ "Zlib_ILLEGAL_MULTI_QUEUE",
+ "Zlib_ILLEGAL_JOBID_CHANGE",
+ "CMD_TIMEOUT",
+ "IDMA0_AXI_SLVERR",
+ "IDMA0_AXI_DECERR",
+ "0x15 Reserved",
+ "IDMA1_AXI_SLAVE_FAULT",
+ "IDMA1_AIXI_DECERR",
+ "0x18 Reserved",
+ "ZLIBVHB_AXI_SLVERR",
+ "ZLIBVHB_AXI_DECERR",
+ "0x1B Reserved",
+ "ZLIB_UNEXPECTED_EOM",
+ "ZLIB_EXTRA_DATA",
+ "ZLIB_BTYPE",
+ "ZLIB_UNDEFINED_SYMBOL",
+ "ZLIB_UNDEFINED_DISTANCE_S",
+ "ZLIB_CODE_LENGTH_SYMBOL",
+ "ZLIB _VHB_ILLEGAL_FETCH",
+ "ZLIB_UNCOMPRESSED_LEN",
+ "ZLIB_LIMIT_REACHED",
+ "ZLIB_CHECKSUM_MISMATCH0",
+ "ODMA0_AXI_SLVERR",
+ "ODMA0_AXI_DECERR",
+ "0x28 Reserved",
+ "ODMA1_AXI_SLVERR",
+ "ODMA1_AXI_DECERR",
};
-void ccp_log_error(struct ccp_device *d, int e)
+void ccp_log_error(struct ccp_device *d, unsigned int e)
{
- dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
+ if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
+ return;
+
+ if (e < ARRAY_SIZE(ccp_error_codes))
+ dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
+ else
+ dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
}
/* List of CCPs, CCP count, read-write access lock, and access functions
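Editor's sketch: ccp_log_error() now bounds-checks the code before indexing the shortened string table: codes past the table but below the 64-entry hardware maximum get a generic message, and anything larger trips a WARN_ON(). A standalone sketch with a truncated table:

#include <stdio.h>

#define CCP_MAX_ERROR_CODE 64
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *ccp_error_codes[] = {
	"",
	"ILLEGAL_ENGINE",
	"ILLEGAL_KEY_ID",
	/* ... remaining hardware-defined strings ... */
};

static void ccp_log_error(unsigned int e)
{
	if (e >= CCP_MAX_ERROR_CODE) {	/* the driver does WARN_ON() here */
		fprintf(stderr, "bogus error code %u\n", e);
		return;
	}
	if (e < ARRAY_SIZE(ccp_error_codes))
		printf("CCP error %u: %s\n", e, ccp_error_codes[e]);
	else
		printf("CCP error %u: Unknown Error\n", e);
}

int main(void)
{
	ccp_log_error(1);	/* named code */
	ccp_log_error(50);	/* in range but past the table: Unknown Error */
	ccp_log_error(80);	/* out of range: rejected */
	return 0;
}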
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 90523a069bff..5e624920fd99 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -629,7 +629,7 @@ struct ccp5_desc {
void ccp_add_device(struct ccp_device *ccp);
void ccp_del_device(struct ccp_device *ccp);
-extern void ccp_log_error(struct ccp_device *, int);
+extern void ccp_log_error(struct ccp_device *, unsigned int);
struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
bool ccp_queues_suspended(struct ccp_device *ccp);
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index db8de89d990f..866b2e05ca77 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -2,7 +2,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -890,8 +890,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
return -EINVAL;
if (((aes->mode == CCP_AES_MODE_ECB) ||
- (aes->mode == CCP_AES_MODE_CBC) ||
- (aes->mode == CCP_AES_MODE_CFB)) &&
+ (aes->mode == CCP_AES_MODE_CBC)) &&
(aes->src_len & (AES_BLOCK_SIZE - 1)))
return -EINVAL;
@@ -1264,6 +1263,9 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
int ret;
/* Error checks */
+ if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
+ return -EINVAL;
+
if (!cmd_q->ccp->vdata->perform->des3)
return -EINVAL;
@@ -1346,8 +1348,6 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
* passthru option to convert from big endian to little endian.
*/
if (des3->mode != CCP_DES3_MODE_ECB) {
- u32 load_mode;
-
op.sb_ctx = cmd_q->sb_ctx;
ret = ccp_init_dm_workarea(&ctx, cmd_q,
@@ -1363,12 +1363,8 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (ret)
goto e_ctx;
- if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
- load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
- else
- load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
- load_mode);
+ CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_ctx;
@@ -1430,10 +1426,6 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
}
/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
- if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
- dm_offset = CCP_SB_BYTES - des3->iv_len;
- else
- dm_offset = 0;
ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
DES3_EDE_BLOCK_SIZE);
}
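Editor's sketch: ccp_run_des3_cmd() above now rejects pre-v5 hardware up front instead of carrying the v3-specific byte-swap and IV-offset fix-ups that were removed. A sketch of that gate, assuming the major/minor packing shown for CCP_VERSION() (the real macro lives in ccp-dev.h):

#include <stdio.h>

/* assumed packing; the driver's ccp-dev.h defines the real macro */
#define CCP_VERSION(v, r) (((v) << 4) | (r))

static int run_des3(unsigned int hw_version)
{
	/* DES3 is a v5+ feature; bail out early on v3 parts */
	if (hw_version < CCP_VERSION(5, 0))
		return -1;	/* -EINVAL in the driver */

	return 0;
}

int main(void)
{
	printf("v3: %d\n", run_des3(CCP_VERSION(3, 0)));	/* rejected */
	printf("v5: %d\n", run_des3(CCP_VERSION(5, 0)));	/* allowed */
	return 0;
}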
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 86ac7b443355..980aa04b655b 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -48,6 +48,7 @@ struct cc_hw_data {
};
#define CC_NUM_IDRS 4
+#define CC_HW_RESET_LOOP_COUNT 10
/* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */
static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
@@ -133,6 +134,9 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
u32 imr;
/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
+	/* if the driver is suspended, return; probably a shared interrupt */
+ if (cc_pm_is_dev_suspended(dev))
+ return IRQ_NONE;
/* read the interrupt status */
irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
@@ -188,6 +192,31 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata)
+{
+ unsigned int val;
+ unsigned int i;
+
+	/* 712/710/63 have no reset completion indication; always return true */
+ if (drvdata->hw_rev <= CC_HW_REV_712)
+ return true;
+
+ for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
+ /* in cc7x3 NVM_IS_IDLE indicates that CC reset is
+		 * completed and the device is fully functional
+ */
+ val = cc_ioread(drvdata, CC_REG(NVM_IS_IDLE));
+ if (val & CC_NVM_IS_IDLE_MASK) {
+ /* hw indicate reset completed */
+ return true;
+ }
+		/* allow other processes to be scheduled on this processor */
+ schedule();
+ }
+ /* reset not completed */
+ return false;
+}
+
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
{
unsigned int val, cache_params;
@@ -315,15 +344,6 @@ static int init_cc_resources(struct platform_device *plat_dev)
return new_drvdata->irq;
}
- rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
- IRQF_SHARED, "ccree", new_drvdata);
- if (rc) {
- dev_err(dev, "Could not register to interrupt %d\n",
- new_drvdata->irq);
- return rc;
- }
- dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
-
init_completion(&new_drvdata->hw_queue_avail);
if (!plat_dev->dev.dma_mask)
@@ -352,6 +372,11 @@ static int init_cc_resources(struct platform_device *plat_dev)
new_drvdata->sec_disabled = cc_sec_disable;
+	/* wait for Cryptocell reset completion */
+	if (!cc_wait_for_reset_completion(new_drvdata))
+		dev_err(dev, "Cryptocell reset not completed\n");
+
if (hw_rev->rev <= CC_HW_REV_712) {
/* Verify correct mapping */
val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
@@ -383,6 +408,24 @@ static int init_cc_resources(struct platform_device *plat_dev)
}
sig_cidr = val;
+ /* Check HW engine configuration */
+ val = cc_ioread(new_drvdata, CC_REG(HOST_REMOVE_INPUT_PINS));
+ switch (val) {
+ case CC_PINS_FULL:
+ /* This is fine */
+ break;
+ case CC_PINS_SLIM:
+ if (new_drvdata->std_bodies & CC_STD_NIST) {
+ dev_warn(dev, "703 mode forced due to HW configuration.\n");
+ new_drvdata->std_bodies = CC_STD_OSCCA;
+ }
+ break;
+ default:
+		dev_err(dev, "Unsupported engines configuration.\n");
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+
/* Check security disable state */
val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
val &= CC_SECURITY_DISABLED_MASK;
@@ -401,6 +444,15 @@ static int init_cc_resources(struct platform_device *plat_dev)
/* Display HW versions */
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
+ /* register the driver isr function */
+ rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
+ IRQF_SHARED, "ccree", new_drvdata);
+ if (rc) {
+ dev_err(dev, "Could not register to interrupt %d\n",
+ new_drvdata->irq);
+ goto post_clk_err;
+ }
+ dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
rc = init_cc_regs(new_drvdata, true);
if (rc) {
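Note on the hunks above: cc_wait_for_reset_completion() bounds its poll only by CC_HW_RESET_LOOP_COUNT trips through schedule(), not by time. A time-bounded variant is sketched below for comparison only; it assumes the generic readx_poll_timeout() helper from <linux/iopoll.h> and a one-line read wrapper, neither of which is part of this patch.

    #include <linux/iopoll.h>

    static u32 cc_read_nvm_idle(struct cc_drvdata *drvdata)
    {
            return cc_ioread(drvdata, CC_REG(NVM_IS_IDLE));
    }

    /* Poll for up to 100 ms, sleeping ~100 us between reads; sleeps, so it
     * is only usable from process context (probe and resume paths).
     */
    static bool cc_wait_for_reset_completion_timed(struct cc_drvdata *drvdata)
    {
            u32 val;

            if (drvdata->hw_rev <= CC_HW_REV_712)
                    return true; /* no completion indication on older HW */

            return !readx_poll_timeout(cc_read_nvm_idle, drvdata, val,
                                       val & CC_NVM_IS_IDLE_MASK,
                                       100, 100000);
    }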
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index b76181335c08..7cd99380bf1f 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -53,6 +53,9 @@ enum cc_std_body {
#define CC_COHERENT_CACHE_PARAMS 0xEEE
+#define CC_PINS_FULL 0x0
+#define CC_PINS_SLIM 0x9F
+
/* Maximum DMA mask supported by IP */
#define DMA_BIT_MASK_LEN 48
@@ -67,6 +70,8 @@ enum cc_std_body {
#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)
+#define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)
+
#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
@@ -216,6 +221,7 @@ static inline void dump_byte_array(const char *name, const u8 *the_array,
__dump_byte_array(name, the_array, size);
}
+bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata);
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
void fini_cc_regs(struct cc_drvdata *drvdata);
int cc_clk_on(struct cc_drvdata *drvdata);
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
index d0764147573f..efe3e1d8b87b 100644
--- a/drivers/crypto/ccree/cc_host_regs.h
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -114,6 +114,9 @@
#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_NVM_IS_IDLE_REG_OFFSET 0x0A10UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SHIFT 0x0UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SIZE 0x1UL
#define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL
#define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL
#define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL
@@ -203,6 +206,23 @@
#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REG_OFFSET 0x0A7CUL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SHIFT 0x0UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SHIFT 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SHIFT 0x2UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SHIFT 0x3UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SHIFT 0x4UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SHIFT 0x5UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SHIFT 0x6UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SHIFT 0x7UL
+#define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SIZE 0x1UL
// --------------------------------------
// BLOCK: ID_REGISTERS
// --------------------------------------
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 2dad9c9543c6..899a52f05b7a 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -49,6 +49,11 @@ int cc_pm_resume(struct device *dev)
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
+ /* wait for Cryptocell reset completion */
+ if (!cc_wait_for_reset_completion(drvdata)) {
+ dev_err(dev, "Cryptocell reset not completed\n");
+ return -EBUSY;
+ }
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
rc = init_cc_regs(drvdata, false);
@@ -101,6 +106,12 @@ int cc_pm_put_suspend(struct device *dev)
return rc;
}
+bool cc_pm_is_dev_suspended(struct device *dev)
+{
+ /* check device state using runtime api */
+ return pm_runtime_suspended(dev);
+}
+
int cc_pm_init(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 6190cdba5dad..a7d98a5da2e1 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -22,6 +22,7 @@ int cc_pm_suspend(struct device *dev);
int cc_pm_resume(struct device *dev);
int cc_pm_get(struct device *dev);
int cc_pm_put_suspend(struct device *dev);
+bool cc_pm_is_dev_suspended(struct device *dev);
#else
@@ -54,6 +55,12 @@ static inline int cc_pm_put_suspend(struct device *dev)
return 0;
}
+static inline bool cc_pm_is_dev_suspended(struct device *dev)
+{
+ /* if PM is not supported, the device is never suspended */
+ return false;
+}
+
#endif
#endif /*__POWER_MGR_H__*/
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h
index 2d2f186674ba..4d9063a8b10b 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.h
+++ b/drivers/crypto/hisilicon/sec/sec_drv.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#ifndef _SEC_DRV_H_
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 86c699c14f84..df43a2c6933b 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -398,6 +398,12 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
/* Processing Engine configuration */
+ /* Token & context configuration */
+ val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
+ EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX |
+ EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX;
+ writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));
+
/* H/W capabilities selection */
val = EIP197_FUNCTION_RSVD;
val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
@@ -589,9 +595,9 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
if (rdesc->result_data.error_code & 0x407f) {
/* Fatal error (bits 0-6, 14) */
dev_err(priv->dev,
- "cipher: result: result descriptor error (%d)\n",
+ "cipher: result: result descriptor error (0x%x)\n",
rdesc->result_data.error_code);
- return -EIO;
+ return -EINVAL;
} else if (rdesc->result_data.error_code == BIT(9)) {
/* Authentication failed */
return -EBADMSG;
@@ -720,11 +726,10 @@ handle_results:
}
acknowledge:
- if (i) {
+ if (i)
writel(EIP197_xDR_PROC_xD_PKT(i) |
EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
- }
/* If the number of requests overflowed the counter, try to proceed more
* requests.
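Aside on the error-path hunk above: the fatal mask is easier to audit when decomposed with the <linux/bits.h> helpers. The macro names below are invented for this sketch; the driver itself keeps the literal values.

    #define RDESC_FATAL_ERR_MASK	(GENMASK(6, 0) | BIT(14))	/* == 0x407f */
    #define RDESC_AUTH_FAILED		BIT(9)				/* maps to -EBADMSG */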
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 65624a81f0fd..e0c202f33674 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -118,6 +118,7 @@
#define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n)))
#define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n)))
#define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n)))
#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n)))
@@ -249,6 +250,11 @@
#define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN BIT(0)
#define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN BIT(1)
+/* EIP197_PE_EIP96_TOKEN_CTRL */
+#define EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES BIT(16)
+#define EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX BIT(19)
+#define EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX BIT(20)
+
/* EIP197_PE_EIP96_FUNCTION_EN */
#define EIP197_FUNCTION_RSVD (BIT(6) | BIT(15) | BIT(20) | BIT(23))
#define EIP197_PROTOCOL_HASH_ONLY BIT(0)
@@ -333,6 +339,7 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_IV3 BIT(8)
#define CONTEXT_CONTROL_DIGEST_CNT BIT(9)
#define CONTEXT_CONTROL_COUNTER_MODE BIT(10)
+#define CONTEXT_CONTROL_CRYPTO_STORE BIT(12)
#define CONTEXT_CONTROL_HASH_STORE BIT(19)
/* The hash counter given to the engine in the context has a granularity of
@@ -425,6 +432,10 @@ struct safexcel_token {
#define EIP197_TOKEN_HASH_RESULT_VERIFY BIT(16)
+#define EIP197_TOKEN_CTX_OFFSET(x) (x)
+#define EIP197_TOKEN_DIRECTION_EXTERNAL BIT(11)
+#define EIP197_TOKEN_EXEC_IF_SUCCESSFUL (0x1 << 12)
+
#define EIP197_TOKEN_STAT_LAST_HASH BIT(0)
#define EIP197_TOKEN_STAT_LAST_PACKET BIT(1)
#define EIP197_TOKEN_OPCODE_DIRECTION 0x0
@@ -432,6 +443,7 @@ struct safexcel_token {
#define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT
#define EIP197_TOKEN_OPCODE_RETRIEVE 0x4
#define EIP197_TOKEN_OPCODE_VERIFY 0xd
+#define EIP197_TOKEN_OPCODE_CTX_ACCESS 0xe
#define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0)
static inline void eip197_noop_token(struct safexcel_token *token)
@@ -442,6 +454,8 @@ static inline void eip197_noop_token(struct safexcel_token *token)
/* Instructions */
#define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c
+#define EIP197_TOKEN_INS_ORIGIN_IV0 0x14
+#define EIP197_TOKEN_INS_ORIGIN_LEN(x) ((x) << 5)
#define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5)
#define EIP197_TOKEN_INS_TYPE_HASH BIT(6)
#define EIP197_TOKEN_INS_TYPE_CRYTO BIT(7)
@@ -468,6 +482,7 @@ struct safexcel_control_data_desc {
#define EIP197_OPTION_MAGIC_VALUE BIT(0)
#define EIP197_OPTION_64BIT_CTX BIT(1)
+#define EIP197_OPTION_RC_AUTO (0x2 << 3)
#define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8)
#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10)
#define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9)
@@ -629,7 +644,7 @@ struct safexcel_ahash_export_state {
u32 digest;
u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
- u8 cache[SHA512_BLOCK_SIZE];
+ u8 cache[SHA512_BLOCK_SIZE << 1];
};
/*
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index de4be10b172f..8cdbdbe35681 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -51,6 +51,8 @@ struct safexcel_cipher_ctx {
struct safexcel_cipher_req {
enum safexcel_cipher_direction direction;
+ /* Number of result descriptors associated to the request */
+ unsigned int rdescs;
bool needs_inv;
};
@@ -59,27 +61,26 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
u32 length)
{
struct safexcel_token *token;
- unsigned offset = 0;
+ u32 offset = 0, block_sz = 0;
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
switch (ctx->alg) {
case SAFEXCEL_DES:
- offset = DES_BLOCK_SIZE / sizeof(u32);
- memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE);
+ block_sz = DES_BLOCK_SIZE;
cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
break;
case SAFEXCEL_3DES:
- offset = DES3_EDE_BLOCK_SIZE / sizeof(u32);
- memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE);
+ block_sz = DES3_EDE_BLOCK_SIZE;
cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
break;
-
case SAFEXCEL_AES:
- offset = AES_BLOCK_SIZE / sizeof(u32);
- memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+ block_sz = AES_BLOCK_SIZE;
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
break;
}
+
+ offset = block_sz / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, block_sz);
}
token = (struct safexcel_token *)(cdesc->control_data.token + offset);
@@ -91,6 +92,25 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
token[0].instructions = EIP197_TOKEN_INS_LAST |
EIP197_TOKEN_INS_TYPE_CRYTO |
EIP197_TOKEN_INS_TYPE_OUTPUT;
+
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
+ u32 last = (EIP197_MAX_TOKENS - 1) - offset;
+
+ token[last].opcode = EIP197_TOKEN_OPCODE_CTX_ACCESS;
+ token[last].packet_length = EIP197_TOKEN_DIRECTION_EXTERNAL |
+ EIP197_TOKEN_EXEC_IF_SUCCESSFUL|
+ EIP197_TOKEN_CTX_OFFSET(0x2);
+ token[last].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ token[last].instructions =
+ EIP197_TOKEN_INS_ORIGIN_LEN(block_sz / sizeof(u32)) |
+ EIP197_TOKEN_INS_ORIGIN_IV0;
+
+ /* Store the updated IV values back in the internal context
+ * registers.
+ */
+ cdesc->control_data.control1 |= CONTEXT_CONTROL_CRYPTO_STORE;
+ }
}
static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
@@ -333,7 +353,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
*ret = 0;
- do {
+ if (unlikely(!sreq->rdescs))
+ return 0;
+
+ while (sreq->rdescs--) {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
@@ -346,21 +369,15 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
*ret = safexcel_rdesc_check_errors(priv, rdesc);
ndesc++;
- } while (!rdesc->last_seg);
+ }
safexcel_complete(priv, ring);
if (src == dst) {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, cryptlen),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, cryptlen),
- DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst,
- sg_nents_for_len(dst, cryptlen),
- DMA_FROM_DEVICE);
+ dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE);
}
*should_complete = true;
@@ -385,26 +402,21 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
int i, ret = 0;
if (src == dst) {
- nr_src = dma_map_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
+ nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
DMA_BIDIRECTIONAL);
nr_dst = nr_src;
if (!nr_src)
return -EINVAL;
} else {
- nr_src = dma_map_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
+ nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
DMA_TO_DEVICE);
if (!nr_src)
return -EINVAL;
- nr_dst = dma_map_sg(priv->dev, dst,
- sg_nents_for_len(dst, totlen),
+ nr_dst = dma_map_sg(priv->dev, dst, sg_nents(dst),
DMA_FROM_DEVICE);
if (!nr_dst) {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
- DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
return -EINVAL;
}
}
@@ -454,7 +466,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
/* result descriptors */
for_each_sg(dst, sg, nr_dst, i) {
- bool first = !i, last = (i == nr_dst - 1);
+ bool first = !i, last = sg_is_last(sg);
u32 len = sg_dma_len(sg);
rdesc = safexcel_add_rdesc(priv, ring, first, last,
@@ -483,16 +495,10 @@ cdesc_rollback:
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
if (src == dst) {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
- DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst,
- sg_nents_for_len(dst, totlen),
- DMA_FROM_DEVICE);
+ dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE);
}
return ret;
@@ -501,6 +507,7 @@ cdesc_rollback:
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
int ring,
struct crypto_async_request *base,
+ struct safexcel_cipher_req *sreq,
bool *should_complete, int *ret)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
@@ -509,7 +516,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = 0;
- do {
+ if (unlikely(!sreq->rdescs))
+ return 0;
+
+ while (sreq->rdescs--) {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
@@ -522,7 +532,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = safexcel_rdesc_check_errors(priv, rdesc);
ndesc++;
- } while (!rdesc->last_seg);
+ }
safexcel_complete(priv, ring);
@@ -560,16 +570,35 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
{
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(async->tfm);
int err;
if (sreq->needs_inv) {
sreq->needs_inv = false;
- err = safexcel_handle_inv_result(priv, ring, async,
+ err = safexcel_handle_inv_result(priv, ring, async, sreq,
should_complete, ret);
} else {
err = safexcel_handle_req_result(priv, ring, async, req->src,
req->dst, req->cryptlen, sreq,
should_complete, ret);
+
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
+ u32 block_sz = 0;
+
+ switch (ctx->alg) {
+ case SAFEXCEL_DES:
+ block_sz = DES_BLOCK_SIZE;
+ break;
+ case SAFEXCEL_3DES:
+ block_sz = DES3_EDE_BLOCK_SIZE;
+ break;
+ case SAFEXCEL_AES:
+ block_sz = AES_BLOCK_SIZE;
+ break;
+ }
+
+ memcpy(req->iv, ctx->base.ctxr->data, block_sz);
+ }
}
return err;
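The memcpy() added above pairs with the CBC changes in safexcel_skcipher_token(): CONTEXT_CONTROL_CRYPTO_STORE plus the trailing CTX_ACCESS token make the engine write the updated IV into the per-request context record, and copying it back into req->iv gives chained CBC requests the output-IV semantics the skcipher API expects.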
@@ -587,7 +616,7 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
if (sreq->needs_inv) {
sreq->needs_inv = false;
- err = safexcel_handle_inv_result(priv, ring, async,
+ err = safexcel_handle_inv_result(priv, ring, async, sreq,
should_complete, ret);
} else {
err = safexcel_handle_req_result(priv, ring, async, req->src,
@@ -633,6 +662,8 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
ret = safexcel_send_req(async, ring, sreq, req->src,
req->dst, req->cryptlen, 0, 0, req->iv,
commands, results);
+
+ sreq->rdescs = *results;
return ret;
}
@@ -655,6 +686,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
req->cryptlen, req->assoclen,
crypto_aead_authsize(tfm), req->iv,
commands, results);
+ sreq->rdescs = *results;
return ret;
}
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index ac9282c1a5ec..a80a5e757b1f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -41,19 +41,21 @@ struct safexcel_ahash_req {
u64 len[2];
u64 processed[2];
- u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
dma_addr_t cache_dma;
unsigned int cache_sz;
- u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache_next[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
};
static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
{
- if (req->len[1] > req->processed[1])
- return 0xffffffff - (req->len[0] - req->processed[0]);
+ u64 len, processed;
- return req->len[0] - req->processed[0];
+ len = (0xffffffff * req->len[1]) + req->len[0];
+ processed = (0xffffffff * req->processed[1]) + req->processed[0];
+
+ return len - processed;
}
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
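The rewritten safexcel_queued_len() reconstructs a 64-bit byte count from a split counter whose high word counts wraps of the low word past 0xffffffff (see the wrap check in safexcel_ahash_update() below). A standalone sketch of the arithmetic, for illustration only:

    #include <stdint.h>

    /* word[1] counts wraps of word[0] past 0xffffffff, so the running total
     * is 0xffffffff * word[1] + word[0]; queued = total len - total processed.
     */
    static uint64_t queued_len(const uint64_t len[2], const uint64_t processed[2])
    {
            uint64_t l = 0xffffffffULL * len[1] + len[0];
            uint64_t p = 0xffffffffULL * processed[1] + processed[0];

            return l - p;
    }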
@@ -87,6 +89,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
cdesc->control_data.control0 |= ctx->alg;
cdesc->control_data.control0 |= req->digest;
+ if (!req->finish)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
+
if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
if (req->processed[0] || req->processed[1]) {
if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
@@ -105,9 +110,6 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
}
- if (!req->finish)
- cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
-
/*
* Copy the input digest if needed, and setup the context
* fields. Do this now as we need it to setup the first command
@@ -183,6 +185,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
DMA_TO_DEVICE);
sreq->cache_dma = 0;
+ sreq->cache_sz = 0;
}
if (sreq->finish)
@@ -209,11 +212,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, extra, n_cdesc = 0, ret = 0;
- u64 queued, len, cache_len;
+ int i, extra = 0, n_cdesc = 0, ret = 0;
+ u64 queued, len, cache_len, cache_max;
+
+ cache_max = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_max <<= 1;
queued = len = safexcel_queued_len(req);
- if (queued <= crypto_ahash_blocksize(ahash))
+ if (queued <= cache_max)
cache_len = queued;
else
cache_len = queued - areq->nbytes;
@@ -223,26 +230,23 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
* fit into full blocks, cache it for the next send() call.
*/
extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC &&
+ extra < crypto_ahash_blocksize(ahash))
+ extra += crypto_ahash_blocksize(ahash);
+
+ /* If this is not the last request and the queued data
+ * is a multiple of a block, cache the last one for now.
+ */
if (!extra)
- /* If this is not the last request and the queued data
- * is a multiple of a block, cache the last one for now.
- */
extra = crypto_ahash_blocksize(ahash);
- if (extra) {
- sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
- req->cache_next, extra,
- areq->nbytes - extra);
-
- queued -= extra;
- len -= extra;
+ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+ req->cache_next, extra,
+ areq->nbytes - extra);
- if (!queued) {
- *commands = 0;
- *results = 0;
- return 0;
- }
- }
+ queued -= extra;
+ len -= extra;
}
/* Add a command descriptor for the cached data, if any */
@@ -269,8 +273,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
}
/* Now handle the current ahash request buffer(s) */
- req->nents = dma_map_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes),
+ req->nents = dma_map_sg(priv->dev, areq->src, sg_nents(areq->src),
DMA_TO_DEVICE);
if (!req->nents) {
ret = -ENOMEM;
@@ -345,6 +348,7 @@ unmap_cache:
if (req->cache_dma) {
dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
DMA_TO_DEVICE);
+ req->cache_dma = 0;
req->cache_sz = 0;
}
@@ -486,7 +490,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
- memset(req, 0, sizeof(struct ahash_request));
+ memset(req, 0, EIP197_AHASH_REQ_SIZE);
/* create invalidation request */
init_completion(&result.completion);
@@ -519,10 +523,9 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
/* safexcel_ahash_cache: cache data until at least one request can be sent to
* the engine, i.e. when there is at least one block of data in the pipe.
*/
-static int safexcel_ahash_cache(struct ahash_request *areq)
+static int safexcel_ahash_cache(struct ahash_request *areq, u32 cache_max)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
u64 queued, cache_len;
/* queued: everything accepted by the driver which will be handled by
@@ -539,7 +542,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
* In case there aren't enough bytes to proceed (less than a
* block size), cache the data until we have enough.
*/
- if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
+ if (cache_len + areq->nbytes <= cache_max) {
sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
req->cache + cache_len,
areq->nbytes, 0);
@@ -599,6 +602,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+ u32 cache_max;
/* If the request is 0 length, do nothing */
if (!areq->nbytes)
@@ -608,7 +612,11 @@ static int safexcel_ahash_update(struct ahash_request *areq)
if (req->len[0] < areq->nbytes)
req->len[1]++;
- safexcel_ahash_cache(areq);
+ cache_max = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_max <<= 1;
+
+ safexcel_ahash_cache(areq, cache_max);
/*
* We're not doing partial updates when performing an hmac request.
@@ -621,7 +629,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
return safexcel_ahash_enqueue(areq);
if (!req->last_req &&
- safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
+ safexcel_queued_len(req) > cache_max)
return safexcel_ahash_enqueue(areq);
return 0;
@@ -678,6 +686,11 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct safexcel_ahash_export_state *export = out;
+ u32 cache_sz;
+
+ cache_sz = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_sz <<= 1;
export->len[0] = req->len[0];
export->len[1] = req->len[1];
@@ -687,7 +700,7 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
export->digest = req->digest;
memcpy(export->state, req->state, req->state_sz);
- memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
+ memcpy(export->cache, req->cache, cache_sz);
return 0;
}
@@ -697,12 +710,17 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
const struct safexcel_ahash_export_state *export = in;
+ u32 cache_sz;
int ret;
ret = crypto_ahash_init(areq);
if (ret)
return ret;
+ cache_sz = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_sz <<= 1;
+
req->len[0] = export->len[0];
req->len[1] = export->len[1];
req->processed[0] = export->processed[0];
@@ -710,7 +728,7 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
req->digest = export->digest;
- memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
+ memcpy(req->cache, export->cache, cache_sz);
memcpy(req->state, export->state, req->state_sz);
return 0;
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index eb75fa684876..142bc3f5c45c 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -145,6 +145,9 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
(lower_32_bits(context) & GENMASK(31, 2)) >> 2;
cdesc->control_data.context_hi = upper_32_bits(context);
+ if (priv->version == EIP197B || priv->version == EIP197D)
+ cdesc->control_data.options |= EIP197_OPTION_RC_AUTO;
+
/* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
cdesc->control_data.refresh = 2;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index e5cf3a59c420..acedafe3fa98 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -100,7 +100,7 @@ struct buffer_desc {
u16 pkt_len;
u16 buf_len;
#endif
- u32 phys_addr;
+ dma_addr_t phys_addr;
u32 __reserved[4];
struct buffer_desc *next;
enum dma_data_direction dir;
@@ -117,9 +117,9 @@ struct crypt_ctl {
u8 mode; /* NPE_OP_* operation mode */
#endif
u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
- u32 icv_rev_aes; /* icv or rev aes */
- u32 src_buf;
- u32 dst_buf;
+ dma_addr_t icv_rev_aes; /* icv or rev aes */
+ dma_addr_t src_buf;
+ dma_addr_t dst_buf;
#ifdef __ARMEB__
u16 auth_offs; /* Authentication start offset */
u16 auth_len; /* Authentication data length */
@@ -320,7 +320,8 @@ static struct crypt_ctl *get_crypt_desc_emerg(void)
}
}
-static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
+static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
+ dma_addr_t phys)
{
while (buf) {
struct buffer_desc *buf1;
@@ -602,7 +603,7 @@ static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
struct buffer_desc *buf;
int i;
u8 *pad;
- u32 pad_phys, buf_phys;
+ dma_addr_t pad_phys, buf_phys;
BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
@@ -787,7 +788,7 @@ static struct buffer_desc *chainup_buffers(struct device *dev,
for (; nbytes > 0; sg = sg_next(sg)) {
unsigned len = min(nbytes, sg->length);
struct buffer_desc *next_buf;
- u32 next_buf_phys;
+ dma_addr_t next_buf_phys;
void *ptr;
nbytes -= len;
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index bdc4c42d3ac8..f1fa637cb029 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -986,8 +986,6 @@ static int mxs_dcp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct dcp *sdcp = NULL;
int i, ret;
-
- struct resource *iores;
int dcp_vmi_irq, dcp_irq;
if (global_sdcp) {
@@ -995,7 +993,6 @@ static int mxs_dcp_probe(struct platform_device *pdev)
return -ENODEV;
}
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dcp_vmi_irq = platform_get_irq(pdev, 0);
if (dcp_vmi_irq < 0) {
dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
@@ -1013,7 +1010,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
return -ENOMEM;
sdcp->dev = dev;
- sdcp->base = devm_ioremap_resource(dev, iores);
+ sdcp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdcp->base))
return PTR_ERR(sdcp->base);
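devm_platform_ioremap_resource(), used above and again in the sahara conversion later in this series, is a straight combination of the two calls it replaces:

    /* What the helper does internally:
     *
     *	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
     *	return devm_ioremap_resource(&pdev->dev, res);
     */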
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 4acbc47973e9..e78ff5c65ed6 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -27,8 +27,6 @@ MODULE_ALIAS_CRYPTO("842-nx");
#define WORKMEM_ALIGN (CRB_ALIGN)
#define CSB_WAIT_MAX (5000) /* ms */
#define VAS_RETRIES (10)
-/* # of requests allowed per RxFIFO at a time. 0 for unlimited */
-#define MAX_CREDITS_PER_RXFIFO (1024)
struct nx842_workmem {
/* Below fields must be properly aligned */
@@ -812,7 +810,11 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
rxattr.lnotify_lpid = lpid;
rxattr.lnotify_pid = pid;
rxattr.lnotify_tid = tid;
- rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO;
+ /*
+ * Maximum RX window credits cannot exceed the number of CRBs
+ * in the RxFIFO; otherwise the RxFIFO can overrun and
+ * checkstop the system.
+ */
+ rxattr.wcreds_max = fifo_size / CRB_SIZE;
/*
* Open a VAS receive window which is used to configure RxFIFO
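As a worked example with hypothetical numbers: a 32 KiB RxFIFO holding 128-byte CRBs gives wcreds_max = 32768 / 128 = 256, whereas the removed MAX_CREDITS_PER_RXFIFO constant would have granted 1024 credits, four times what such a FIFO could actually hold.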
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index 5c4aa606208c..2de5e3672e42 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -856,7 +856,7 @@ static ssize_t nx842_##_name##_show(struct device *dev, \
rcu_read_lock(); \
local_devdata = rcu_dereference(devdata); \
if (local_devdata) \
- p = snprintf(buf, PAGE_SIZE, "%ld\n", \
+ p = snprintf(buf, PAGE_SIZE, "%lld\n", \
atomic64_read(&local_devdata->counters->_name)); \
rcu_read_unlock(); \
return p; \
@@ -909,7 +909,7 @@ static ssize_t nx842_timehist_show(struct device *dev,
}
for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
- bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n",
+ bytes = snprintf(p, bytes_remain, "%u-%uus:\t%lld\n",
i ? (2<<(i-1)) : 0, (2<<i)-1,
atomic64_read(&times[i]));
bytes_remain -= bytes;
@@ -917,7 +917,7 @@ static ssize_t nx842_timehist_show(struct device *dev,
}
/* The last bucket holds everything over
* 2<<(NX842_HIST_SLOTS - 2) us */
- bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n",
+ bytes = snprintf(p, bytes_remain, "%uus - :\t%lld\n",
2<<(NX842_HIST_SLOTS - 2),
atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
p += bytes;
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 428c273a1ab6..28817880c76d 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -569,9 +569,7 @@ static int nx_register_algs(void)
memset(&nx_driver.stats, 0, sizeof(struct nx_stats));
- rc = NX_DEBUGFS_INIT(&nx_driver);
- if (rc)
- goto out;
+ NX_DEBUGFS_INIT(&nx_driver);
nx_driver.of.status = NX_OKAY;
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index c3e54af18645..c6b5a3be02be 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -76,20 +76,12 @@ struct nx_stats {
atomic_t last_error_pid;
};
-struct nx_debugfs {
- struct dentry *dfs_root;
- struct dentry *dfs_aes_ops, *dfs_aes_bytes;
- struct dentry *dfs_sha256_ops, *dfs_sha256_bytes;
- struct dentry *dfs_sha512_ops, *dfs_sha512_bytes;
- struct dentry *dfs_errors, *dfs_last_error, *dfs_last_error_pid;
-};
-
struct nx_crypto_driver {
struct nx_stats stats;
struct nx_of of;
struct vio_dev *viodev;
struct vio_driver viodriver;
- struct nx_debugfs dfs;
+ struct dentry *dfs_root;
};
#define NX_GCM4106_NONCE_LEN (4)
@@ -177,7 +169,7 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
#define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv)
#define NX_DEBUGFS_FINI(drv) nx_debugfs_fini(drv)
-int nx_debugfs_init(struct nx_crypto_driver *);
+void nx_debugfs_init(struct nx_crypto_driver *);
void nx_debugfs_fini(struct nx_crypto_driver *);
#else
#define NX_DEBUGFS_INIT(drv) (0)
diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c
index 03e4f0363c6a..e0d44a5512ab 100644
--- a/drivers/crypto/nx/nx_debugfs.c
+++ b/drivers/crypto/nx/nx_debugfs.c
@@ -30,62 +30,37 @@
* Documentation/ABI/testing/debugfs-pfo-nx-crypto
*/
-int nx_debugfs_init(struct nx_crypto_driver *drv)
+void nx_debugfs_init(struct nx_crypto_driver *drv)
{
- struct nx_debugfs *dfs = &drv->dfs;
+ struct dentry *root;
- dfs->dfs_root = debugfs_create_dir(NX_NAME, NULL);
+ root = debugfs_create_dir(NX_NAME, NULL);
+ drv->dfs_root = root;
- dfs->dfs_aes_ops =
- debugfs_create_u32("aes_ops",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root, (u32 *)&drv->stats.aes_ops);
- dfs->dfs_sha256_ops =
- debugfs_create_u32("sha256_ops",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root,
- (u32 *)&drv->stats.sha256_ops);
- dfs->dfs_sha512_ops =
- debugfs_create_u32("sha512_ops",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root,
- (u32 *)&drv->stats.sha512_ops);
- dfs->dfs_aes_bytes =
- debugfs_create_u64("aes_bytes",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root,
- (u64 *)&drv->stats.aes_bytes);
- dfs->dfs_sha256_bytes =
- debugfs_create_u64("sha256_bytes",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root,
- (u64 *)&drv->stats.sha256_bytes);
- dfs->dfs_sha512_bytes =
- debugfs_create_u64("sha512_bytes",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root,
- (u64 *)&drv->stats.sha512_bytes);
- dfs->dfs_errors =
- debugfs_create_u32("errors",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root, (u32 *)&drv->stats.errors);
- dfs->dfs_last_error =
- debugfs_create_u32("last_error",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root,
- (u32 *)&drv->stats.last_error);
- dfs->dfs_last_error_pid =
- debugfs_create_u32("last_error_pid",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root,
- (u32 *)&drv->stats.last_error_pid);
- return 0;
+ debugfs_create_u32("aes_ops", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.aes_ops);
+ debugfs_create_u32("sha256_ops", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.sha256_ops);
+ debugfs_create_u32("sha512_ops", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.sha512_ops);
+ debugfs_create_u64("aes_bytes", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u64 *)&drv->stats.aes_bytes);
+ debugfs_create_u64("sha256_bytes", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u64 *)&drv->stats.sha256_bytes);
+ debugfs_create_u64("sha512_bytes", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u64 *)&drv->stats.sha512_bytes);
+ debugfs_create_u32("errors", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.errors);
+ debugfs_create_u32("last_error", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.last_error);
+ debugfs_create_u32("last_error_pid", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.last_error_pid);
}
void
nx_debugfs_fini(struct nx_crypto_driver *drv)
{
- debugfs_remove_recursive(drv->dfs.dfs_root);
+ debugfs_remove_recursive(drv->dfs_root);
}
#endif
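The conversion above follows the debugfs API contract: debugfs_create_u32()/debugfs_create_u64() are fire-and-forget, callers are not expected to check or store the returned dentries, and debugfs_remove_recursive() on the directory root tears the whole tree down. That is why struct nx_debugfs and the int return of nx_debugfs_init() can be dropped.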
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index c8d401646902..b50eb55f8f57 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -131,7 +131,6 @@ struct qat_alg_ablkcipher_ctx {
struct icp_qat_fw_la_bulk_req dec_fw_req;
struct qat_crypto_instance *inst;
struct crypto_tfm *tfm;
- spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
};
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
@@ -223,6 +222,9 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
return -EFAULT;
offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
+ if (offset < 0)
+ return -EFAULT;
+
hash_state_out = (__be32 *)(hash->sha.state1 + offset);
hash512_state_out = (__be64 *)hash_state_out;
@@ -253,7 +255,24 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
return 0;
}
-static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
+static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
+{
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_UPDATE_STATE);
+}
+
+static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
+{
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+ ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_UPDATE_STATE);
+}
+
+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ int aead)
{
header->hdr_flags =
ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
@@ -263,12 +282,12 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
QAT_COMN_PTR_TYPE_SGL);
ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_PARTIAL_NONE);
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+ if (aead)
+ qat_alg_init_hdr_no_iv_updt(header);
+ else
+ qat_alg_init_hdr_iv_updt(header);
ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_PROTO);
- ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
@@ -303,7 +322,7 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
return -EFAULT;
/* Request setup */
- qat_alg_init_common_hdr(header);
+ qat_alg_init_common_hdr(header, 1);
header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
@@ -390,7 +409,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
return -EFAULT;
/* Request setup */
- qat_alg_init_common_hdr(header);
+ qat_alg_init_common_hdr(header, 1);
header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
@@ -454,7 +473,7 @@ static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
memcpy(cd->aes.key, key, keylen);
- qat_alg_init_common_hdr(header);
+ qat_alg_init_common_hdr(header, 0);
header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
cd_pars->u.s.content_desc_params_sz =
sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
@@ -576,45 +595,52 @@ bad_key:
return -EINVAL;
}
-static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
+static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
+ unsigned int keylen)
+{
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+ memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+ memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+
+ return qat_alg_aead_init_sessions(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CBC_MODE);
+}
+
+static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key,
unsigned int keylen)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct qat_crypto_instance *inst = NULL;
+ int node = get_current_node();
struct device *dev;
+ int ret;
- if (ctx->enc_cd) {
- /* rekeying */
- dev = &GET_DEV(ctx->inst->accel_dev);
- memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
- memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
- memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
- memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
- } else {
- /* new key */
- int node = get_current_node();
- struct qat_crypto_instance *inst =
- qat_crypto_get_instance_node(node);
- if (!inst) {
- return -EINVAL;
- }
-
- dev = &GET_DEV(inst->accel_dev);
- ctx->inst = inst;
- ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
- &ctx->enc_cd_paddr,
- GFP_ATOMIC);
- if (!ctx->enc_cd) {
- return -ENOMEM;
- }
- ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
- &ctx->dec_cd_paddr,
- GFP_ATOMIC);
- if (!ctx->dec_cd) {
- goto out_free_enc;
- }
+ inst = qat_crypto_get_instance_node(node);
+ if (!inst)
+ return -EINVAL;
+ dev = &GET_DEV(inst->accel_dev);
+ ctx->inst = inst;
+ ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+ &ctx->enc_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->enc_cd) {
+ ret = -ENOMEM;
+ goto out_free_inst;
+ }
+ ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+ &ctx->dec_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->dec_cd) {
+ ret = -ENOMEM;
+ goto out_free_enc;
}
- if (qat_alg_aead_init_sessions(tfm, key, keylen,
- ICP_QAT_HW_CIPHER_CBC_MODE))
+
+ ret = qat_alg_aead_init_sessions(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CBC_MODE);
+ if (ret)
goto out_free_all;
return 0;
@@ -629,7 +655,21 @@ out_free_enc:
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
ctx->enc_cd = NULL;
- return -ENOMEM;
+out_free_inst:
+ ctx->inst = NULL;
+ qat_crypto_put_instance(inst);
+ return ret;
+}
+
+static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
+ unsigned int keylen)
+{
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (ctx->enc_cd)
+ return qat_alg_aead_rekey(tfm, key, keylen);
+ else
+ return qat_alg_aead_newkey(tfm, key, keylen);
}
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
@@ -677,8 +717,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
dma_addr_t blp;
dma_addr_t bloutp = 0;
struct scatterlist *sg;
- size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
- ((1 + n) * sizeof(struct qat_alg_buf));
+ size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
if (unlikely(!n))
return -EINVAL;
@@ -715,8 +754,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct qat_alg_buf *bufers;
n = sg_nents(sglout);
- sz_out = sizeof(struct qat_alg_buf_list) +
- ((1 + n) * sizeof(struct qat_alg_buf));
+ sz_out = struct_size(buflout, bufers, n + 1);
sg_nctr = 0;
buflout = kzalloc_node(sz_out, GFP_ATOMIC,
dev_to_node(&GET_DEV(inst->accel_dev)));
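struct_size() (from <linux/overflow.h>) computes the size of a structure with a trailing array while saturating instead of wrapping on overflow; roughly, for the calls above:

    /* struct_size(bufl, bufers, n + 1)
     *	== sizeof(*bufl) + (n + 1) * sizeof(bufl->bufers[0])
     * except that the multiply-and-add saturates to SIZE_MAX on overflow,
     * so a huge n cannot silently produce an undersized allocation.
     */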
@@ -801,11 +839,17 @@ static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
struct qat_crypto_instance *inst = ctx->inst;
struct ablkcipher_request *areq = qat_req->ablkcipher_req;
uint8_t stat_filed = qat_resp->comn_resp.comn_status;
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
qat_alg_free_bufl(inst, qat_req);
if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
res = -EINVAL;
+
+ memcpy(areq->info, qat_req->iv, AES_BLOCK_SIZE);
+ dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
+ qat_req->iv_paddr);
+
areq->base.complete(&areq->base, res);
}
@@ -905,50 +949,49 @@ static int qat_alg_aead_enc(struct aead_request *areq)
return -EINPROGRESS;
}
-static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+static int qat_alg_ablkcipher_rekey(struct qat_alg_ablkcipher_ctx *ctx,
+ const u8 *key, unsigned int keylen,
+ int mode)
+{
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+ memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+ memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+
+ return qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
+}
+
+static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx,
const u8 *key, unsigned int keylen,
int mode)
{
- struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct qat_crypto_instance *inst = NULL;
struct device *dev;
+ int node = get_current_node();
+ int ret;
- spin_lock(&ctx->lock);
- if (ctx->enc_cd) {
- /* rekeying */
- dev = &GET_DEV(ctx->inst->accel_dev);
- memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
- memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
- memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
- memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
- } else {
- /* new key */
- int node = get_current_node();
- struct qat_crypto_instance *inst =
- qat_crypto_get_instance_node(node);
- if (!inst) {
- spin_unlock(&ctx->lock);
- return -EINVAL;
- }
-
- dev = &GET_DEV(inst->accel_dev);
- ctx->inst = inst;
- ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
- &ctx->enc_cd_paddr,
- GFP_ATOMIC);
- if (!ctx->enc_cd) {
- spin_unlock(&ctx->lock);
- return -ENOMEM;
- }
- ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
- &ctx->dec_cd_paddr,
- GFP_ATOMIC);
- if (!ctx->dec_cd) {
- spin_unlock(&ctx->lock);
- goto out_free_enc;
- }
+ inst = qat_crypto_get_instance_node(node);
+ if (!inst)
+ return -EINVAL;
+ dev = &GET_DEV(inst->accel_dev);
+ ctx->inst = inst;
+ ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+ &ctx->enc_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->enc_cd) {
+ ret = -ENOMEM;
+ goto out_free_instance;
+ }
+ ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+ &ctx->dec_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->dec_cd) {
+ ret = -ENOMEM;
+ goto out_free_enc;
}
- spin_unlock(&ctx->lock);
- if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
+
+ ret = qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
+ if (ret)
goto out_free_all;
return 0;
@@ -963,7 +1006,22 @@ out_free_enc:
dma_free_coherent(dev, sizeof(*ctx->enc_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
ctx->enc_cd = NULL;
- return -ENOMEM;
+out_free_instance:
+ ctx->inst = NULL;
+ qat_crypto_put_instance(inst);
+ return ret;
+}
+
+static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+ const u8 *key, unsigned int keylen,
+ int mode)
+{
+ struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+ if (ctx->enc_cd)
+ return qat_alg_ablkcipher_rekey(ctx, key, keylen, mode);
+ else
+ return qat_alg_ablkcipher_newkey(ctx, key, keylen, mode);
}
static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
@@ -995,11 +1053,23 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int ret, ctr = 0;
+ if (req->nbytes == 0)
+ return 0;
+
+ qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
+ &qat_req->iv_paddr, GFP_ATOMIC);
+ if (!qat_req->iv)
+ return -ENOMEM;
+
ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
+ qat_req->iv_paddr);
return ret;
+ }
msg = &qat_req->req;
*msg = ctx->enc_fw_req;
@@ -1012,18 +1082,29 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
cipher_param->cipher_length = req->nbytes;
cipher_param->cipher_offset = 0;
- memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
+ cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
+ memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
qat_alg_free_bufl(ctx->inst, qat_req);
+ dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
+ qat_req->iv_paddr);
return -EBUSY;
}
return -EINPROGRESS;
}
+static int qat_alg_ablkcipher_blk_encrypt(struct ablkcipher_request *req)
+{
+ if (req->nbytes % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
+ return qat_alg_ablkcipher_encrypt(req);
+}
+
static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
@@ -1032,11 +1113,23 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int ret, ctr = 0;
+ if (req->nbytes == 0)
+ return 0;
+
+ qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
+ &qat_req->iv_paddr, GFP_ATOMIC);
+ if (!qat_req->iv)
+ return -ENOMEM;
+
ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
+ qat_req->iv_paddr);
return ret;
+ }
msg = &qat_req->req;
*msg = ctx->dec_fw_req;
@@ -1049,18 +1142,28 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
cipher_param->cipher_length = req->nbytes;
cipher_param->cipher_offset = 0;
- memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
+ cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
+ memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
qat_alg_free_bufl(ctx->inst, qat_req);
+ dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
+ qat_req->iv_paddr);
return -EBUSY;
}
return -EINPROGRESS;
}
+static int qat_alg_ablkcipher_blk_decrypt(struct ablkcipher_request *req)
+{
+ if (req->nbytes % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
+ return qat_alg_ablkcipher_decrypt(req);
+}
static int qat_alg_aead_init(struct crypto_aead *tfm,
enum icp_qat_hw_auth_algo hash,
const char *hash_name)
@@ -1119,7 +1222,6 @@ static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- spin_lock_init(&ctx->lock);
tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
ctx->tfm = tfm;
return 0;
@@ -1221,8 +1323,8 @@ static struct crypto_alg qat_algs[] = { {
.cra_u = {
.ablkcipher = {
.setkey = qat_alg_ablkcipher_cbc_setkey,
- .decrypt = qat_alg_ablkcipher_decrypt,
- .encrypt = qat_alg_ablkcipher_encrypt,
+ .decrypt = qat_alg_ablkcipher_blk_decrypt,
+ .encrypt = qat_alg_ablkcipher_blk_encrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -1233,7 +1335,7 @@ static struct crypto_alg qat_algs[] = { {
.cra_driver_name = "qat_aes_ctr",
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
@@ -1265,8 +1367,8 @@ static struct crypto_alg qat_algs[] = { {
.cra_u = {
.ablkcipher = {
.setkey = qat_alg_ablkcipher_xts_setkey,
- .decrypt = qat_alg_ablkcipher_decrypt,
- .encrypt = qat_alg_ablkcipher_encrypt,
+ .decrypt = qat_alg_ablkcipher_blk_decrypt,
+ .encrypt = qat_alg_ablkcipher_blk_encrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index dc0273fe3620..c77a80020cde 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -88,6 +88,8 @@ struct qat_crypto_request {
struct qat_crypto_request_buffs buf;
void (*cb)(struct icp_qat_fw_la_resp *resp,
struct qat_crypto_request *req);
+ void *iv;
+ dma_addr_t iv_paddr;
};
#endif
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 6b498a90181e..b0b8e3d48aef 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1384,7 +1384,6 @@ MODULE_DEVICE_TABLE(of, sahara_dt_ids);
static int sahara_probe(struct platform_device *pdev)
{
struct sahara_dev *dev;
- struct resource *res;
u32 version;
int irq;
int err;
@@ -1398,8 +1397,7 @@ static int sahara_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
/* Get the base address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs_base))
return PTR_ERR(dev->regs_base);
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index ce77e38c77e0..518e0e0b11a9 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32-crc32.o
obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 440c9f1bd006..440c9f1bd006 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 29519d1c403f..23061f2bc74b 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -349,7 +349,7 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
return -ETIMEDOUT;
if ((hdev->flags & HASH_FLAGS_HMAC) &&
- (hdev->flags & ~HASH_FLAGS_HMAC_KEY)) {
+ (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
hdev->flags |= HASH_FLAGS_HMAC_KEY;
stm32_hash_write_key(hdev);
if (stm32_hash_wait_busy(hdev))
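The one-line fix above replaces a mask test that could never mean "HMAC key not yet written". A minimal sketch of the failure, with made-up bit positions:

    /* Suppose (hypothetically) HASH_FLAGS_HMAC = BIT(0) and
     * HASH_FLAGS_HMAC_KEY = BIT(1). With flags = HASH_FLAGS_HMAC:
     *
     *	flags & ~HASH_FLAGS_HMAC_KEY	-> 0x1, true: key gets written
     *	!(flags & HASH_FLAGS_HMAC_KEY)	-> true: key gets written
     *
     * After flags |= HASH_FLAGS_HMAC_KEY, on the next transfer:
     *
     *	flags & ~HASH_FLAGS_HMAC_KEY	-> still true: key written again
     *	!(flags & HASH_FLAGS_HMAC_KEY)	-> false: key written exactly once
     */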
@@ -447,8 +447,8 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
dma_async_issue_pending(hdev->dma_lch);
- if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion,
- msecs_to_jiffies(100)))
+ if (!wait_for_completion_timeout(&hdev->dma_completion,
+ msecs_to_jiffies(100)))
err = -ETIMEDOUT;
if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 7b0c42882830..4ab14d58e85b 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -12,7 +12,7 @@
*/
#include "sun4i-ss.h"
-static int sun4i_ss_opti_poll(struct skcipher_request *areq)
+static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
@@ -114,6 +114,29 @@ release_ss:
return err;
}
+
+static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
+ int err;
+
+ skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
+ skcipher_request_set_callback(subreq, areq->base.flags, NULL,
+ NULL);
+ skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (ctx->mode & SS_DECRYPTION)
+ err = crypto_skcipher_decrypt(subreq);
+ else
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
+
+ return err;
+}
+
/* Generic function that supports SGs with sizes that are not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
@@ -140,8 +163,6 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
unsigned int todo;
struct sg_mapping_iter mi, mo;
unsigned int oi, oo; /* offset for in and out */
- char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
- char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
unsigned int ob = 0; /* offset in buf */
unsigned int obo = 0; /* offset in bufo */
unsigned int obl = 0; /* length of data in bufo */
@@ -178,20 +199,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
if (no_chunk == 1 && !need_fallback)
return sun4i_ss_opti_poll(areq);
- if (need_fallback) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
- skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
- skcipher_request_set_callback(subreq, areq->base.flags, NULL,
- NULL);
- skcipher_request_set_crypt(subreq, areq->src, areq->dst,
- areq->cryptlen, areq->iv);
- if (ctx->mode & SS_DECRYPTION)
- err = crypto_skcipher_decrypt(subreq);
- else
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
- }
+ if (need_fallback)
+ return sun4i_ss_cipher_poll_fallback(areq);
spin_lock_irqsave(&ss->slock, flags);
@@ -224,6 +233,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
while (oleft) {
if (ileft) {
+ char buf[4 * SS_RX_MAX]; /* buffer for linearizing SG src */
+
/*
* todo is the number of consecutive 4-byte words that we
* can read from the current SG
@@ -281,6 +292,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
oo = 0;
}
} else {
+ char bufo[4 * SS_TX_MAX]; /* buffer for linearizing SG dst */
+
/*
* read obl bytes into bufo; we read as much as possible in order
* to empty the device
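/*
 * The whole sun4i-ss change above is about stack frames: the fallback
 * path with its on-stack sync request moves into its own
 * noinline_for_stack helper, and the two linearization buffers are
 * scoped to the branch that uses them, so no single frame ever holds
 * everything at once. A minimal sketch of the pattern; sizes are
 * illustrative and 'process' is a hypothetical hook:
 */
static int noinline_for_stack consume_chunk(const u8 *src, size_t len)
{
	u8 buf[256];			/* only this frame pays for it */
	size_t todo = min_t(size_t, len, sizeof(buf));

	memcpy(buf, src, todo);
	return process(buf, todo);	/* hypothetical consumer */
}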
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index fbc7bf9d7380..c9d686a0e805 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -265,11 +265,11 @@ static int init_device(struct device *dev)
* callback must check err and feedback in descriptor header
* for device processing status.
*/
-int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
- void (*callback)(struct device *dev,
- struct talitos_desc *desc,
- void *context, int error),
- void *context)
+static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
+ void (*callback)(struct device *dev,
+ struct talitos_desc *desc,
+ void *context, int error),
+ void *context)
{
struct talitos_private *priv = dev_get_drvdata(dev);
struct talitos_request *request;
@@ -319,7 +319,21 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
return -EINPROGRESS;
}
-EXPORT_SYMBOL(talitos_submit);
+
+static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
+{
+ struct talitos_edesc *edesc;
+
+ if (!is_sec1)
+ return request->desc->hdr;
+
+ if (!request->desc->next_desc)
+ return request->desc->hdr1;
+
+ edesc = container_of(request->desc, struct talitos_edesc, desc);
+
+ return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
+}
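/*
 * Layout behind get_request_hdr(), as read from this file: on SEC1 a
 * two-descriptor (ahash) request keeps its second descriptor at the end
 * of the DMA-mapped area, no longer adjacent to the first, so
 * "desc + 1" is not valid anymore:
 *
 *   edesc->desc             first h/w descriptor
 *   edesc->buf[0..dma_len)  link tables / bounce data
 *   edesc->buf + dma_len    second h/w descriptor (when next_desc is set)
 *
 * An illustrative accessor (not part of the driver):
 */
static inline struct talitos_desc *second_desc(struct talitos_edesc *edesc)
{
	return (struct talitos_desc *)(edesc->buf + edesc->dma_len);
}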
/*
* process what was done, notify callback of error if not
@@ -342,12 +356,7 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
/* descriptors with their done bits set don't get the error */
rmb();
- if (!is_sec1)
- hdr = request->desc->hdr;
- else if (request->desc->next_desc)
- hdr = (request->desc + 1)->hdr1;
- else
- hdr = request->desc->hdr1;
+ hdr = get_request_hdr(request, is_sec1);
if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
status = 0;
@@ -477,8 +486,14 @@ static u32 current_desc_hdr(struct device *dev, int ch)
}
}
- if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
- return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
+ if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
+ struct talitos_edesc *edesc;
+
+ edesc = container_of(priv->chan[ch].fifo[iter].desc,
+ struct talitos_edesc, desc);
+ return ((struct talitos_desc *)
+ (edesc->buf + edesc->dma_len))->hdr;
+ }
return priv->chan[ch].fifo[iter].desc->hdr;
}
@@ -824,7 +839,11 @@ static void talitos_unregister_rng(struct device *dev)
* HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
*/
#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
+#ifdef CONFIG_CRYPTO_DEV_TALITOS2
#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
+#else
+#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
+#endif
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
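/*
 * Worked sizes for the #ifdef above (constants from <crypto/aes.h> and
 * <crypto/sha.h>): the buffer holds an AES key plus an HMAC key padded
 * to the hash block size.
 *
 *   SEC2+ builds:  AES_MAX_KEY_SIZE (32) + SHA512_BLOCK_SIZE (128) = 160
 *   SEC1-only:     AES_MAX_KEY_SIZE (32) + SHA256_BLOCK_SIZE (64)  =  96
 *
 * SEC1 hardware tops out at SHA-256, so a build without
 * CONFIG_CRYPTO_DEV_TALITOS2 saves 64 bytes per transform key buffer.
 */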
struct talitos_ctx {
@@ -948,36 +967,6 @@ badkey:
goto out;
}
-/*
- * talitos_edesc - s/w-extended descriptor
- * @src_nents: number of segments in input scatterlist
- * @dst_nents: number of segments in output scatterlist
- * @icv_ool: whether ICV is out-of-line
- * @iv_dma: dma address of iv for checking continuity and link table
- * @dma_len: length of dma mapped link_tbl space
- * @dma_link_tbl: bus physical address of link_tbl/buf
- * @desc: h/w descriptor
- * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
- * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
- *
- * if decrypting (with authcheck), or either one of src_nents or dst_nents
- * is greater than 1, an integrity check value is concatenated to the end
- * of link_tbl data
- */
-struct talitos_edesc {
- int src_nents;
- int dst_nents;
- bool icv_ool;
- dma_addr_t iv_dma;
- int dma_len;
- dma_addr_t dma_link_tbl;
- struct talitos_desc desc;
- union {
- struct talitos_ptr link_tbl[0];
- u8 buf[0];
- };
-};
-
static void talitos_sg_unmap(struct device *dev,
struct talitos_edesc *edesc,
struct scatterlist *src,
@@ -1008,11 +997,13 @@ static void talitos_sg_unmap(struct device *dev,
static void ipsec_esp_unmap(struct device *dev,
struct talitos_edesc *edesc,
- struct aead_request *areq)
+ struct aead_request *areq, bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
+ unsigned int authsize = crypto_aead_authsize(aead);
+ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
@@ -1021,8 +1012,8 @@ static void ipsec_esp_unmap(struct device *dev,
DMA_FROM_DEVICE);
unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
- talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
- areq->assoclen);
+ talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
+ cryptlen + authsize, areq->assoclen);
if (edesc->dma_len)
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
@@ -1032,7 +1023,7 @@ static void ipsec_esp_unmap(struct device *dev,
unsigned int dst_nents = edesc->dst_nents ? : 1;
sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
- areq->assoclen + areq->cryptlen - ivsize);
+ areq->assoclen + cryptlen - ivsize);
}
}
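/*
 * AEAD length bookkeeping used from here on: for decryption,
 * req->cryptlen covers the ciphertext *plus* the ICV, so the payload
 * length is derived locally instead of mutating the caller's request
 * (the old "req->cryptlen -= authsize" in aead_decrypt() is dropped
 * below). E.g. 16 payload bytes with authsize = 16:
 *
 *   encrypt: cryptlen = req->cryptlen      = 16
 *   decrypt: cryptlen = req->cryptlen - 16 = 16  (payload only)
 */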
@@ -1043,31 +1034,14 @@ static void ipsec_esp_encrypt_done(struct device *dev,
struct talitos_desc *desc, void *context,
int err)
{
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
struct aead_request *areq = context;
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
- unsigned int authsize = crypto_aead_authsize(authenc);
unsigned int ivsize = crypto_aead_ivsize(authenc);
struct talitos_edesc *edesc;
- struct scatterlist *sg;
- void *icvdata;
edesc = container_of(desc, struct talitos_edesc, desc);
- ipsec_esp_unmap(dev, edesc, areq);
-
- /* copy the generated ICV to dst */
- if (edesc->icv_ool) {
- if (is_sec1)
- icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
- else
- icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2];
- sg = sg_last(areq->dst, edesc->dst_nents);
- memcpy((char *)sg_virt(sg) + sg->length - authsize,
- icvdata, authsize);
- }
+ ipsec_esp_unmap(dev, edesc, areq, true);
dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
@@ -1084,32 +1058,16 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_edesc *edesc;
- struct scatterlist *sg;
char *oicv, *icv;
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
edesc = container_of(desc, struct talitos_edesc, desc);
- ipsec_esp_unmap(dev, edesc, req);
+ ipsec_esp_unmap(dev, edesc, req, false);
if (!err) {
/* auth check */
- sg = sg_last(req->dst, edesc->dst_nents ? : 1);
- icv = (char *)sg_virt(sg) + sg->length - authsize;
-
- if (edesc->dma_len) {
- if (is_sec1)
- oicv = (char *)&edesc->dma_link_tbl +
- req->assoclen + req->cryptlen;
- else
- oicv = (char *)
- &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2];
- if (edesc->icv_ool)
- icv = oicv + authsize;
- } else
- oicv = (char *)&edesc->link_tbl[0];
+ oicv = edesc->buf + edesc->dma_len;
+ icv = oicv - authsize;
err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
}
@@ -1128,7 +1086,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
edesc = container_of(desc, struct talitos_edesc, desc);
- ipsec_esp_unmap(dev, edesc, req);
+ ipsec_esp_unmap(dev, edesc, req, false);
/* check ICV auth status */
if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
@@ -1145,11 +1103,12 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
* stop at cryptlen bytes
*/
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
- unsigned int offset, int cryptlen,
+ unsigned int offset, int datalen, int elen,
struct talitos_ptr *link_tbl_ptr)
{
- int n_sg = sg_count;
+ int n_sg = elen ? sg_count + 1 : sg_count;
int count = 0;
+ int cryptlen = datalen + elen;
while (cryptlen && sg && n_sg--) {
unsigned int len = sg_dma_len(sg);
@@ -1164,11 +1123,20 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
if (len > cryptlen)
len = cryptlen;
+ if (datalen > 0 && len > datalen) {
+ to_talitos_ptr(link_tbl_ptr + count,
+ sg_dma_address(sg) + offset, datalen, 0);
+ to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
+ count++;
+ len -= datalen;
+ offset += datalen;
+ }
to_talitos_ptr(link_tbl_ptr + count,
sg_dma_address(sg) + offset, len, 0);
to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
count++;
cryptlen -= len;
+ datalen -= len;
offset = 0;
next:
@@ -1178,7 +1146,7 @@ next:
/* tag end of link table */
if (count > 0)
to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
- DESC_PTR_LNKTBL_RETURN, 0);
+ DESC_PTR_LNKTBL_RET, 0);
return count;
}
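/*
 * What the datalen/elen split above produces, sketched for one SG entry
 * of 40 bytes with datalen = 32 and elen = 8 (elen being the trailing
 * extent, i.e. the ICV):
 *
 *   link_tbl[n]:   addr + 0,  len 32   payload part
 *   link_tbl[n+1]: addr + 32, len 8    extent part
 *
 * and the final entry of the table is tagged DESC_PTR_LNKTBL_RET.
 */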
@@ -1186,7 +1154,8 @@ next:
static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
unsigned int len, struct talitos_edesc *edesc,
struct talitos_ptr *ptr, int sg_count,
- unsigned int offset, int tbl_off, int elen)
+ unsigned int offset, int tbl_off, int elen,
+ bool force)
{
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1196,7 +1165,7 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
return 1;
}
to_talitos_ptr_ext_set(ptr, elen, is_sec1);
- if (sg_count == 1) {
+ if (sg_count == 1 && !force) {
to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
return sg_count;
}
@@ -1204,9 +1173,9 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
return sg_count;
}
- sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
+ sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
&edesc->link_tbl[tbl_off]);
- if (sg_count == 1) {
+ if (sg_count == 1 && !force) {
/* Only one segment now, so no link tbl needed */
copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
return sg_count;
@@ -1224,13 +1193,14 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
unsigned int offset, int tbl_off)
{
return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
- tbl_off, 0);
+ tbl_off, 0, false);
}
/*
* fill in and submit ipsec_esp descriptor
*/
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ bool encrypt,
void (*callback)(struct device *dev,
struct talitos_desc *desc,
void *context, int error))
@@ -1240,7 +1210,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
struct device *dev = ctx->dev;
struct talitos_desc *desc = &edesc->desc;
- unsigned int cryptlen = areq->cryptlen;
+ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
unsigned int ivsize = crypto_aead_ivsize(aead);
int tbl_off = 0;
int sg_count, ret;
@@ -1251,6 +1221,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
+ dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
/* hmac key */
to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
@@ -1290,7 +1261,8 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
elen = authsize;
ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
- sg_count, areq->assoclen, tbl_off, elen);
+ sg_count, areq->assoclen, tbl_off, elen,
+ false);
if (ret > 1) {
tbl_off += ret;
@@ -1304,55 +1276,32 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
}
- ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
- sg_count, areq->assoclen, tbl_off);
-
- if (is_ipsec_esp)
- to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
-
- /* ICV data */
- if (ret > 1) {
- tbl_off += ret;
- edesc->icv_ool = true;
- sync_needed = true;
-
- if (is_ipsec_esp) {
- struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
- int offset = (edesc->src_nents + edesc->dst_nents + 2) *
- sizeof(struct talitos_ptr) + authsize;
-
- /* Add an entry to the link table for ICV data */
- to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
- to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
- is_sec1);
+ if (is_ipsec_esp && encrypt)
+ elen = authsize;
+ else
+ elen = 0;
+ ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
+ sg_count, areq->assoclen, tbl_off, elen,
+ is_ipsec_esp && !encrypt);
+ tbl_off += ret;
- /* icv data follows link tables */
- to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
- authsize, is_sec1);
- } else {
- dma_addr_t addr = edesc->dma_link_tbl;
+ if (!encrypt && is_ipsec_esp) {
+ struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
- if (is_sec1)
- addr += areq->assoclen + cryptlen;
- else
- addr += sizeof(struct talitos_ptr) * tbl_off;
+ /* Add an entry to the link table for ICV data */
+ to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
+ to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
- to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
- }
+ /* icv data follows link tables */
+ to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
+ to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
+ sync_needed = true;
+ } else if (!encrypt) {
+ to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
+ sync_needed = true;
} else if (!is_ipsec_esp) {
- ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
- &desc->ptr[6], sg_count, areq->assoclen +
- cryptlen,
- tbl_off);
- if (ret > 1) {
- tbl_off += ret;
- edesc->icv_ool = true;
- sync_needed = true;
- } else {
- edesc->icv_ool = false;
- }
- } else {
- edesc->icv_ool = false;
+ talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
+ sg_count, areq->assoclen + cryptlen, tbl_off);
}
/* iv out */
@@ -1367,7 +1316,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
- ipsec_esp_unmap(dev, edesc, areq);
+ ipsec_esp_unmap(dev, edesc, areq, encrypt);
kfree(edesc);
}
return ret;
@@ -1435,18 +1384,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
* and space for two sets of ICVs (stashed and generated)
*/
alloc_len = sizeof(struct talitos_edesc);
- if (src_nents || dst_nents) {
+ if (src_nents || dst_nents || !encrypt) {
if (is_sec1)
dma_len = (src_nents ? src_len : 0) +
- (dst_nents ? dst_len : 0);
+ (dst_nents ? dst_len : 0) + authsize;
else
dma_len = (src_nents + dst_nents + 2) *
- sizeof(struct talitos_ptr) + authsize * 2;
+ sizeof(struct talitos_ptr) + authsize;
alloc_len += dma_len;
} else {
dma_len = 0;
- alloc_len += icv_stashing ? authsize : 0;
}
+ alloc_len += icv_stashing ? authsize : 0;
/* if it's an ahash, add space for a second desc next to the first one */
if (is_sec1 && !dst)
@@ -1466,15 +1415,11 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
edesc->dma_len = dma_len;
- if (dma_len) {
- void *addr = &edesc->link_tbl[0];
-
- if (is_sec1 && !dst)
- addr += sizeof(struct talitos_desc);
- edesc->dma_link_tbl = dma_map_single(dev, addr,
+ if (dma_len)
+ edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
edesc->dma_len,
DMA_BIDIRECTIONAL);
- }
+
return edesc;
}
@@ -1485,9 +1430,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
unsigned int ivsize = crypto_aead_ivsize(authenc);
+ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
- iv, areq->assoclen, areq->cryptlen,
+ iv, areq->assoclen, cryptlen,
authsize, ivsize, icv_stashing,
areq->base.flags, encrypt);
}
@@ -1506,7 +1452,7 @@ static int aead_encrypt(struct aead_request *req)
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
- return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
+ return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
}
static int aead_decrypt(struct aead_request *req)
@@ -1516,17 +1462,15 @@ static int aead_decrypt(struct aead_request *req)
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct talitos_private *priv = dev_get_drvdata(ctx->dev);
struct talitos_edesc *edesc;
- struct scatterlist *sg;
void *icvdata;
- req->cryptlen -= authsize;
-
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, req->iv, 1, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
+ if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
+ (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
((!edesc->src_nents && !edesc->dst_nents) ||
priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
@@ -1537,24 +1481,20 @@ static int aead_decrypt(struct aead_request *req)
/* reset integrity check result bits */
- return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
+ return ipsec_esp(edesc, req, false,
+ ipsec_esp_decrypt_hwauth_done);
}
/* Have to check the ICV with software */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
/* stash incoming ICV for later cmp with ICV generated by the h/w */
- if (edesc->dma_len)
- icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2];
- else
- icvdata = &edesc->link_tbl[0];
+ icvdata = edesc->buf + edesc->dma_len;
- sg = sg_last(req->src, edesc->src_nents ? : 1);
+ sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
+ req->assoclen + req->cryptlen - authsize);
- memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
-
- return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
+ return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}
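/*
 * ICV bookkeeping for the software auth-check path, as read from
 * aead_decrypt() and ipsec_esp_decrypt_swauth_done() above: both ICVs
 * live at the tail of the one DMA-mapped area, so the compare no longer
 * walks scatterlists.
 *
 *   buf + dma_len - authsize   ICV generated by the hardware
 *   buf + dma_len              incoming ICV stashed by aead_decrypt()
 *
 * (talitos_edesc_alloc() reserves the extra authsize bytes via the
 * icv_stashing path.)
 */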
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
@@ -1605,6 +1545,18 @@ static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
return ablkcipher_setkey(cipher, key, keylen);
}
+static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
+ keylen == AES_KEYSIZE_256)
+ return ablkcipher_setkey(cipher, key, keylen);
+
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+ return -EINVAL;
+}
+
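/*
 * Effect of the new setkey, by example:
 *
 *   keylen 16/24/32  -> accepted (AES-128/192/256)
 *   keylen 20        -> CRYPTO_TFM_RES_BAD_KEY_LEN and -EINVAL
 *
 * so an off-size AES key now fails up front at setkey time.
 */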
static void common_nonsnoop_unmap(struct device *dev,
struct talitos_edesc *edesc,
struct ablkcipher_request *areq)
@@ -1624,11 +1576,15 @@ static void ablkcipher_done(struct device *dev,
int err)
{
struct ablkcipher_request *areq = context;
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
struct talitos_edesc *edesc;
edesc = container_of(desc, struct talitos_edesc, desc);
common_nonsnoop_unmap(dev, edesc, areq);
+ memcpy(areq->info, ctx->iv, ivsize);
kfree(edesc);
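/*
 * The added memcpy() gives the request its output IV: the API expects
 * areq->info to hold the IV for the *next* chained request on
 * completion (for cbc(aes), the last ciphertext block). The assumption,
 * from the surrounding code, is that ctx->iv has been refreshed by the
 * unmap path before this point.
 */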
@@ -1723,6 +1679,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct talitos_edesc *edesc;
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+ if (!areq->nbytes)
+ return 0;
+
+ if (areq->nbytes % blocksize)
+ return -EINVAL;
/* allocate extended descriptor */
edesc = ablkcipher_edesc_alloc(areq, true);
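/*
 * Quick check of the two guards above for cbc(aes) (blocksize 16):
 *
 *   nbytes = 0   -> return 0        nothing to do
 *   nbytes = 15  -> return -EINVAL  partial block
 *   nbytes = 32  -> proceed         two full blocks
 *
 * The same guards are added to the decrypt path below.
 */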
@@ -1740,6 +1704,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct talitos_edesc *edesc;
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+ if (!areq->nbytes)
+ return 0;
+
+ if (areq->nbytes % blocksize)
+ return -EINVAL;
/* allocate extended descriptor */
edesc = ablkcipher_edesc_alloc(areq, false);
@@ -1759,14 +1731,16 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
struct talitos_desc *desc = &edesc->desc;
- struct talitos_desc *desc2 = desc + 1;
+ struct talitos_desc *desc2 = (struct talitos_desc *)
+ (edesc->buf + edesc->dma_len);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
if (desc->next_desc &&
desc->ptr[5].ptr != desc2->ptr[5].ptr)
unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
- talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
+ if (req_ctx->psrc)
+ talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
/* When using hashctx-in, must unmap it. */
if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
@@ -1833,7 +1807,6 @@ static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
struct ahash_request *areq, unsigned int length,
- unsigned int offset,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
void *context, int error))
@@ -1872,9 +1845,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
sg_count = edesc->src_nents ?: 1;
if (is_sec1 && sg_count > 1)
- sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
- edesc->buf + sizeof(struct talitos_desc),
- length, req_ctx->nbuf);
+ sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
else if (length)
sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
DMA_TO_DEVICE);
@@ -1887,7 +1858,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
DMA_TO_DEVICE);
} else {
sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
- &desc->ptr[3], sg_count, offset, 0);
+ &desc->ptr[3], sg_count, 0, 0);
if (sg_count > 1)
sync_needed = true;
}
@@ -1911,7 +1882,8 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
if (is_sec1 && req_ctx->nbuf && length) {
- struct talitos_desc *desc2 = desc + 1;
+ struct talitos_desc *desc2 = (struct talitos_desc *)
+ (edesc->buf + edesc->dma_len);
dma_addr_t next_desc;
memset(desc2, 0, sizeof(*desc2));
@@ -1932,7 +1904,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
DMA_TO_DEVICE);
copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
- &desc2->ptr[3], sg_count, offset, 0);
+ &desc2->ptr[3], sg_count, 0, 0);
if (sg_count > 1)
sync_needed = true;
copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
@@ -2043,7 +2015,6 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
struct device *dev = ctx->dev;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
- int offset = 0;
u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
@@ -2083,6 +2054,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
sg_chain(req_ctx->bufsl, 2, areq->src);
req_ctx->psrc = req_ctx->bufsl;
} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
+ int offset;
+
if (nbytes_to_hash > blocksize)
offset = blocksize - req_ctx->nbuf;
else
@@ -2095,7 +2068,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
sg_copy_to_buffer(areq->src, nents,
ctx_buf + req_ctx->nbuf, offset);
req_ctx->nbuf += offset;
- req_ctx->psrc = areq->src;
+ req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
+ offset);
} else
req_ctx->psrc = areq->src;
@@ -2135,8 +2109,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
if (ctx->keylen && (req_ctx->first || req_ctx->last))
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
- return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
- ahash_done);
+ return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
}
static int ahash_update(struct ahash_request *areq)
@@ -2339,7 +2312,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha1-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2384,7 +2357,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "authenc(hmac(sha1),"
"cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha1-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2427,7 +2400,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha224-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2472,7 +2445,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "authenc(hmac(sha224),"
"cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha224-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2515,7 +2488,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha256-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2560,7 +2533,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "authenc(hmac(sha256),"
"cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha256-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2689,7 +2662,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_driver_name = "authenc-hmac-md5-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2732,7 +2705,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-md5-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2760,7 +2733,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablkcipher_aes_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2777,6 +2750,7 @@ static struct talitos_alg_template driver_algs[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
+ .setkey = ablkcipher_aes_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2787,13 +2761,14 @@ static struct talitos_alg_template driver_algs[] = {
.alg.crypto = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
+ .setkey = ablkcipher_aes_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
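/*
 * ctr(aes) is a stream mode, so cra_blocksize must be 1: the API treats
 * cra_blocksize as the minimum I/O granularity, and e.g. a 20-byte
 * request is valid for ctr(aes) while cbc(aes) (blocksize 16) must
 * reject it. The engine still processes AES_BLOCK_SIZE chunks
 * internally.
 */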
@@ -2810,7 +2785,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
.setkey = ablkcipher_des_setkey,
}
},
@@ -2845,7 +2819,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = ablkcipher_des3_setkey,
}
},
@@ -3270,7 +3243,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg->cra_priority = t_alg->algt.priority;
else
alg->cra_priority = TALITOS_CRA_PRIORITY;
- alg->cra_alignmask = 0;
+ if (has_ftr_sec1(priv))
+ alg->cra_alignmask = 3;
+ else
+ alg->cra_alignmask = 0;
alg->cra_ctxsize = sizeof(struct talitos_ctx);
alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
@@ -3418,7 +3394,7 @@ static int talitos_probe(struct platform_device *ofdev)
if (err)
goto err_out;
- if (of_device_is_compatible(np, "fsl,sec1.0")) {
+ if (has_ftr_sec1(priv)) {
if (priv->num_channels == 1)
tasklet_init(&priv->done_task[0], talitos1_done_ch0,
(unsigned long)dev);
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index a65a63e0d6c1..1469b956948a 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -1,31 +1,8 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
/*
* Freescale SEC (talitos) device register and descriptor header defines
*
* Copyright (c) 2006-2011 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#define TALITOS_TIMEOUT 100000
@@ -65,6 +42,34 @@ struct talitos_desc {
#define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32))
+/*
+ * talitos_edesc - s/w-extended descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @dma_len: length of dma mapped link_tbl space
+ * @dma_link_tbl: bus physical address of link_tbl/buf
+ * @desc: h/w descriptor
+ * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
+ * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
+ *
+ * if decrypting (with authcheck), or either one of src_nents or dst_nents
+ * is greater than 1, an integrity check value is concatenated to the end
+ * of link_tbl data
+ */
+struct talitos_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int dma_len;
+ dma_addr_t dma_link_tbl;
+ struct talitos_desc desc;
+ union {
+ struct talitos_ptr link_tbl[0];
+ u8 buf[0];
+ };
+};
+
/**
* talitos_request - descriptor submission request
* @desc: descriptor pointer (kernel virtual)
@@ -150,12 +155,6 @@ struct talitos_private {
bool rng_registered;
};
-extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
- void (*callback)(struct device *dev,
- struct talitos_desc *desc,
- void *context, int error),
- void *context);
-
/* .features flag */
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
@@ -170,13 +169,11 @@ extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
*/
static inline bool has_ftr_sec1(struct talitos_private *priv)
{
-#if defined(CONFIG_CRYPTO_DEV_TALITOS1) && defined(CONFIG_CRYPTO_DEV_TALITOS2)
- return priv->features & TALITOS_FTR_SEC1 ? true : false;
-#elif defined(CONFIG_CRYPTO_DEV_TALITOS1)
- return true;
-#else
- return false;
-#endif
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS1) &&
+ IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS2))
+ return priv->features & TALITOS_FTR_SEC1;
+
+ return IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS1);
}
/*
@@ -412,5 +409,5 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
/* link table extent field bits */
#define DESC_PTR_LNKTBL_JUMP 0x80
-#define DESC_PTR_LNKTBL_RETURN 0x02
+#define DESC_PTR_LNKTBL_RET 0x02
#define DESC_PTR_LNKTBL_NEXT 0x01
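/*
 * What the IS_ENABLED() form of has_ftr_sec1() above folds to at
 * compile time (IS_ENABLED() evaluates to a constant 0 or 1):
 *
 *   TALITOS1 && TALITOS2  -> runtime test of TALITOS_FTR_SEC1
 *   TALITOS1 only         -> constant true
 *   TALITOS2 only         -> constant false
 *
 * Same behaviour as the old #ifdef ladder, but every branch is seen by
 * the compiler in every configuration, so none of them can bit-rot.
 */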
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index c7e515a1bc97..d88084447f1c 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -7,64 +7,52 @@
* Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
*/
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
#include "aesp8-ppc.h"
struct p8_aes_cbc_ctx {
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
};
-static int p8_aes_cbc_init(struct crypto_tfm *tfm)
+static int p8_aes_cbc_init(struct crypto_skcipher *tfm)
{
- const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_sync_skcipher *fallback;
- struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- fallback = crypto_alloc_sync_skcipher(alg, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *fallback;
+ fallback = crypto_alloc_skcipher("cbc(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(fallback)) {
- printk(KERN_ERR
- "Failed to allocate transformation for '%s': %ld\n",
- alg, PTR_ERR(fallback));
+ pr_err("Failed to allocate cbc(aes) fallback: %ld\n",
+ PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- crypto_sync_skcipher_set_flags(
- fallback,
- crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(fallback));
ctx->fallback = fallback;
-
return 0;
}
-static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
+static void p8_aes_cbc_exit(struct crypto_skcipher *tfm)
{
- struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (ctx->fallback) {
- crypto_free_sync_skcipher(ctx->fallback);
- ctx->fallback = NULL;
- }
+ crypto_free_skcipher(ctx->fallback);
}
-static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
+static int p8_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
+ struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
- struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
preempt_disable();
pagefault_disable();
@@ -75,108 +63,71 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
return ret ? -EINVAL : 0;
}
-static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int p8_aes_cbc_crypt(struct skcipher_request *req, int enc)
{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int ret;
- struct blkcipher_walk walk;
- struct p8_aes_cbc_ctx *ctx =
- crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (!crypto_simd_usable()) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_sync_tfm(req, ctx->fallback);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
- ret = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
- } else {
- blkcipher_walk_init(&walk, dst, src, nbytes);
- ret = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- aes_p8_cbc_encrypt(walk.src.virt.addr,
- walk.dst.virt.addr,
- nbytes & AES_BLOCK_MASK,
- &ctx->enc_key, walk.iv, 1);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, &walk, nbytes);
- }
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ return enc ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
}
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) != 0) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ aes_p8_cbc_encrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ enc ? &ctx->enc_key : &ctx->dec_key,
+ walk.iv, enc);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
+ }
return ret;
}
-static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int p8_aes_cbc_encrypt(struct skcipher_request *req)
{
- int ret;
- struct blkcipher_walk walk;
- struct p8_aes_cbc_ctx *ctx =
- crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
-
- if (!crypto_simd_usable()) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_sync_tfm(req, ctx->fallback);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
- ret = crypto_skcipher_decrypt(req);
- skcipher_request_zero(req);
- } else {
- blkcipher_walk_init(&walk, dst, src, nbytes);
- ret = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- aes_p8_cbc_encrypt(walk.src.virt.addr,
- walk.dst.virt.addr,
- nbytes & AES_BLOCK_MASK,
- &ctx->dec_key, walk.iv, 0);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, &walk, nbytes);
- }
- }
-
- return ret;
+ return p8_aes_cbc_crypt(req, 1);
}
+static int p8_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ return p8_aes_cbc_crypt(req, 0);
+}
-struct crypto_alg p8_aes_cbc_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "p8_aes_cbc",
- .cra_module = THIS_MODULE,
- .cra_priority = 2000,
- .cra_type = &crypto_blkcipher_type,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
- .cra_alignmask = 0,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
- .cra_init = p8_aes_cbc_init,
- .cra_exit = p8_aes_cbc_exit,
- .cra_blkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = p8_aes_cbc_setkey,
- .encrypt = p8_aes_cbc_encrypt,
- .decrypt = p8_aes_cbc_decrypt,
- },
+struct skcipher_alg p8_aes_cbc_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "p8_aes_cbc",
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 2000,
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
+ .setkey = p8_aes_cbc_setkey,
+ .encrypt = p8_aes_cbc_encrypt,
+ .decrypt = p8_aes_cbc_decrypt,
+ .init = p8_aes_cbc_init,
+ .exit = p8_aes_cbc_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
};
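/*
 * The fallback pattern shared by the three converted vmx skciphers, as
 * a stand-alone sketch (the helper name is illustrative): init() sizes
 * the request context so a full subrequest, plus the fallback's own
 * reqsize, fits behind each request, and the hot path just clones the
 * request onto it:
 */
static int vmx_fallback_crypt(struct skcipher_request *req,
			      struct crypto_skcipher *fallback, bool enc)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;		/* copies src, dst, cryptlen, iv, flags */
	skcipher_request_set_tfm(subreq, fallback);
	return enc ? crypto_skcipher_encrypt(subreq) :
		     crypto_skcipher_decrypt(subreq);
}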
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index dd017ef42fa9..79ba062ee1c1 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -7,62 +7,51 @@
* Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
*/
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
#include "aesp8-ppc.h"
struct p8_aes_ctr_ctx {
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
struct aes_key enc_key;
};
-static int p8_aes_ctr_init(struct crypto_tfm *tfm)
+static int p8_aes_ctr_init(struct crypto_skcipher *tfm)
{
- const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_sync_skcipher *fallback;
- struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *fallback;
- fallback = crypto_alloc_sync_skcipher(alg, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_skcipher("ctr(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(fallback)) {
- printk(KERN_ERR
- "Failed to allocate transformation for '%s': %ld\n",
- alg, PTR_ERR(fallback));
+ pr_err("Failed to allocate ctr(aes) fallback: %ld\n",
+ PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- crypto_sync_skcipher_set_flags(
- fallback,
- crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(fallback));
ctx->fallback = fallback;
-
return 0;
}
-static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
+static void p8_aes_ctr_exit(struct crypto_skcipher *tfm)
{
- struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (ctx->fallback) {
- crypto_free_sync_skcipher(ctx->fallback);
- ctx->fallback = NULL;
- }
+ crypto_free_skcipher(ctx->fallback);
}
-static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
+static int p8_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
+ struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
- struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
preempt_disable();
pagefault_disable();
@@ -72,13 +61,13 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
return ret ? -EINVAL : 0;
}
-static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
- struct blkcipher_walk *walk)
+static void p8_aes_ctr_final(const struct p8_aes_ctr_ctx *ctx,
+ struct skcipher_walk *walk)
{
u8 *ctrblk = walk->iv;
u8 keystream[AES_BLOCK_SIZE];
@@ -98,77 +87,63 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
-static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int p8_aes_ctr_crypt(struct skcipher_request *req)
{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int ret;
- u64 inc;
- struct blkcipher_walk walk;
- struct p8_aes_ctr_ctx *ctx =
- crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (!crypto_simd_usable()) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_sync_tfm(req, ctx->fallback);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
- ret = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
- } else {
- blkcipher_walk_init(&walk, dst, src, nbytes);
- ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
- while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
- walk.dst.virt.addr,
- (nbytes &
- AES_BLOCK_MASK) /
- AES_BLOCK_SIZE,
- &ctx->enc_key,
- walk.iv);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- /* We need to update IV mostly for last bytes/round */
- inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
- if (inc > 0)
- while (inc--)
- crypto_inc(walk.iv, AES_BLOCK_SIZE);
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, &walk, nbytes);
- }
- if (walk.nbytes) {
- p8_aes_ctr_final(ctx, &walk);
- ret = blkcipher_walk_done(desc, &walk, 0);
- }
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ return crypto_skcipher_encrypt(subreq);
}
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ nbytes / AES_BLOCK_SIZE,
+ &ctx->enc_key, walk.iv);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ do {
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+ } while ((nbytes -= AES_BLOCK_SIZE) >= AES_BLOCK_SIZE);
+
+ ret = skcipher_walk_done(&walk, nbytes);
+ }
+ if (nbytes) {
+ p8_aes_ctr_final(ctx, &walk);
+ ret = skcipher_walk_done(&walk, 0);
+ }
return ret;
}
-struct crypto_alg p8_aes_ctr_alg = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "p8_aes_ctr",
- .cra_module = THIS_MODULE,
- .cra_priority = 2000,
- .cra_type = &crypto_blkcipher_type,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
- .cra_alignmask = 0,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
- .cra_init = p8_aes_ctr_init,
- .cra_exit = p8_aes_ctr_exit,
- .cra_blkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = p8_aes_ctr_setkey,
- .encrypt = p8_aes_ctr_crypt,
- .decrypt = p8_aes_ctr_crypt,
- },
+struct skcipher_alg p8_aes_ctr_alg = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "p8_aes_ctr",
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 2000,
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
+ .setkey = p8_aes_ctr_setkey,
+ .encrypt = p8_aes_ctr_crypt,
+ .decrypt = p8_aes_ctr_crypt,
+ .init = p8_aes_ctr_init,
+ .exit = p8_aes_ctr_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
};
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 536167e737a0..49f7258045fa 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -7,67 +7,56 @@
* Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
*/
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
-#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
-#include <crypto/skcipher.h>
#include "aesp8-ppc.h"
struct p8_aes_xts_ctx {
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
struct aes_key tweak_key;
};
-static int p8_aes_xts_init(struct crypto_tfm *tfm)
+static int p8_aes_xts_init(struct crypto_skcipher *tfm)
{
- const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_sync_skcipher *fallback;
- struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *fallback;
- fallback = crypto_alloc_sync_skcipher(alg, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_skcipher("xts(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(fallback)) {
- printk(KERN_ERR
- "Failed to allocate transformation for '%s': %ld\n",
- alg, PTR_ERR(fallback));
+ pr_err("Failed to allocate xts(aes) fallback: %ld\n",
+ PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- crypto_sync_skcipher_set_flags(
- fallback,
- crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(fallback));
ctx->fallback = fallback;
-
return 0;
}
-static void p8_aes_xts_exit(struct crypto_tfm *tfm)
+static void p8_aes_xts_exit(struct crypto_skcipher *tfm)
{
- struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (ctx->fallback) {
- crypto_free_sync_skcipher(ctx->fallback);
- ctx->fallback = NULL;
- }
+ crypto_free_skcipher(ctx->fallback);
}
-static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
+static int p8_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
+ struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
- struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- ret = xts_check_key(tfm, key, keylen);
+ ret = xts_verify_key(tfm, key, keylen);
if (ret)
return ret;
@@ -81,100 +70,90 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
return ret ? -EINVAL : 0;
}
-static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, int enc)
+static int p8_aes_xts_crypt(struct skcipher_request *req, int enc)
{
- int ret;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
u8 tweak[AES_BLOCK_SIZE];
- u8 *iv;
- struct blkcipher_walk walk;
- struct p8_aes_xts_ctx *ctx =
- crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+ int ret;
if (!crypto_simd_usable()) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_sync_tfm(req, ctx->fallback);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
- ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
- skcipher_request_zero(req);
- } else {
- blkcipher_walk_init(&walk, dst, src, nbytes);
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ return enc ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ }
+
+ ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
+
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
- ret = blkcipher_walk_virt(desc, &walk);
+ aes_p8_encrypt(walk.iv, tweak, &ctx->tweak_key);
+
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+ while ((nbytes = walk.nbytes) != 0) {
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
-
- iv = walk.iv;
- memset(tweak, 0, AES_BLOCK_SIZE);
- aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
-
+ if (enc)
+ aes_p8_xts_encrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ &ctx->enc_key, NULL, tweak);
+ else
+ aes_p8_xts_decrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ &ctx->dec_key, NULL, tweak);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- while ((nbytes = walk.nbytes)) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- if (enc)
- aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
- nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
- else
- aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
- nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, &walk, nbytes);
- }
+ ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
}
return ret;
}
-static int p8_aes_xts_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int p8_aes_xts_encrypt(struct skcipher_request *req)
{
- return p8_aes_xts_crypt(desc, dst, src, nbytes, 1);
+ return p8_aes_xts_crypt(req, 1);
}
-static int p8_aes_xts_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int p8_aes_xts_decrypt(struct skcipher_request *req)
{
- return p8_aes_xts_crypt(desc, dst, src, nbytes, 0);
+ return p8_aes_xts_crypt(req, 0);
}
-struct crypto_alg p8_aes_xts_alg = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "p8_aes_xts",
- .cra_module = THIS_MODULE,
- .cra_priority = 2000,
- .cra_type = &crypto_blkcipher_type,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
- .cra_alignmask = 0,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
- .cra_init = p8_aes_xts_init,
- .cra_exit = p8_aes_xts_exit,
- .cra_blkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .setkey = p8_aes_xts_setkey,
- .encrypt = p8_aes_xts_encrypt,
- .decrypt = p8_aes_xts_decrypt,
- }
+struct skcipher_alg p8_aes_xts_alg = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "p8_aes_xts",
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 2000,
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
+ .setkey = p8_aes_xts_setkey,
+ .encrypt = p8_aes_xts_encrypt,
+ .decrypt = p8_aes_xts_decrypt,
+ .init = p8_aes_xts_init,
+ .exit = p8_aes_xts_exit,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
};
diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h
index 349646b73754..01774a4d26a2 100644
--- a/drivers/crypto/vmx/aesp8-ppc.h
+++ b/drivers/crypto/vmx/aesp8-ppc.h
@@ -2,8 +2,6 @@
#include <linux/types.h>
#include <crypto/aes.h>
-#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
-
struct aes_key {
u8 key[AES_MAX_KEYLENGTH];
int rounds;
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index 9c6b5c1d6a1a..db874367b602 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1286,6 +1286,24 @@ ___
#########################################################################
{{{ # CTR procedure[s] #
+
+####################### WARNING: Here be dragons! #######################
+#
+# This code is written as 'ctr32', based on a 32-bit counter used
+# upstream. The kernel does *not* use a 32-bit counter. The kernel uses
+# a 128-bit counter.
+#
+# This leads to subtle changes from the upstream code: the counter
+# is incremented with vaddu_q_m rather than vaddu_w_m. This occurs in
+# both the bulk (8 blocks at a time) path, and in the individual block
+# path. Be aware of this when doing updates.
+#
+# See:
+# 1d4aa0b4c181 ("crypto: vmx - Fixing AES-CTR counter bug")
+# 009b30ac7444 ("crypto: vmx - CTR: always increment IV as quadword")
+# https://github.com/openssl/openssl/pull/8942
+#
+#########################################################################
my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10));
my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3));
my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)=
@@ -1357,7 +1375,7 @@ Loop_ctr32_enc:
addi $idx,$idx,16
bdnz Loop_ctr32_enc
- vadduqm $ivec,$ivec,$one
+ vadduqm $ivec,$ivec,$one # Kernel change for 128-bit
vmr $dat,$inptail
lvx $inptail,0,$inp
addi $inp,$inp,16
@@ -1501,7 +1519,7 @@ Load_ctr32_enc_key:
$SHL $len,$len,4
vadduqm $out1,$ivec,$one # counter values ...
- vadduqm $out2,$ivec,$two
+ vadduqm $out2,$ivec,$two # (do all ctr adds as 128-bit)
vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0]
le?li $idx,8
vadduqm $out3,$out1,$two
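
The warning block above is easiest to verify with numbers. A standalone illustration in plain C (not kernel or OpenSSL code): once the low 32 bits of the counter are all ones, only a full 128-bit add carries into byte 11, which is exactly the vadduwm-versus-vadduqm difference the comments flag.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void ctr_inc128(uint8_t ctr[16])		/* vadduqm-style: full carry */
{
	for (int i = 15; i >= 0; i--)
		if (++ctr[i])			/* stop once no carry remains */
			break;
}

static void ctr_inc32(uint8_t ctr[16])		/* vadduwm-style: low word only */
{
	uint32_t w = ((uint32_t)ctr[12] << 24) | (ctr[13] << 16) |
		     (ctr[14] << 8) | ctr[15];
	w++;
	ctr[12] = w >> 24; ctr[13] = w >> 16; ctr[14] = w >> 8; ctr[15] = w;
}

int main(void)
{
	uint8_t a[16] = { 0 }, b[16] = { 0 };

	memset(a + 12, 0xff, 4);		/* low 32 bits all ones */
	memset(b + 12, 0xff, 4);
	ctr_inc128(a);				/* carries into byte 11 */
	ctr_inc32(b);				/* wraps to zero, no carry */
	printf("128-bit: byte11=%02x  32-bit: byte11=%02x\n", a[11], b[11]);
	return 0;
}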
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index 6c4c77f4e159..3e0335fb406c 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -15,54 +15,58 @@
#include <linux/crypto.h>
#include <asm/cputable.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
extern struct shash_alg p8_ghash_alg;
extern struct crypto_alg p8_aes_alg;
-extern struct crypto_alg p8_aes_cbc_alg;
-extern struct crypto_alg p8_aes_ctr_alg;
-extern struct crypto_alg p8_aes_xts_alg;
-static struct crypto_alg *algs[] = {
- &p8_aes_alg,
- &p8_aes_cbc_alg,
- &p8_aes_ctr_alg,
- &p8_aes_xts_alg,
- NULL,
-};
+extern struct skcipher_alg p8_aes_cbc_alg;
+extern struct skcipher_alg p8_aes_ctr_alg;
+extern struct skcipher_alg p8_aes_xts_alg;
static int __init p8_init(void)
{
- int ret = 0;
- struct crypto_alg **alg_it;
+ int ret;
- for (alg_it = algs; *alg_it; alg_it++) {
- ret = crypto_register_alg(*alg_it);
- printk(KERN_INFO "crypto_register_alg '%s' = %d\n",
- (*alg_it)->cra_name, ret);
- if (ret) {
- for (alg_it--; alg_it >= algs; alg_it--)
- crypto_unregister_alg(*alg_it);
- break;
- }
- }
+ ret = crypto_register_shash(&p8_ghash_alg);
if (ret)
- return ret;
+ goto err;
- ret = crypto_register_shash(&p8_ghash_alg);
- if (ret) {
- for (alg_it = algs; *alg_it; alg_it++)
- crypto_unregister_alg(*alg_it);
- }
+ ret = crypto_register_alg(&p8_aes_alg);
+ if (ret)
+ goto err_unregister_ghash;
+
+ ret = crypto_register_skcipher(&p8_aes_cbc_alg);
+ if (ret)
+ goto err_unregister_aes;
+
+ ret = crypto_register_skcipher(&p8_aes_ctr_alg);
+ if (ret)
+ goto err_unregister_aes_cbc;
+
+ ret = crypto_register_skcipher(&p8_aes_xts_alg);
+ if (ret)
+ goto err_unregister_aes_ctr;
+
+ return 0;
+
+err_unregister_aes_ctr:
+ crypto_unregister_skcipher(&p8_aes_ctr_alg);
+err_unregister_aes_cbc:
+ crypto_unregister_skcipher(&p8_aes_cbc_alg);
+err_unregister_aes:
+ crypto_unregister_alg(&p8_aes_alg);
+err_unregister_ghash:
+ crypto_unregister_shash(&p8_ghash_alg);
+err:
return ret;
}
static void __exit p8_exit(void)
{
- struct crypto_alg **alg_it;
-
- for (alg_it = algs; *alg_it; alg_it++) {
- printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name);
- crypto_unregister_alg(*alg_it);
- }
+ crypto_unregister_skcipher(&p8_aes_xts_alg);
+ crypto_unregister_skcipher(&p8_aes_ctr_alg);
+ crypto_unregister_skcipher(&p8_aes_cbc_alg);
+ crypto_unregister_alg(&p8_aes_alg);
crypto_unregister_shash(&p8_ghash_alg);
}
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 263bee76ef0d..6b8c4c458e8a 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -718,12 +718,13 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
struct jz4780_dma_dev *jzdma = data;
unsigned int nb_channels = jzdma->soc_data->nb_channels;
- uint32_t pending, dmac;
+ unsigned long pending;
+ uint32_t dmac;
int i;
pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
- for_each_set_bit(i, (unsigned long *)&pending, nb_channels) {
+ for_each_set_bit(i, &pending, nb_channels) {
if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
pending &= ~BIT(i);
}
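
The fix above exists because for_each_set_bit() dereferences whole unsigned longs: casting the address of a 32-bit value to unsigned long * reads past the variable on 64-bit kernels (and picks up the wrong bytes on big-endian). A minimal sketch of the safe idiom, with illustrative names:

#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/types.h>

static void demo_handle_pending(u32 hw_pending, unsigned int nb_channels)
{
	unsigned long pending = hw_pending;	/* zero-extended copy */
	int i;

	/* the bitmap helpers now operate on a real unsigned long */
	for_each_set_bit(i, &pending, nb_channels)
		pr_info("channel %d pending\n", i);
}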
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 99d9f431ae2c..4ec84a633bd3 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -703,7 +703,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
spin_lock_irqsave(&sdma->channel_0_lock, flags);
bd0->mode.command = C0_SETPM;
- bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+ bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
bd0->mode.count = size / 2;
bd0->buffer_addr = buf_phys;
bd0->ext_buffer_addr = address;
@@ -1025,7 +1025,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
context->gReg[7] = sdmac->watermark_level;
bd0->mode.command = C0_SETDM;
- bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+ bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
bd0->mode.count = sizeof(*context) / 4;
bd0->buffer_addr = sdma->context_phys;
bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
@@ -2096,27 +2096,6 @@ static int sdma_probe(struct platform_device *pdev)
if (pdata && pdata->script_addrs)
sdma_add_scripts(sdma, pdata->script_addrs);
- if (pdata) {
- ret = sdma_get_firmware(sdma, pdata->fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
- } else {
- /*
- * Because that device tree does not encode ROM script address,
- * the RAM script in firmware is mandatory for device tree
- * probe, otherwise it fails.
- */
- ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
- &fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware name\n");
- else {
- ret = sdma_get_firmware(sdma, fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
- }
- }
-
sdma->dma_device.dev = &pdev->dev;
sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
@@ -2161,6 +2140,33 @@ static int sdma_probe(struct platform_device *pdev)
of_node_put(spba_bus);
}
+ /*
+ * Kick off firmware loading as the very last step: at this point the
+ * probe can no longer fail, and the firmware callback requires a fully
+ * functional and allocated sdma instance.
+ */
+ if (pdata) {
+ ret = sdma_get_firmware(sdma, pdata->fw_name);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
+ } else {
+ /*
+ * Because the device tree does not encode the ROM script address,
+ * the RAM script in the firmware is mandatory for a device tree
+ * probe; otherwise probing fails.
+ */
+ ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
+ &fw_name);
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to get firmware name\n");
+ } else {
+ ret = sdma_get_firmware(sdma, fw_name);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
+ }
+ }
+
return 0;
err_register:
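
The hazard behind the reorder above: mainline's sdma_get_firmware() loads firmware asynchronously (via request_firmware_nowait()), so the completion callback can fire before probe() returns and must only ever see a fully initialized device. A hedged sketch of that pattern with illustrative names; only request_firmware_nowait() and its callback signature are real API.

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

struct demo_dev {
	struct device *dev;
	/* everything the callback touches must be set up before the request */
};

static void demo_fw_callback(const struct firmware *fw, void *context)
{
	struct demo_dev *d = context;

	if (fw)
		dev_info(d->dev, "firmware loaded, %zu bytes\n", fw->size);
	release_firmware(fw);			/* NULL-safe */
}

/* called as the very last step of probe, mirroring the hunk above */
static int demo_probe_tail(struct demo_dev *d)
{
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       "demo.bin", d->dev, GFP_KERNEL, d,
				       demo_fw_callback);
}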
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 4b43844f6af5..8e90a405939d 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -799,6 +799,9 @@ static u32 process_channel_irqs(struct bam_device *bdev)
/* Number of bytes available to read */
avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
+ if (offset < bchan->head)
+ avail--;
+
list_for_each_entry_safe(async_desc, tmp,
&bchan->desc_list, desc_node) {
/* Not enough data to read */
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index a2384184a7de..b07c17643210 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -47,11 +47,6 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
bgrt->version);
goto out;
}
- if (bgrt->status & 0xfe) {
- pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n",
- bgrt->status);
- goto out;
- }
if (bgrt->image_type != 0) {
pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
bgrt->image_type);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 16b2137d117c..ad3b1f4866b3 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -52,6 +52,7 @@ struct efi __read_mostly efi = {
.mem_attr_table = EFI_INVALID_TABLE_ADDR,
.rng_seed = EFI_INVALID_TABLE_ADDR,
.tpm_log = EFI_INVALID_TABLE_ADDR,
+ .tpm_final_log = EFI_INVALID_TABLE_ADDR,
.mem_reserve = EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);
@@ -484,6 +485,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
{LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
+ {LINUX_EFI_TPM_FINAL_LOG_GUID, "TPMFinalLog", &efi.tpm_final_log},
{LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &efi.mem_reserve},
{NULL_GUID, NULL, NULL},
};
@@ -1009,14 +1011,16 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
/* first try to find a slot in an existing linked list entry */
for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
- rsv = __va(prsv);
+ rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
if (index < rsv->size) {
rsv->entry[index].base = addr;
rsv->entry[index].size = size;
+ memunmap(rsv);
return 0;
}
+ memunmap(rsv);
}
/* no slot found - allocate a new linked list entry */
@@ -1024,7 +1028,13 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
if (!rsv)
return -ENOMEM;
- rsv->size = EFI_MEMRESERVE_COUNT(PAGE_SIZE);
+ /*
+ * The memremap() call above assumes that a linux_efi_memreserve entry
+ * never crosses a page boundary, so let's ensure that this remains true
+ * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
+ * using SZ_4K explicitly in the size calculation below.
+ */
+ rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
atomic_set(&rsv->count, 1);
rsv->entry[0].base = addr;
rsv->entry[0].size = size;
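
The SZ_4K comment above checks out arithmetically: whatever PAGE_SIZE the running kernel uses, sizing the entry array from a fixed 4 KiB keeps every record inside one 4 KiB page. A standalone sketch; the struct layout is illustrative (the real linux_efi_memreserve uses int/atomic_t/phys_addr_t fields, but has the same 16-byte header and 16-byte entries on 64-bit).

#include <stdio.h>

struct demo_entry { unsigned long long base, size; };
struct demo_hdr { int size; int count; unsigned long long next; };

int main(void)
{
	const unsigned long sz_4k = 4096;
	unsigned long count =
		(sz_4k - sizeof(struct demo_hdr)) / sizeof(struct demo_entry);

	printf("entries per record: %lu\n", count);	/* 255 */
	printf("record size: %zu bytes\n",		/* exactly 4096 */
	       (size_t)(sizeof(struct demo_hdr) +
			count * sizeof(struct demo_entry)));
	return 0;
}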
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 61e099826cbb..35dccc88ac0a 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -43,11 +43,13 @@ static int efibc_set_variable(const char *name, const char *value)
efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data);
memcpy(&entry->var.VendorGuid, &guid, sizeof(guid));
- ret = efivar_entry_set(entry,
- EFI_VARIABLE_NON_VOLATILE
- | EFI_VARIABLE_BOOTSERVICE_ACCESS
- | EFI_VARIABLE_RUNTIME_ACCESS,
- size, entry->var.Data, NULL);
+ ret = efivar_entry_set_safe(entry->var.VariableName,
+ entry->var.VendorGuid,
+ EFI_VARIABLE_NON_VOLATILE
+ | EFI_VARIABLE_BOOTSERVICE_ACCESS
+ | EFI_VARIABLE_RUNTIME_ACCESS,
+ false, size, entry->var.Data);
+
if (ret)
pr_err("failed to set %s EFI variable: 0x%x\n",
name, ret);
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index e4610e72b78f..1db780c0f07b 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -926,3 +926,18 @@ free_map:
fail:
return status;
}
+
+void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid)
+{
+ efi_config_table_t *tables = (efi_config_table_t *)sys_table->tables;
+ int i;
+
+ for (i = 0; i < sys_table->nr_tables; i++) {
+ if (efi_guidcmp(tables[i].guid, guid) != 0)
+ continue;
+
+ return (void *)tables[i].table;
+ }
+
+ return NULL;
+}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 1b1dfcaa6fb9..7f1556fd867d 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -65,6 +65,8 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg);
+void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid);
+
/* Helper macros for the usual case of using simple C variables: */
#ifndef fdt_setprop_inplace_var
#define fdt_setprop_inplace_var(fdt, node_offset, name, var) \
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 5440ba17a1c5..0bf0190917e0 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -363,26 +363,17 @@ fail:
void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size)
{
- efi_guid_t fdt_guid = DEVICE_TREE_GUID;
- efi_config_table_t *tables;
- int i;
+ void *fdt;
- tables = (efi_config_table_t *)sys_table->tables;
+ fdt = get_efi_config_table(sys_table, DEVICE_TREE_GUID);
- for (i = 0; i < sys_table->nr_tables; i++) {
- void *fdt;
+ if (!fdt)
+ return NULL;
- if (efi_guidcmp(tables[i].guid, fdt_guid) != 0)
- continue;
-
- fdt = (void *)tables[i].table;
- if (fdt_check_header(fdt) != 0) {
- pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
- return NULL;
- }
- *fdt_size = fdt_totalsize(fdt);
- return fdt;
+ if (fdt_check_header(fdt) != 0) {
+ pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
+ return NULL;
}
-
- return NULL;
+ *fdt_size = fdt_totalsize(fdt);
+ return fdt;
}
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index 5bd04f75d8d6..eb9af83e4d59 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -57,31 +57,40 @@ void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
#endif
-static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
+void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
{
efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
efi_status_t status;
efi_physical_addr_t log_location = 0, log_last_entry = 0;
struct linux_efi_tpm_eventlog *log_tbl = NULL;
+ struct efi_tcg2_final_events_table *final_events_table;
unsigned long first_entry_addr, last_entry_addr;
size_t log_size, last_entry_size;
efi_bool_t truncated;
+ int version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
void *tcg2_protocol = NULL;
+ int final_events_size = 0;
status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
&tcg2_protocol);
if (status != EFI_SUCCESS)
return;
- status = efi_call_proto(efi_tcg2_protocol, get_event_log, tcg2_protocol,
- EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2,
- &log_location, &log_last_entry, &truncated);
- if (status != EFI_SUCCESS)
- return;
+ status = efi_call_proto(efi_tcg2_protocol, get_event_log,
+ tcg2_protocol, version, &log_location,
+ &log_last_entry, &truncated);
+
+ if (status != EFI_SUCCESS || !log_location) {
+ version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ status = efi_call_proto(efi_tcg2_protocol, get_event_log,
+ tcg2_protocol, version, &log_location,
+ &log_last_entry, &truncated);
+ if (status != EFI_SUCCESS || !log_location)
+ return;
+ }
- if (!log_location)
- return;
first_entry_addr = (unsigned long) log_location;
/*
@@ -96,8 +105,23 @@ static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
* We need to calculate its size to deduce the full size of
* the logs.
*/
- last_entry_size = sizeof(struct tcpa_event) +
- ((struct tcpa_event *) last_entry_addr)->event_size;
+ if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+ /*
+ * The TCG2 log format has variable length entries,
+ * and the information to decode the hash algorithms
+ * back into a size is contained in the first entry -
+ * pass a pointer to the final entry (to calculate its
+ * size) and the first entry (so we know how long each
+ * digest is)
+ */
+ last_entry_size =
+ __calc_tpm2_event_size((void *)last_entry_addr,
+ (void *)(long)log_location,
+ false);
+ } else {
+ last_entry_size = sizeof(struct tcpa_event) +
+ ((struct tcpa_event *) last_entry_addr)->event_size;
+ }
log_size = log_last_entry - log_location + last_entry_size;
}
@@ -112,9 +136,37 @@ static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
return;
}
+ /*
+ * Figure out whether any events have already been logged to the
+ * final events structure, and if so how much space they take up
+ */
+ final_events_table = get_efi_config_table(sys_table_arg,
+ LINUX_EFI_TPM_FINAL_LOG_GUID);
+ if (final_events_table && final_events_table->nr_events) {
+ struct tcg_pcr_event2_head *header;
+ int offset;
+ void *data;
+ int event_size;
+ int i = final_events_table->nr_events;
+
+ data = (void *)final_events_table;
+ offset = sizeof(final_events_table->version) +
+ sizeof(final_events_table->nr_events);
+
+ while (i > 0) {
+ header = data + offset + final_events_size;
+ event_size = __calc_tpm2_event_size(header,
+ (void *)(long)log_location,
+ false);
+ final_events_size += event_size;
+ i--;
+ }
+ }
+
memset(log_tbl, 0, sizeof(*log_tbl) + log_size);
log_tbl->size = log_size;
- log_tbl->version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ log_tbl->final_events_preboot_size = final_events_size;
+ log_tbl->version = version;
memcpy(log_tbl->log, (void *) first_entry_addr, log_size);
status = efi_call_early(install_configuration_table,
@@ -126,9 +178,3 @@ static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
err_free:
efi_call_early(free_pool, log_tbl);
}
-
-void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
-{
- /* Only try to retrieve the logs in 1.2 format. */
- efi_retrieve_tpm2_eventlog_1_2(sys_table_arg);
-}
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index 3a689b40ccc0..1d3f5ca3eaaf 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -4,11 +4,34 @@
* Thiebaud Weksteen <tweek@google.com>
*/
+#define TPM_MEMREMAP(start, size) early_memremap(start, size)
+#define TPM_MEMUNMAP(start, size) early_memunmap(start, size)
+
+#include <asm/early_ioremap.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/memblock.h>
+#include <linux/tpm_eventlog.h>
-#include <asm/early_ioremap.h>
+int efi_tpm_final_log_size;
+EXPORT_SYMBOL(efi_tpm_final_log_size);
+
+static int tpm2_calc_event_log_size(void *data, int count, void *size_info)
+{
+ struct tcg_pcr_event2_head *header;
+ int event_size, size = 0;
+
+ while (count > 0) {
+ header = data + size;
+ event_size = __calc_tpm2_event_size(header, size_info, true);
+ if (event_size == 0)
+ return -1;
+ size += event_size;
+ count--;
+ }
+
+ return size;
+}
/*
* Reserve the memory associated with the TPM Event Log configuration table.
@@ -16,22 +39,54 @@
int __init efi_tpm_eventlog_init(void)
{
struct linux_efi_tpm_eventlog *log_tbl;
+ struct efi_tcg2_final_events_table *final_tbl;
unsigned int tbl_size;
+ int ret = 0;
- if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
+ if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) {
+ /*
+ * We can't calculate the size of the final events without the
+ * first entry in the TPM log, so bail here.
+ */
return 0;
+ }
log_tbl = early_memremap(efi.tpm_log, sizeof(*log_tbl));
if (!log_tbl) {
pr_err("Failed to map TPM Event Log table @ 0x%lx\n",
- efi.tpm_log);
+ efi.tpm_log);
efi.tpm_log = EFI_INVALID_TABLE_ADDR;
return -ENOMEM;
}
tbl_size = sizeof(*log_tbl) + log_tbl->size;
memblock_reserve(efi.tpm_log, tbl_size);
+
+ if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR)
+ goto out;
+
+ final_tbl = early_memremap(efi.tpm_final_log, sizeof(*final_tbl));
+
+ if (!final_tbl) {
+ pr_err("Failed to map TPM Final Event Log table @ 0x%lx\n",
+ efi.tpm_final_log);
+ efi.tpm_final_log = EFI_INVALID_TABLE_ADDR;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ tbl_size = tpm2_calc_event_log_size((void *)efi.tpm_final_log
+ + sizeof(final_tbl->version)
+ + sizeof(final_tbl->nr_events),
+ final_tbl->nr_events,
+ log_tbl->log);
+ memblock_reserve((unsigned long)final_tbl,
+ tbl_size + sizeof(*final_tbl));
+ early_memunmap(final_tbl, sizeof(*final_tbl));
+ efi_tpm_final_log_size = tbl_size;
+
+out:
early_memunmap(log_tbl, sizeof(*log_tbl));
- return 0;
+ return ret;
}
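
Both the stub hunk and tpm2_calc_event_log_size() above share one shape: variable-length records are summed by asking a per-record size helper until the count runs out, bailing on a malformed (zero-size) record. A generic sketch with illustrative names; the real code hands struct tcg_pcr_event2_head pointers to __calc_tpm2_event_size().

#include <stddef.h>

static size_t demo_sum_records(const void *base, int count,
			       size_t (*rec_size)(const void *head))
{
	size_t total = 0;

	while (count-- > 0) {
		size_t sz = rec_size((const char *)base + total);

		if (!sz)		/* malformed record: give up */
			return 0;
		total += sz;
	}
	return total;
}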
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 9bfff171f9fe..8f466993cd24 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -6,6 +6,7 @@
* Copyright (C) 2015 Linaro Ltd.
*/
+#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/clk.h>
@@ -19,6 +20,8 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include "gpiolib.h"
+
/*
* Only the first 8 bits of a register correspond to each pin,
* so there are 4 registers for 32 pins.
@@ -135,6 +138,20 @@ static void mb86s70_gpio_set(struct gpio_chip *gc, unsigned gpio, int value)
spin_unlock_irqrestore(&gchip->lock, flags);
}
+static int mb86s70_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ int irq, index;
+
+ for (index = 0;; index++) {
+ irq = platform_get_irq(to_platform_device(gc->parent), index);
+ if (irq <= 0)
+ break;
+ if (irq_get_irq_data(irq)->hwirq == offset)
+ return irq;
+ }
+ return -EINVAL;
+}
+
static int mb86s70_gpio_probe(struct platform_device *pdev)
{
struct mb86s70_gpio_chip *gchip;
@@ -150,13 +167,15 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gchip->base))
return PTR_ERR(gchip->base);
- gchip->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(gchip->clk))
- return PTR_ERR(gchip->clk);
+ if (!has_acpi_companion(&pdev->dev)) {
+ gchip->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(gchip->clk))
+ return PTR_ERR(gchip->clk);
- ret = clk_prepare_enable(gchip->clk);
- if (ret)
- return ret;
+ ret = clk_prepare_enable(gchip->clk);
+ if (ret)
+ return ret;
+ }
spin_lock_init(&gchip->lock);
@@ -172,19 +191,28 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
gchip->gc.parent = &pdev->dev;
gchip->gc.base = -1;
+ if (has_acpi_companion(&pdev->dev))
+ gchip->gc.to_irq = mb86s70_gpio_to_irq;
+
ret = gpiochip_add_data(&gchip->gc, gchip);
if (ret) {
dev_err(&pdev->dev, "couldn't register gpio driver\n");
clk_disable_unprepare(gchip->clk);
+ return ret;
}
- return ret;
+ if (has_acpi_companion(&pdev->dev))
+ acpi_gpiochip_request_interrupts(&gchip->gc);
+
+ return 0;
}
static int mb86s70_gpio_remove(struct platform_device *pdev)
{
struct mb86s70_gpio_chip *gchip = platform_get_drvdata(pdev);
+ if (has_acpi_companion(&pdev->dev))
+ acpi_gpiochip_free_interrupts(&gchip->gc);
gpiochip_remove(&gchip->gc);
clk_disable_unprepare(gchip->clk);
@@ -197,10 +225,19 @@ static const struct of_device_id mb86s70_gpio_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mb86s70_gpio_dt_ids);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id mb86s70_gpio_acpi_ids[] = {
+ { "SCX0007" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, mb86s70_gpio_acpi_ids);
+#endif
+
static struct platform_driver mb86s70_gpio_driver = {
.driver = {
.name = "mb86s70-gpio",
.of_match_table = mb86s70_gpio_dt_ids,
+ .acpi_match_table = ACPI_PTR(mb86s70_gpio_acpi_ids),
},
.probe = mb86s70_gpio_probe,
.remove = mb86s70_gpio_remove,
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index aec7bd86ae7e..9c9b965d7d6d 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -118,8 +118,15 @@ static void of_gpio_flags_quirks(struct device_node *np,
* Legacy handling of SPI active high chip select. If we have a
* property named "cs-gpios" we need to inspect the child node
* to determine if the flags should have inverted semantics.
+ *
+ * This does not apply to an SPI device named "spi-gpio", because
+ * these have traditionally obtained their own GPIOs by parsing
+ * the device tree directly and did not respect any "spi-cs-high"
+ * property on the SPI bus children.
*/
- if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") &&
+ if (IS_ENABLED(CONFIG_SPI_MASTER) &&
+ !strcmp(propname, "cs-gpios") &&
+ !of_device_is_compatible(np, "spi-gpio") &&
of_property_read_bool(np, "cs-gpios")) {
struct device_node *child;
u32 cs;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index b610e3b30d95..2f18c64d531f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1959,25 +1959,6 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
gfx_v9_0_init_compute_vmid(adev);
-
- mutex_lock(&adev->grbm_idx_mutex);
- /*
- * making sure that the following register writes will be broadcasted
- * to all the shaders
- */
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-
- WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
- (adev->gfx.config.sc_prim_fifo_size_frontend <<
- PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
- (adev->gfx.config.sc_prim_fifo_size_backend <<
- PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
- (adev->gfx.config.sc_hiz_tile_fifo_size <<
- PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
- (adev->gfx.config.sc_earlyz_tile_fifo_size <<
- PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
- mutex_unlock(&adev->grbm_idx_mutex);
-
}
static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 083bd8114db1..dd6b4b0b5f30 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -837,7 +837,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
/* No access to rdtsc. Using raw monotonic time */
args->cpu_clock_counter = ktime_get_raw_ns();
- args->system_clock_counter = ktime_get_boot_ns();
+ args->system_clock_counter = ktime_get_boottime_ns();
/* Since the counter is in nano-seconds we use 1GHz frequency */
args->system_clock_freq = 1000000000;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index f1d326caf69e..a7e8340baf90 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -326,7 +326,7 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
if (ret)
return ret;
- ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index ae64ff7153d6..1cd5a8b5cdc1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -916,8 +916,10 @@ static int init_thermal_controller(
PHM_PlatformCaps_ThermalController
);
- if (0 == powerplay_table->usFanTableOffset)
+ if (0 == powerplay_table->usFanTableOffset) {
+ hwmgr->thermal_controller.use_hw_fan_control = 1;
return 0;
+ }
fan_table = (const PPTable_Generic_SubTable_Header *)
(((unsigned long)powerplay_table) +
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index c92999aac07c..eccb26fddbd0 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -694,6 +694,7 @@ struct pp_thermal_controller_info {
uint8_t ucType;
uint8_t ucI2cLine;
uint8_t ucI2cAddress;
+ uint8_t use_hw_fan_control;
struct pp_fan_info fanInfo;
struct pp_advance_fan_control_parameters advanceFanControlParameters;
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 2d4cfe14f72e..29e641c6a5db 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -2092,6 +2092,10 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
return 0;
}
+ /* use hardware fan control */
+ if (hwmgr->thermal_controller.use_hw_fan_control)
+ return 0;
+
tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
usPWMMin * duty100;
do_div(tmp64, 10000);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 72d01e873160..5418a1a87b2c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -760,7 +760,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
if (IS_ERR(gpu->cmdbuf_suballoc)) {
dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
ret = PTR_ERR(gpu->cmdbuf_suballoc);
- goto fail;
+ goto destroy_iommu;
}
/* Create buffer: */
@@ -768,7 +768,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
PAGE_SIZE);
if (ret) {
dev_err(gpu->dev, "could not create command buffer\n");
- goto destroy_iommu;
+ goto destroy_suballoc;
}
if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
@@ -800,6 +800,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
free_buffer:
etnaviv_cmdbuf_free(&gpu->buffer);
gpu->buffer.suballoc = NULL;
+destroy_suballoc:
+ etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
+ gpu->cmdbuf_suballoc = NULL;
destroy_iommu:
etnaviv_iommu_destroy(gpu->mmu);
gpu->mmu = NULL;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 029fd8ec1857..f0d45ccc1aac 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1888,12 +1888,12 @@ static int ring_request_alloc(struct i915_request *request)
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
- ret = switch_context(request);
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
if (ret)
return ret;
- /* Unconditionally invalidate GPU caches and TLBs. */
- ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ ret = switch_context(request);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 9cc1d678674f..c436a28d50e4 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -91,14 +91,14 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
ipu_dc_disable(ipu);
ipu_prg_disable(ipu);
+ drm_crtc_vblank_off(crtc);
+
spin_lock_irq(&crtc->dev->event_lock);
- if (crtc->state->event) {
+ if (crtc->state->event && !crtc->state->active) {
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
-
- drm_crtc_vblank_off(crtc);
}
static void imx_drm_crtc_reset(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index d11e2281dde6..7e43b25785f7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -63,7 +63,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
return 0;
err_free:
- drm_gem_object_put_unlocked(&shmem->base);
+ drm_gem_handle_delete(file, args->handle);
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index e62fe24b1a2e..5bb0f0a084e9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -619,11 +619,11 @@ static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
output = vgdev->outputs + scanout;
new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
+ drm_connector_update_edid_property(&output->conn, new_edid);
spin_lock(&vgdev->display_info_lock);
old_edid = output->edid;
output->edid = new_edid;
- drm_connector_update_edid_property(&output->conn, output->edid);
spin_unlock(&vgdev->display_info_lock);
kfree(old_edid);
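
The reorder above moves drm_connector_update_edid_property(), which acquires a mutex and can therefore sleep, out of the spin_lock()ed region; only the pointer swap stays under the lock. The general pattern, with illustrative names:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_out {
	spinlock_t lock;
	void *data;
};

static void demo_swap_data(struct demo_out *out, void *new_data)
{
	void *old;

	/* anything that may sleep must run before taking the spinlock */
	spin_lock(&out->lock);
	old = out->data;		/* only the pointer swap is locked */
	out->data = new_data;
	spin_unlock(&out->lock);

	kfree(old);			/* safe: no longer reachable */
}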
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index eac0c54c5970..b032d3899fa3 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -80,6 +80,7 @@
#define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220
#define HID_DEVICE_ID_ALPS_U1 0x1215
#define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C
+#define HID_DEVICE_ID_ALPS_1222 0x1222
#define USB_VENDOR_ID_AMI 0x046b
@@ -269,6 +270,7 @@
#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053
+#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2 0x0939
#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
#define USB_DEVICE_ID_ASUS_AK1D 0x1125
#define USB_DEVICE_ID_CHICONY_TOSHIBA_WT10A 0x1408
@@ -569,6 +571,7 @@
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e
+#define USB_DEVICE_ID_HUION_HS64 0x006d
#define USB_VENDOR_ID_IBM 0x04b3
#define USB_DEVICE_ID_IBM_SCROLLPOINT_III 0x3100
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index e564bff86515..bfcf2ee58d14 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -30,6 +30,7 @@
#define REPORT_ID_HIDPP_SHORT 0x10
#define REPORT_ID_HIDPP_LONG 0x11
+#define REPORT_ID_HIDPP_VERY_LONG 0x12
#define HIDPP_REPORT_SHORT_LENGTH 7
#define HIDPP_REPORT_LONG_LENGTH 20
@@ -1242,7 +1243,8 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
int ret;
if ((buf[0] == REPORT_ID_HIDPP_SHORT) ||
- (buf[0] == REPORT_ID_HIDPP_LONG)) {
+ (buf[0] == REPORT_ID_HIDPP_LONG) ||
+ (buf[0] == REPORT_ID_HIDPP_VERY_LONG)) {
if (count < 2)
return -EINVAL;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 5df5dd56ecc8..b603c14d043b 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1776,6 +1776,10 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_ALPS_JP,
HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) },
+ { .driver_data = MT_CLS_WIN_8_DUAL,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_ALPS_JP,
+ HID_DEVICE_ID_ALPS_1222) },
/* Lenovo X1 TAB Gen 2 */
{ .driver_data = MT_CLS_WIN_8_DUAL,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index e5ca6fe2ca57..671a285724f9 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -42,6 +42,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK), HID_QUIRK_NOGET },
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index 8fe02d81265d..914fb527ae7a 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -369,6 +369,8 @@ static const struct hid_device_id uclogic_devices[] = {
USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HUION,
USB_DEVICE_ID_HUION_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HUION,
+ USB_DEVICE_ID_HUION_HS64) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
USB_DEVICE_ID_HUION_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 0187c9f8fc22..273d784fff66 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -977,6 +977,8 @@ int uclogic_params_init(struct uclogic_params *params,
/* FALL THROUGH */
case VID_PID(USB_VENDOR_ID_HUION,
USB_DEVICE_ID_HUION_TABLET):
+ case VID_PID(USB_VENDOR_ID_HUION,
+ USB_DEVICE_ID_HUION_HS64):
case VID_PID(USB_VENDOR_ID_UCLOGIC,
USB_DEVICE_ID_HUION_TABLET):
case VID_PID(USB_VENDOR_ID_UCLOGIC,
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
index 22ba21457035..aa2dbed30fc3 100644
--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -816,9 +816,9 @@ static int load_fw_from_host(struct ishtp_cl_data *client_data)
goto end_err_fw_release;
release_firmware(fw);
- kfree(filename);
dev_info(cl_data_to_dev(client_data), "ISH firmware %s loaded\n",
filename);
+ kfree(filename);
return 0;
end_err_fw_release:
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index c0487b34d2cf..6ba944b40fdb 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -891,7 +891,7 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
*/
static int hid_ishtp_cl_suspend(struct device *device)
{
- struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
+ struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
@@ -912,7 +912,7 @@ static int hid_ishtp_cl_suspend(struct device *device)
*/
static int hid_ishtp_cl_resume(struct device *device)
{
- struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
+ struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 794e700d65f7..c47c3328a0f4 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -471,7 +471,6 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
}
ishtp_device_ready = true;
- dev_set_drvdata(&device->dev, device);
return device;
}
@@ -640,6 +639,20 @@ void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device)
EXPORT_SYMBOL(ishtp_get_drvdata);
/**
+ * ishtp_dev_to_cl_device() - get ishtp_cl_device instance from device instance
+ * @device: device instance
+ *
+ * Get the ishtp_cl_device instance that embeds the given device instance.
+ *
+ * Return: pointer to ishtp_cl_device instance
+ */
+struct ishtp_cl_device *ishtp_dev_to_cl_device(struct device *device)
+{
+ return to_ishtp_cl_device(device);
+}
+EXPORT_SYMBOL(ishtp_dev_to_cl_device);
+
+/**
* ishtp_bus_new_client() - Create a new client
* @dev: ISHTP device instance
*
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 1c1a2514d6f3..9a59957922d4 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -6,10 +6,14 @@ config HYPERV
tristate "Microsoft Hyper-V client drivers"
depends on X86 && ACPI && X86_LOCAL_APIC && HYPERVISOR_GUEST
select PARAVIRT
+ select X86_HV_CALLBACK_VECTOR
help
Select this option to run Linux as a Hyper-V client operating
system.
+config HYPERV_TIMER
+ def_bool HYPERV
+
config HYPERV_TSCPAGE
def_bool HYPERV && X86_64
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index a1ea482183e8..6188fb7dda42 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -16,6 +16,7 @@
#include <linux/version.h>
#include <linux/random.h>
#include <linux/clockchips.h>
+#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
@@ -23,21 +24,6 @@
struct hv_context hv_context;
/*
- * If false, we're using the old mechanism for stimer0 interrupts
- * where it sends a VMbus message when it expires. The old
- * mechanism is used when running on older versions of Hyper-V
- * that don't support Direct Mode. While Hyper-V provides
- * four stimers per CPU, Linux uses only stimer0.
- */
-static bool direct_mode_enabled;
-static int stimer0_irq;
-static int stimer0_vector;
-
-#define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */
-#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
-#define HV_MIN_DELTA_TICKS 1
-
-/*
* hv_init - Main initialization routine.
*
* This routine must be called before any other routines in here are called
@@ -47,9 +33,6 @@ int hv_init(void)
hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
if (!hv_context.cpu_context)
return -ENOMEM;
-
- direct_mode_enabled = ms_hyperv.misc_features &
- HV_STIMER_DIRECT_MODE_AVAILABLE;
return 0;
}
@@ -88,89 +71,6 @@ int hv_post_message(union hv_connection_id connection_id,
return status & 0xFFFF;
}
-/*
- * ISR for when stimer0 is operating in Direct Mode. Direct Mode
- * does not use VMbus or any VMbus messages, so process here and not
- * in the VMbus driver code.
- */
-
-static void hv_stimer0_isr(void)
-{
- struct hv_per_cpu_context *hv_cpu;
-
- hv_cpu = this_cpu_ptr(hv_context.cpu_context);
- hv_cpu->clk_evt->event_handler(hv_cpu->clk_evt);
- add_interrupt_randomness(stimer0_vector, 0);
-}
-
-static int hv_ce_set_next_event(unsigned long delta,
- struct clock_event_device *evt)
-{
- u64 current_tick;
-
- WARN_ON(!clockevent_state_oneshot(evt));
-
- current_tick = hyperv_cs->read(NULL);
- current_tick += delta;
- hv_init_timer(0, current_tick);
- return 0;
-}
-
-static int hv_ce_shutdown(struct clock_event_device *evt)
-{
- hv_init_timer(0, 0);
- hv_init_timer_config(0, 0);
- if (direct_mode_enabled)
- hv_disable_stimer0_percpu_irq(stimer0_irq);
-
- return 0;
-}
-
-static int hv_ce_set_oneshot(struct clock_event_device *evt)
-{
- union hv_stimer_config timer_cfg;
-
- timer_cfg.as_uint64 = 0;
- timer_cfg.enable = 1;
- timer_cfg.auto_enable = 1;
- if (direct_mode_enabled) {
- /*
- * When it expires, the timer will directly interrupt
- * on the specified hardware vector/IRQ.
- */
- timer_cfg.direct_mode = 1;
- timer_cfg.apic_vector = stimer0_vector;
- hv_enable_stimer0_percpu_irq(stimer0_irq);
- } else {
- /*
- * When it expires, the timer will generate a VMbus message,
- * to be handled by the normal VMbus interrupt handler.
- */
- timer_cfg.direct_mode = 0;
- timer_cfg.sintx = VMBUS_MESSAGE_SINT;
- }
- hv_init_timer_config(0, timer_cfg.as_uint64);
- return 0;
-}
-
-static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
-{
- dev->name = "Hyper-V clockevent";
- dev->features = CLOCK_EVT_FEAT_ONESHOT;
- dev->cpumask = cpumask_of(cpu);
- dev->rating = 1000;
- /*
- * Avoid setting dev->owner = THIS_MODULE deliberately as doing so will
- * result in clockevents_config_and_register() taking additional
- * references to the hv_vmbus module making it impossible to unload.
- */
-
- dev->set_state_shutdown = hv_ce_shutdown;
- dev->set_state_oneshot = hv_ce_set_oneshot;
- dev->set_next_event = hv_ce_set_next_event;
-}
-
-
int hv_synic_alloc(void)
{
int cpu;
@@ -199,14 +99,6 @@ int hv_synic_alloc(void)
tasklet_init(&hv_cpu->msg_dpc,
vmbus_on_msg_dpc, (unsigned long) hv_cpu);
- hv_cpu->clk_evt = kzalloc(sizeof(struct clock_event_device),
- GFP_KERNEL);
- if (hv_cpu->clk_evt == NULL) {
- pr_err("Unable to allocate clock event device\n");
- goto err;
- }
- hv_init_clockevent_device(hv_cpu->clk_evt, cpu);
-
hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_message_page == NULL) {
@@ -229,11 +121,6 @@ int hv_synic_alloc(void)
INIT_LIST_HEAD(&hv_cpu->chan_list);
}
- if (direct_mode_enabled &&
- hv_setup_stimer0_irq(&stimer0_irq, &stimer0_vector,
- hv_stimer0_isr))
- goto err;
-
return 0;
err:
/*
@@ -252,7 +139,6 @@ void hv_synic_free(void)
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
- kfree(hv_cpu->clk_evt);
free_page((unsigned long)hv_cpu->synic_event_page);
free_page((unsigned long)hv_cpu->synic_message_page);
free_page((unsigned long)hv_cpu->post_msg_page);
@@ -311,36 +197,9 @@ int hv_synic_init(unsigned int cpu)
hv_set_synic_state(sctrl.as_uint64);
- /*
- * Register the per-cpu clockevent source.
- */
- if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE)
- clockevents_config_and_register(hv_cpu->clk_evt,
- HV_TIMER_FREQUENCY,
- HV_MIN_DELTA_TICKS,
- HV_MAX_MAX_DELTA_TICKS);
- return 0;
-}
-
-/*
- * hv_synic_clockevents_cleanup - Cleanup clockevent devices
- */
-void hv_synic_clockevents_cleanup(void)
-{
- int cpu;
+ hv_stimer_init(cpu);
- if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
- return;
-
- if (direct_mode_enabled)
- hv_remove_stimer0_irq(stimer0_irq);
-
- for_each_present_cpu(cpu) {
- struct hv_per_cpu_context *hv_cpu
- = per_cpu_ptr(hv_context.cpu_context, cpu);
-
- clockevents_unbind_device(hv_cpu->clk_evt, cpu);
- }
+ return 0;
}
/*
@@ -388,14 +247,7 @@ int hv_synic_cleanup(unsigned int cpu)
if (channel_found && vmbus_connection.conn_state == CONNECTED)
return -EBUSY;
- /* Turn off clockevent device */
- if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
- struct hv_per_cpu_context *hv_cpu
- = this_cpu_ptr(hv_context.cpu_context);
-
- clockevents_unbind_device(hv_cpu->clk_evt, cpu);
- hv_ce_shutdown(hv_cpu->clk_evt);
- }
+ hv_stimer_cleanup(cpu);
hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 7d3d31f099ea..e32681ee7b9f 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -17,6 +17,7 @@
#include <linux/hyperv.h>
#include <linux/clockchips.h>
#include <linux/ptp_clock_kernel.h>
+#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index b8e1ff05f110..362e70e9d145 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -138,7 +138,6 @@ struct hv_per_cpu_context {
* per-cpu list of the channels based on their CPU affinity.
*/
struct list_head chan_list;
- struct clock_event_device *clk_evt;
};
struct hv_context {
@@ -176,8 +175,6 @@ extern int hv_synic_init(unsigned int cpu);
extern int hv_synic_cleanup(unsigned int cpu);
-extern void hv_synic_clockevents_cleanup(void);
-
/* Interface */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 92b1874b3eb3..72d5a7cde7ea 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -30,6 +30,7 @@
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
+#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"
struct vmbus_dynid {
@@ -955,17 +956,6 @@ static void vmbus_onmessage_work(struct work_struct *work)
kfree(ctx);
}
-static void hv_process_timer_expiration(struct hv_message *msg,
- struct hv_per_cpu_context *hv_cpu)
-{
- struct clock_event_device *dev = hv_cpu->clk_evt;
-
- if (dev->event_handler)
- dev->event_handler(dev);
-
- vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
-}
-
void vmbus_on_msg_dpc(unsigned long data)
{
struct hv_per_cpu_context *hv_cpu = (void *)data;
@@ -1159,9 +1149,10 @@ static void vmbus_isr(void)
/* Check if there are actual msgs to be processed */
if (msg->header.message_type != HVMSG_NONE) {
- if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
- hv_process_timer_expiration(msg, hv_cpu);
- else
+ if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
+ hv_stimer0_isr();
+ vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
+ } else
tasklet_schedule(&hv_cpu->msg_dpc);
}
@@ -1263,14 +1254,19 @@ static int vmbus_bus_init(void)
ret = hv_synic_alloc();
if (ret)
goto err_alloc;
+
+ ret = hv_stimer_alloc(VMBUS_MESSAGE_SINT);
+ if (ret < 0)
+ goto err_alloc;
+
/*
- * Initialize the per-cpu interrupt state and
- * connect to the host.
+ * Initialize the per-cpu interrupt state and stimer state.
+ * Then connect to the host.
*/
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
hv_synic_init, hv_synic_cleanup);
if (ret < 0)
- goto err_alloc;
+ goto err_cpuhp;
hyperv_cpuhp_online = ret;
ret = vmbus_connect();
@@ -1318,6 +1314,8 @@ static int vmbus_bus_init(void)
err_connect:
cpuhp_remove_state(hyperv_cpuhp_online);
+err_cpuhp:
+ hv_stimer_free();
err_alloc:
hv_synic_free();
hv_remove_vmbus_irq();
@@ -2064,7 +2062,7 @@ static struct acpi_driver vmbus_acpi_driver = {
static void hv_kexec_handler(void)
{
- hv_synic_clockevents_cleanup();
+ hv_stimer_global_cleanup();
vmbus_initiate_unload(false);
vmbus_connection.conn_state = DISCONNECTED;
/* Make sure conn_state is set as hv_synic_cleanup checks for it */
@@ -2075,6 +2073,8 @@ static void hv_kexec_handler(void)
static void hv_crash_handler(struct pt_regs *regs)
{
+ int cpu;
+
vmbus_initiate_unload(true);
/*
* In crash handler we can't schedule synic cleanup for all CPUs,
@@ -2082,7 +2082,9 @@ static void hv_crash_handler(struct pt_regs *regs)
* for kdump.
*/
vmbus_connection.conn_state = DISCONNECTED;
- hv_synic_cleanup(smp_processor_id());
+ cpu = smp_processor_id();
+ hv_stimer_cleanup(cpu);
+ hv_synic_cleanup(cpu);
hyperv_cleanup();
};
@@ -2131,7 +2133,7 @@ static void __exit vmbus_exit(void)
hv_remove_kexec_handler();
hv_remove_crash_handler();
vmbus_connection.conn_state = DISCONNECTED;
- hv_synic_clockevents_cleanup();
+ hv_stimer_global_cleanup();
vmbus_disconnect();
hv_remove_vmbus_irq();
for_each_online_cpu(cpu) {
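
Taken together, the vmbus_bus_init() hunks above establish the following bring-up and unwind order; this sketch only restates what the patch does in one place, using the patch's own symbols (VMBUS_MESSAGE_SINT comes from hyperv_vmbus.h).

#include <clocksource/hyperv_timer.h>

static int demo_bus_init_order(void)
{
	int ret;

	ret = hv_synic_alloc();
	if (ret)
		return ret;

	/* global stimer state before any CPU runs hv_stimer_init() */
	ret = hv_stimer_alloc(VMBUS_MESSAGE_SINT);
	if (ret < 0)
		goto err_synic;

	/*
	 * Per-cpu: hv_synic_init() now calls hv_stimer_init(cpu) and
	 * hv_synic_cleanup() calls hv_stimer_cleanup(cpu).
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
				hv_synic_init, hv_synic_cleanup);
	if (ret < 0)
		goto err_stimer;

	return 0;

err_stimer:
	hv_stimer_free();
err_synic:
	hv_synic_free();
	return ret;
}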
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 4d0d6c86c12f..fe6618e49dc4 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -96,10 +96,10 @@ struct platform_data {
struct device_attribute name_attr;
};
-/* Keep track of how many package pointers we allocated in init() */
-static int max_packages __read_mostly;
-/* Array of package pointers. Serialized by cpu hotplug lock */
-static struct platform_device **pkg_devices;
+/* Keep track of how many zone pointers we allocated in init() */
+static int max_zones __read_mostly;
+/* Array of zone pointers. Serialized by cpu hotplug lock */
+static struct platform_device **zone_devices;
static ssize_t show_label(struct device *dev,
struct device_attribute *devattr, char *buf)
@@ -422,10 +422,10 @@ static int chk_ucode_version(unsigned int cpu)
static struct platform_device *coretemp_get_pdev(unsigned int cpu)
{
- int pkgid = topology_logical_package_id(cpu);
+ int id = topology_logical_die_id(cpu);
- if (pkgid >= 0 && pkgid < max_packages)
- return pkg_devices[pkgid];
+ if (id >= 0 && id < max_zones)
+ return zone_devices[id];
return NULL;
}
@@ -531,7 +531,7 @@ static int coretemp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct platform_data *pdata;
- /* Initialize the per-package data structures */
+ /* Initialize the per-zone data structures */
pdata = devm_kzalloc(dev, sizeof(struct platform_data), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@@ -566,13 +566,13 @@ static struct platform_driver coretemp_driver = {
static struct platform_device *coretemp_device_add(unsigned int cpu)
{
- int err, pkgid = topology_logical_package_id(cpu);
+ int err, zoneid = topology_logical_die_id(cpu);
struct platform_device *pdev;
- if (pkgid < 0)
+ if (zoneid < 0)
return ERR_PTR(-ENOMEM);
- pdev = platform_device_alloc(DRVNAME, pkgid);
+ pdev = platform_device_alloc(DRVNAME, zoneid);
if (!pdev)
return ERR_PTR(-ENOMEM);
@@ -582,7 +582,7 @@ static struct platform_device *coretemp_device_add(unsigned int cpu)
return ERR_PTR(err);
}
- pkg_devices[pkgid] = pdev;
+ zone_devices[zoneid] = pdev;
return pdev;
}
@@ -690,7 +690,7 @@ static int coretemp_cpu_offline(unsigned int cpu)
* the rest.
*/
if (cpumask_empty(&pd->cpumask)) {
- pkg_devices[topology_logical_package_id(cpu)] = NULL;
+ zone_devices[topology_logical_die_id(cpu)] = NULL;
platform_device_unregister(pdev);
return 0;
}
@@ -728,10 +728,10 @@ static int __init coretemp_init(void)
if (!x86_match_cpu(coretemp_ids))
return -ENODEV;
- max_packages = topology_max_packages();
- pkg_devices = kcalloc(max_packages, sizeof(struct platform_device *),
+ max_zones = topology_max_packages() * topology_max_die_per_package();
+ zone_devices = kcalloc(max_zones, sizeof(struct platform_device *),
GFP_KERNEL);
- if (!pkg_devices)
+ if (!zone_devices)
return -ENOMEM;
err = platform_driver_register(&coretemp_driver);
@@ -747,7 +747,7 @@ static int __init coretemp_init(void)
outdrv:
platform_driver_unregister(&coretemp_driver);
- kfree(pkg_devices);
+ kfree(zone_devices);
return err;
}
module_init(coretemp_init)
@@ -756,7 +756,7 @@ static void __exit coretemp_exit(void)
{
cpuhp_remove_state(coretemp_hp_online);
platform_driver_unregister(&coretemp_driver);
- kfree(pkg_devices);
+ kfree(zone_devices);
}
module_exit(coretemp_exit)
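
The package-to-zone rename above boils down to index arithmetic: with multi-die packages, a flat array needs max_packages * max_die_per_package slots, and topology_logical_die_id() already returns an index into that flattened space. A standalone illustration in plain C with made-up counts:

#include <stdio.h>

int main(void)
{
	int max_packages = 2, max_die_per_package = 2;
	int max_zones = max_packages * max_die_per_package;

	for (int pkg = 0; pkg < max_packages; pkg++)
		for (int die = 0; die < max_die_per_package; die++)
			printf("pkg %d die %d -> zone %d of %d\n",
			       pkg, die,
			       pkg * max_die_per_package + die, max_zones);
	return 0;
}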
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index d84095591e45..f86065e16772 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -111,8 +111,7 @@ static int i2c_acpi_do_lookup(struct acpi_device *adev,
struct list_head resource_list;
int ret;
- if (acpi_bus_get_status(adev) || !adev->status.present ||
- acpi_device_enumerated(adev))
+ if (acpi_bus_get_status(adev) || !adev->status.present)
return -EINVAL;
if (acpi_match_device_ids(adev, i2c_acpi_ignored_device_ids) == 0)
@@ -147,6 +146,9 @@ static int i2c_acpi_get_info(struct acpi_device *adev,
lookup.info = info;
lookup.index = -1;
+ if (acpi_device_enumerated(adev))
+ return -EINVAL;
+
ret = i2c_acpi_do_lookup(adev, &lookup);
if (ret)
return ret;
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index c8159205c77d..4e22b3c3e488 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -149,7 +149,7 @@ static int dht11_decode(struct dht11 *dht11, int offset)
return -EIO;
}
- dht11->timestamp = ktime_get_boot_ns();
+ dht11->timestamp = ktime_get_boottime_ns();
if (hum_int < 4) { /* DHT22: 100000 = (3*256+232)*100 */
dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) *
((temp_int & 0x80) ? -100 : 100);
@@ -177,7 +177,7 @@ static irqreturn_t dht11_handle_irq(int irq, void *data)
/* TODO: Consider making the handler safe for IRQ sharing */
if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
- dht11->edges[dht11->num_edges].ts = ktime_get_boot_ns();
+ dht11->edges[dht11->num_edges].ts = ktime_get_boottime_ns();
dht11->edges[dht11->num_edges++].value =
gpio_get_value(dht11->gpio);
@@ -196,7 +196,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
int ret, timeres, offset;
mutex_lock(&dht11->lock);
- if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) {
+ if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boottime_ns()) {
timeres = ktime_get_resolution_ns();
dev_dbg(dht11->dev, "current timeresolution: %dns\n", timeres);
if (timeres > DHT11_MIN_TIMERES) {
@@ -322,7 +322,7 @@ static int dht11_probe(struct platform_device *pdev)
return -EINVAL;
}
- dht11->timestamp = ktime_get_boot_ns() - DHT11_DATA_VALID_TIME - 1;
+ dht11->timestamp = ktime_get_boottime_ns() - DHT11_DATA_VALID_TIME - 1;
dht11->num_edges = -1;
platform_set_drvdata(pdev, iio);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 245b5844028d..401d7ff99853 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -228,9 +228,9 @@ s64 iio_get_time_ns(const struct iio_dev *indio_dev)
ktime_get_coarse_ts64(&tp);
return timespec64_to_ns(&tp);
case CLOCK_BOOTTIME:
- return ktime_get_boot_ns();
+ return ktime_get_boottime_ns();
case CLOCK_TAI:
- return ktime_get_tai_ns();
+ return ktime_get_clocktai_ns();
default:
BUG();
}
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 29f7b15c81d9..3352a107b4a3 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -457,7 +457,7 @@ static int alloc_name(struct ib_device *ibdev, const char *name)
int rc;
int i;
- lockdep_assert_held_exclusive(&devices_rwsem);
+ lockdep_assert_held_write(&devices_rwsem);
ida_init(&inuse);
xa_for_each (&devices, index, device) {
char buf[IB_DEVICE_NAME_MAX];
@@ -2520,7 +2520,7 @@ static int __init ib_core_init(void)
goto err_mad;
}
- ret = register_lsm_notifier(&ibdev_lsm_nb);
+ ret = register_blocking_lsm_notifier(&ibdev_lsm_nb);
if (ret) {
pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
goto err_sa;
@@ -2539,7 +2539,7 @@ static int __init ib_core_init(void)
return 0;
err_compat:
- unregister_lsm_notifier(&ibdev_lsm_nb);
+ unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
err_sa:
ib_sa_cleanup();
err_mad:
@@ -2565,7 +2565,7 @@ static void __exit ib_core_cleanup(void)
nldev_exit();
rdma_nl_unregister(RDMA_NL_LS);
unregister_pernet_device(&rdma_dev_net_ops);
- unregister_lsm_notifier(&ibdev_lsm_nb);
+ unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
ib_sa_cleanup();
ib_mad_cleanup();
addr_cleanup();
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 4fe662c3bbc1..c142b23bb401 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -1038,7 +1038,7 @@ int hfi1_get_proc_affinity(int node)
struct hfi1_affinity_node *entry;
cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
const struct cpumask *node_mask,
- *proc_mask = &current->cpus_allowed;
+ *proc_mask = current->cpus_ptr;
struct hfi1_affinity_node_list *affinity = &node_affinity;
struct cpu_mask_set *set = &affinity->proc;
@@ -1046,7 +1046,7 @@ int hfi1_get_proc_affinity(int node)
* check whether process/context affinity has already
* been set
*/
- if (cpumask_weight(proc_mask) == 1) {
+ if (current->nr_cpus_allowed == 1) {
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
@@ -1057,7 +1057,7 @@ int hfi1_get_proc_affinity(int node)
cpu = cpumask_first(proc_mask);
cpumask_set_cpu(cpu, &set->used);
goto done;
- } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
+ } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 28b66bd70b74..2395fd4233a7 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -869,14 +869,13 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
{
struct sdma_rht_node *rht_node;
struct sdma_engine *sde = NULL;
- const struct cpumask *current_mask = &current->cpus_allowed;
unsigned long cpu_id;
/*
* To ensure that always the same sdma engine(s) will be
* selected make sure the process is pinned to this CPU only.
*/
- if (cpumask_weight(current_mask) != 1)
+ if (current->nr_cpus_allowed != 1)
goto out;
cpu_id = smp_processor_id();
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 2a0b59a4b6eb..cca414ecfcd5 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -310,7 +310,7 @@ static void aliasguid_query_handler(int status,
if (status) {
pr_debug("(port: %d) failed: status = %d\n",
cb_ctx->port, status);
- rec->time_to_run = ktime_get_boot_ns() + 1 * NSEC_PER_SEC;
+ rec->time_to_run = ktime_get_boottime_ns() + 1 * NSEC_PER_SEC;
goto out;
}
@@ -416,7 +416,7 @@ next_entry:
be64_to_cpu((__force __be64)rec->guid_indexes),
be64_to_cpu((__force __be64)applied_guid_indexes),
be64_to_cpu((__force __be64)declined_guid_indexes));
- rec->time_to_run = ktime_get_boot_ns() +
+ rec->time_to_run = ktime_get_boottime_ns() +
resched_delay_sec * NSEC_PER_SEC;
} else {
rec->status = MLX4_GUID_INFO_STATUS_SET;
@@ -709,7 +709,7 @@ static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
}
}
if (resched_delay_sec) {
- u64 curr_time = ktime_get_boot_ns();
+ u64 curr_time = ktime_get_boottime_ns();
*resched_delay_sec = (low_record_time < curr_time) ? 0 :
div_u64((low_record_time - curr_time), NSEC_PER_SEC);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 78fa634de98a..27b6e664e59d 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1142,7 +1142,7 @@ static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
{
struct qib_filedata *fd = fp->private_data;
- const unsigned int weight = cpumask_weight(&current->cpus_allowed);
+ const unsigned int weight = current->nr_cpus_allowed;
const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
int local_cpu;
@@ -1623,9 +1623,8 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
ret = find_free_ctxt(i_minor - 1, fp, uinfo);
else {
int unit;
- const unsigned int cpu = cpumask_first(&current->cpus_allowed);
- const unsigned int weight =
- cpumask_weight(&current->cpus_allowed);
+ const unsigned int cpu = cpumask_first(current->cpus_ptr);
+ const unsigned int weight = current->nr_cpus_allowed;
if (weight == 1 && !test_bit(cpu, qib_cpulist))
if (!find_hca(cpu, &unit) && unit >= 0)
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 659c5e0fb835..80e10f4e213a 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -15,10 +15,10 @@ config ARM_GIC_PM
bool
depends on PM
select ARM_GIC
- select PM_CLK
config ARM_GIC_MAX_NR
int
+ depends on ARM_GIC
default 2 if ARCH_REALVIEW
default 1
@@ -87,6 +87,14 @@ config ALPINE_MSI
select PCI_MSI
select GENERIC_IRQ_CHIP
+config AL_FIC
+ bool "Amazon's Annapurna Labs Fabric Interrupt Controller"
+ depends on OF || COMPILE_TEST
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ help
+ Support Amazon's Annapurna Labs Fabric Interrupt Controller.
+
config ATMEL_AIC_IRQ
bool
select GENERIC_IRQ_CHIP
@@ -217,13 +225,26 @@ config RDA_INTC
select IRQ_DOMAIN
config RENESAS_INTC_IRQPIN
- bool
+ bool "Renesas INTC External IRQ Pin Support" if COMPILE_TEST
select IRQ_DOMAIN
+ help
+ Enable support for the Renesas Interrupt Controller for external
+ interrupt pins, as found on SH/R-Mobile and R-Car Gen1 SoCs.
config RENESAS_IRQC
- bool
+ bool "Renesas R-Mobile APE6 and R-Car IRQC support" if COMPILE_TEST
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
+ help
+ Enable support for the Renesas Interrupt Controller for external
+ devices, as found on R-Mobile APE6, R-Car Gen2, and R-Car Gen3 SoCs.
+
+config RENESAS_RZA1_IRQC
+ bool "Renesas RZ/A1 IRQC support" if COMPILE_TEST
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Enable support for the Renesas RZ/A1 Interrupt Controller, to use up
+ to 8 external interrupts with configurable sense select.
config ST_IRQCHIP
bool
@@ -299,8 +320,11 @@ config RENESAS_H8300H_INTC
select IRQ_DOMAIN
config RENESAS_H8S_INTC
- bool
+ bool "Renesas H8S Interrupt Controller Support" if COMPILE_TEST
select IRQ_DOMAIN
+ help
+ Enable support for the Renesas H8/300 Interrupt Controller, as found
+ on Renesas H8S SoCs.
config IMX_GPCV2
bool
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 606a003a0000..8d0fcec6ab23 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IRQCHIP) += irqchip.o
+obj-$(CONFIG_AL_FIC) += irq-al-fic.o
obj-$(CONFIG_ALPINE_MSI) += irq-alpine-msi.o
obj-$(CONFIG_ATH79) += irq-ath79-cpu.o
obj-$(CONFIG_ATH79) += irq-ath79-misc.o
@@ -49,6 +50,7 @@ obj-$(CONFIG_JCORE_AIC) += irq-jcore-aic.o
obj-$(CONFIG_RDA_INTC) += irq-rda-intc.o
obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
+obj-$(CONFIG_RENESAS_RZA1_IRQC) += irq-renesas-rza1.o
obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
diff --git a/drivers/irqchip/irq-al-fic.c b/drivers/irqchip/irq-al-fic.c
new file mode 100644
index 000000000000..1a57cee3efab
--- /dev/null
+++ b/drivers/irqchip/irq-al-fic.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+/* FIC Registers */
+#define AL_FIC_CAUSE 0x00
+#define AL_FIC_MASK 0x10
+#define AL_FIC_CONTROL 0x28
+
+#define CONTROL_TRIGGER_RISING BIT(3)
+#define CONTROL_MASK_MSI_X BIT(5)
+
+#define NR_FIC_IRQS 32
+
+MODULE_AUTHOR("Talel Shenhar");
+MODULE_DESCRIPTION("Amazon's Annapurna Labs Interrupt Controller Driver");
+MODULE_LICENSE("GPL v2");
+
+enum al_fic_state {
+ AL_FIC_UNCONFIGURED = 0,
+ AL_FIC_CONFIGURED_LEVEL,
+ AL_FIC_CONFIGURED_RISING_EDGE,
+};
+
+struct al_fic {
+ void __iomem *base;
+ struct irq_domain *domain;
+ const char *name;
+ unsigned int parent_irq;
+ enum al_fic_state state;
+};
+
+static void al_fic_set_trigger(struct al_fic *fic,
+ struct irq_chip_generic *gc,
+ enum al_fic_state new_state)
+{
+ irq_flow_handler_t handler;
+ u32 control = readl_relaxed(fic->base + AL_FIC_CONTROL);
+
+ if (new_state == AL_FIC_CONFIGURED_LEVEL) {
+ handler = handle_level_irq;
+ control &= ~CONTROL_TRIGGER_RISING;
+ } else {
+ handler = handle_edge_irq;
+ control |= CONTROL_TRIGGER_RISING;
+ }
+ gc->chip_types->handler = handler;
+ fic->state = new_state;
+ writel_relaxed(control, fic->base + AL_FIC_CONTROL);
+}
+
+static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct al_fic *fic = gc->private;
+ enum al_fic_state new_state;
+ int ret = 0;
+
+ irq_gc_lock(gc);
+
+ if (((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH) &&
+ ((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)) {
+ pr_debug("fic doesn't support flow type %d\n", flow_type);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ new_state = (flow_type & IRQ_TYPE_LEVEL_HIGH) ?
+ AL_FIC_CONFIGURED_LEVEL : AL_FIC_CONFIGURED_RISING_EDGE;
+
+ /*
+ * A given FIC instance can be either all level or all edge triggered.
+ * This is generally fixed depending on what pieces of HW it's wired up
+ * to.
+ *
+ * We configure it based on the sensitivity of the first source
+ * being setup, and reject any subsequent attempt at configuring it in a
+ * different way.
+ */
+ if (fic->state == AL_FIC_UNCONFIGURED) {
+ al_fic_set_trigger(fic, gc, new_state);
+ } else if (fic->state != new_state) {
+ pr_debug("fic %s state already configured to %d\n",
+ fic->name, fic->state);
+ ret = -EINVAL;
+ goto err;
+ }
+
+err:
+ irq_gc_unlock(gc);
+
+ return ret;
+}
+
+static void al_fic_irq_handler(struct irq_desc *desc)
+{
+ struct al_fic *fic = irq_desc_get_handler_data(desc);
+ struct irq_domain *domain = fic->domain;
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
+ unsigned long pending;
+ unsigned int irq;
+ u32 hwirq;
+
+ chained_irq_enter(irqchip, desc);
+
+ pending = readl_relaxed(fic->base + AL_FIC_CAUSE);
+ pending &= ~gc->mask_cache;
+
+ for_each_set_bit(hwirq, &pending, NR_FIC_IRQS) {
+ irq = irq_find_mapping(domain, hwirq);
+ generic_handle_irq(irq);
+ }
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static int al_fic_register(struct device_node *node,
+ struct al_fic *fic)
+{
+ struct irq_chip_generic *gc;
+ int ret;
+
+ fic->domain = irq_domain_add_linear(node,
+ NR_FIC_IRQS,
+ &irq_generic_chip_ops,
+ fic);
+ if (!fic->domain) {
+ pr_err("fail to add irq domain\n");
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(fic->domain,
+ NR_FIC_IRQS,
+ 1, fic->name,
+ handle_level_irq,
+ 0, 0, IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("fail to allocate generic chip (%d)\n", ret);
+ goto err_domain_remove;
+ }
+
+ gc = irq_get_domain_generic_chip(fic->domain, 0);
+ gc->reg_base = fic->base;
+ gc->chip_types->regs.mask = AL_FIC_MASK;
+ gc->chip_types->regs.ack = AL_FIC_CAUSE;
+ gc->chip_types->chip.irq_mask = irq_gc_mask_set_bit;
+ gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit;
+ gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit;
+ gc->chip_types->chip.irq_set_type = al_fic_irq_set_type;
+ gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE;
+ gc->private = fic;
+
+ irq_set_chained_handler_and_data(fic->parent_irq,
+ al_fic_irq_handler,
+ fic);
+ return 0;
+
+err_domain_remove:
+ irq_domain_remove(fic->domain);
+
+ return ret;
+}
+
+/*
+ * al_fic_wire_init() - initialize and configure fic in wire mode
+ * @of_node: optional pointer to interrupt controller's device tree node.
+ * @base: mmio to fic register
+ * @name: name of the fic
+ * @parent_irq: interrupt of parent
+ *
+ * This API configures the FIC hardware to work in wire mode.
+ * In wire mode, the FIC hardware generates a wire ("wired") interrupt.
+ * The interrupt can be generated on a positive edge or on a level;
+ * which one applies is determined by the hardware wired to this FIC.
+ */
+static struct al_fic *al_fic_wire_init(struct device_node *node,
+ void __iomem *base,
+ const char *name,
+ unsigned int parent_irq)
+{
+ struct al_fic *fic;
+ int ret;
+ u32 control = CONTROL_MASK_MSI_X;
+
+ fic = kzalloc(sizeof(*fic), GFP_KERNEL);
+ if (!fic)
+ return ERR_PTR(-ENOMEM);
+
+ fic->base = base;
+ fic->parent_irq = parent_irq;
+ fic->name = name;
+
+ /* mask out all interrupts */
+ writel_relaxed(0xFFFFFFFF, fic->base + AL_FIC_MASK);
+
+ /* clear any pending interrupt */
+ writel_relaxed(0, fic->base + AL_FIC_CAUSE);
+
+ writel_relaxed(control, fic->base + AL_FIC_CONTROL);
+
+ ret = al_fic_register(node, fic);
+ if (ret) {
+ pr_err("fail to register irqchip\n");
+ goto err_free;
+ }
+
+ pr_debug("%s initialized successfully in Legacy mode (parent-irq=%u)\n",
+ fic->name, parent_irq);
+
+ return fic;
+
+err_free:
+ kfree(fic);
+ return ERR_PTR(ret);
+}
+
+static int __init al_fic_init_dt(struct device_node *node,
+ struct device_node *parent)
+{
+ int ret;
+ void __iomem *base;
+ unsigned int parent_irq;
+ struct al_fic *fic;
+
+ if (!parent) {
+ pr_err("%s: unsupported - device require a parent\n",
+ node->name);
+ return -EINVAL;
+ }
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s: fail to map memory\n", node->name);
+ return -ENOMEM;
+ }
+
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq) {
+ pr_err("%s: fail to map irq\n", node->name);
+ ret = -EINVAL;
+ goto err_unmap;
+ }
+
+ fic = al_fic_wire_init(node,
+ base,
+ node->name,
+ parent_irq);
+ if (IS_ERR(fic)) {
+ pr_err("%s: fail to initialize irqchip (%lu)\n",
+ node->name,
+ PTR_ERR(fic));
+ ret = PTR_ERR(fic);
+ goto err_irq_dispose;
+ }
+
+ return 0;
+
+err_irq_dispose:
+ irq_dispose_mapping(parent_irq);
+err_unmap:
+ iounmap(base);
+
+ return ret;
+}
+
+IRQCHIP_DECLARE(al_fic, "amazon,al-fic", al_fic_init_dt);
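
The new driver above fixes a FIC instance to a single trigger mode: the first irq_set_type() call configures it, and later calls must request the same mode. A standalone sketch of that one-shot policy (names are illustrative, not kernel API):

#include <stdio.h>

enum fic_state { UNCONFIGURED, LEVEL, EDGE };

static int set_trigger(enum fic_state *state, enum fic_state want)
{
	if (*state == UNCONFIGURED) {
		*state = want;			/* first caller wins */
		return 0;
	}
	return (*state == want) ? 0 : -1;	/* reject a conflicting mode */
}

int main(void)
{
	enum fic_state s = UNCONFIGURED;

	printf("level: %d\n", set_trigger(&s, LEVEL));	/* 0  */
	printf("level: %d\n", set_trigger(&s, LEVEL));	/* 0  */
	printf("edge:  %d\n", set_trigger(&s, EDGE));	/* -1 */
	return 0;
}
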
diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c
index c67c961ab6cc..a1534edef7fa 100644
--- a/drivers/irqchip/irq-csky-mpintc.c
+++ b/drivers/irqchip/irq-csky-mpintc.c
@@ -32,8 +32,8 @@ static void __iomem *INTCL_base;
#define INTCG_CIDSTR 0x1000
#define INTCL_PICTLR 0x0
+#define INTCL_CFGR 0x14
#define INTCL_SIGR 0x60
-#define INTCL_HPPIR 0x68
#define INTCL_RDYIR 0x6c
#define INTCL_SENR 0xa0
#define INTCL_CENR 0xa4
@@ -41,21 +41,49 @@ static void __iomem *INTCL_base;
static DEFINE_PER_CPU(void __iomem *, intcl_reg);
+static unsigned long *__trigger;
+
+#define IRQ_OFFSET(irq) (((irq) < COMM_IRQ_BASE) ? (irq) : ((irq) - COMM_IRQ_BASE))
+
+#define TRIG_BYTE_OFFSET(i) ((((i) * 2) / 32) * 4)
+#define TRIG_BIT_OFFSET(i) (((i) * 2) % 32)
+
+#define TRIG_VAL(trigger, irq) ((trigger) << TRIG_BIT_OFFSET(IRQ_OFFSET(irq)))
+#define TRIG_VAL_MSK(irq) (~(3 << TRIG_BIT_OFFSET(IRQ_OFFSET(irq))))
+
+#define TRIG_BASE(irq) \
+ (TRIG_BYTE_OFFSET(IRQ_OFFSET(irq)) + ((irq < COMM_IRQ_BASE) ? \
+ (this_cpu_read(intcl_reg) + INTCL_CFGR) : (INTCG_base + INTCG_CICFGR)))
+
+static DEFINE_SPINLOCK(setup_lock);
+static void setup_trigger(unsigned long irq, unsigned long trigger)
+{
+ unsigned int tmp;
+
+ spin_lock(&setup_lock);
+
+ /* setup trigger */
+ tmp = readl_relaxed(TRIG_BASE(irq)) & TRIG_VAL_MSK(irq);
+
+ writel_relaxed(tmp | TRIG_VAL(trigger, irq), TRIG_BASE(irq));
+
+ spin_unlock(&setup_lock);
+}
+
static void csky_mpintc_handler(struct pt_regs *regs)
{
void __iomem *reg_base = this_cpu_read(intcl_reg);
- do {
- handle_domain_irq(root_domain,
- readl_relaxed(reg_base + INTCL_RDYIR),
- regs);
- } while (readl_relaxed(reg_base + INTCL_HPPIR) & BIT(31));
+ handle_domain_irq(root_domain,
+ readl_relaxed(reg_base + INTCL_RDYIR), regs);
}
static void csky_mpintc_enable(struct irq_data *d)
{
void __iomem *reg_base = this_cpu_read(intcl_reg);
+ setup_trigger(d->hwirq, __trigger[d->hwirq]);
+
writel_relaxed(d->hwirq, reg_base + INTCL_SENR);
}
@@ -73,6 +101,28 @@ static void csky_mpintc_eoi(struct irq_data *d)
writel_relaxed(d->hwirq, reg_base + INTCL_CACR);
}
+static int csky_mpintc_set_type(struct irq_data *d, unsigned int type)
+{
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ __trigger[d->hwirq] = 0;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ __trigger[d->hwirq] = 1;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ __trigger[d->hwirq] = 2;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ __trigger[d->hwirq] = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_SMP
static int csky_irq_set_affinity(struct irq_data *d,
const struct cpumask *mask_val,
@@ -89,8 +139,19 @@ static int csky_irq_set_affinity(struct irq_data *d,
if (cpu >= nr_cpu_ids)
return -EINVAL;
- /* Enable interrupt destination */
- cpu |= BIT(31);
+ /*
+ * The csky,mpintc supports automatic irq delivery, but it can only
+ * deliver an external irq to one cpu or to all cpus; it cannot
+ * deliver an external irq to an arbitrary group of cpus described
+ * by a cpu_mask.
+ * So we only use automatic delivery mode when the affinity mask_val
+ * is equal to cpu_present_mask.
+ */
+ if (cpumask_equal(mask_val, cpu_present_mask))
+ cpu = 0;
+ else
+ cpu |= BIT(31);
writel_relaxed(cpu, INTCG_base + INTCG_CIDSTR + offset);
@@ -105,6 +166,7 @@ static struct irq_chip csky_irq_chip = {
.irq_eoi = csky_mpintc_eoi,
.irq_enable = csky_mpintc_enable,
.irq_disable = csky_mpintc_disable,
+ .irq_set_type = csky_mpintc_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = csky_irq_set_affinity,
#endif
@@ -125,9 +187,26 @@ static int csky_irqdomain_map(struct irq_domain *d, unsigned int irq,
return 0;
}
+static int csky_irq_domain_xlate_cells(struct irq_domain *d,
+ struct device_node *ctrlr, const u32 *intspec,
+ unsigned int intsize, unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+
+ *out_hwirq = intspec[0];
+ if (intsize > 1)
+ *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+ else
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
static const struct irq_domain_ops csky_irqdomain_ops = {
.map = csky_irqdomain_map,
- .xlate = irq_domain_xlate_onecell,
+ .xlate = csky_irq_domain_xlate_cells,
};
#ifdef CONFIG_SMP
@@ -161,6 +240,10 @@ csky_mpintc_init(struct device_node *node, struct device_node *parent)
if (ret < 0)
nr_irq = INTC_IRQS;
+ __trigger = kcalloc(nr_irq, sizeof(unsigned long), GFP_KERNEL);
+ if (__trigger == NULL)
+ return -ENXIO;
+
if (INTCG_base == NULL) {
INTCG_base = ioremap(mfcr("cr<31, 14>"),
INTCL_SIZE*nr_cpu_ids + INTCG_SIZE);
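
The trigger macros above pack a 2-bit sense field per interrupt, sixteen interrupts to a 32-bit register. A small standalone demo of the same arithmetic (register contents and the IRQ number are made up):

#include <stdio.h>
#include <stdint.h>

#define TRIG_BYTE_OFFSET(i)	((((i) * 2) / 32) * 4)
#define TRIG_BIT_OFFSET(i)	(((i) * 2) % 32)

int main(void)
{
	uint32_t regs[4] = { 0 };
	unsigned int irq = 21, trigger = 2;	/* 2 == edge rising */
	uint32_t *reg = &regs[TRIG_BYTE_OFFSET(irq) / 4];

	*reg &= ~(3u << TRIG_BIT_OFFSET(irq));		/* clear old field */
	*reg |= trigger << TRIG_BIT_OFFSET(irq);	/* set new sense */
	printf("irq %u -> reg byte %u, bits %u..%u, word=0x%08x\n",
	       irq, TRIG_BYTE_OFFSET(irq), TRIG_BIT_OFFSET(irq),
	       TRIG_BIT_OFFSET(irq) + 1, *reg);
	return 0;
}
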
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 875ac80f690b..7338f90b2f9e 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -53,6 +53,7 @@
/* List of flags for specific v2m implementation */
#define GICV2M_NEEDS_SPI_OFFSET 0x00000001
+#define GICV2M_GRAVITON_ADDRESS_ONLY 0x00000002
static LIST_HEAD(v2m_nodes);
static DEFINE_SPINLOCK(v2m_lock);
@@ -95,15 +96,26 @@ static struct msi_domain_info gicv2m_msi_domain_info = {
.chip = &gicv2m_msi_irq_chip,
};
+static phys_addr_t gicv2m_get_msi_addr(struct v2m_data *v2m, int hwirq)
+{
+ if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
+ return v2m->res.start | ((hwirq - 32) << 3);
+ else
+ return v2m->res.start + V2M_MSI_SETSPI_NS;
+}
+
static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
- phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;
+ phys_addr_t addr = gicv2m_get_msi_addr(v2m, data->hwirq);
msg->address_hi = upper_32_bits(addr);
msg->address_lo = lower_32_bits(addr);
- msg->data = data->hwirq;
+ if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
+ msg->data = 0;
+ else
+ msg->data = data->hwirq;
if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
msg->data -= v2m->spi_offset;
@@ -185,7 +197,7 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
hwirq = v2m->spi_start + offset;
err = iommu_dma_prepare_msi(info->desc,
- v2m->res.start + V2M_MSI_SETSPI_NS);
+ gicv2m_get_msi_addr(v2m, hwirq));
if (err)
return err;
@@ -304,7 +316,7 @@ static int gicv2m_allocate_domains(struct irq_domain *parent)
static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
u32 spi_start, u32 nr_spis,
- struct resource *res)
+ struct resource *res, u32 flags)
{
int ret;
struct v2m_data *v2m;
@@ -317,6 +329,7 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
INIT_LIST_HEAD(&v2m->entry);
v2m->fwnode = fwnode;
+ v2m->flags = flags;
memcpy(&v2m->res, res, sizeof(struct resource));
@@ -331,7 +344,14 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
v2m->spi_start = spi_start;
v2m->nr_spis = nr_spis;
} else {
- u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
+ u32 typer;
+
+ /* Graviton should always have explicit spi_start/nr_spis */
+ if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY) {
+ ret = -EINVAL;
+ goto err_iounmap;
+ }
+ typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
@@ -352,18 +372,21 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
*
* Broadom NS2 GICv2m implementation has an erratum where the MSI data
* is 'spi_number - 32'
+ *
+ * Reading that register fails on the Graviton implementation
*/
- switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
- case XGENE_GICV2M_MSI_IIDR:
- v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
- v2m->spi_offset = v2m->spi_start;
- break;
- case BCM_NS2_GICV2M_MSI_IIDR:
- v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
- v2m->spi_offset = 32;
- break;
+ if (!(v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)) {
+ switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
+ case XGENE_GICV2M_MSI_IIDR:
+ v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
+ v2m->spi_offset = v2m->spi_start;
+ break;
+ case BCM_NS2_GICV2M_MSI_IIDR:
+ v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
+ v2m->spi_offset = 32;
+ break;
+ }
}
-
v2m->bm = kcalloc(BITS_TO_LONGS(v2m->nr_spis), sizeof(long),
GFP_KERNEL);
if (!v2m->bm) {
@@ -416,7 +439,8 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
spi_start, nr_spis);
- ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis, &res);
+ ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
+ &res, 0);
if (ret) {
of_node_put(child);
break;
@@ -448,6 +472,25 @@ static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
return data->fwnode;
}
+static bool acpi_check_amazon_graviton_quirks(void)
+{
+ static struct acpi_table_madt *madt;
+ acpi_status status;
+ bool rc = false;
+
+#define ACPI_AMZN_OEM_ID "AMAZON"
+
+ status = acpi_get_table(ACPI_SIG_MADT, 0,
+ (struct acpi_table_header **)&madt);
+
+ if (ACPI_FAILURE(status) || !madt)
+ return rc;
+ rc = !memcmp(madt->header.oem_id, ACPI_AMZN_OEM_ID, ACPI_OEM_ID_SIZE);
+ acpi_put_table((struct acpi_table_header *)madt);
+
+ return rc;
+}
+
static int __init
acpi_parse_madt_msi(union acpi_subtable_headers *header,
const unsigned long end)
@@ -457,6 +500,7 @@ acpi_parse_madt_msi(union acpi_subtable_headers *header,
u32 spi_start = 0, nr_spis = 0;
struct acpi_madt_generic_msi_frame *m;
struct fwnode_handle *fwnode;
+ u32 flags = 0;
m = (struct acpi_madt_generic_msi_frame *)header;
if (BAD_MADT_ENTRY(m, end))
@@ -466,6 +510,13 @@ acpi_parse_madt_msi(union acpi_subtable_headers *header,
res.end = m->base_address + SZ_4K - 1;
res.flags = IORESOURCE_MEM;
+ if (acpi_check_amazon_graviton_quirks()) {
+ pr_info("applying Amazon Graviton quirk\n");
+ res.end = res.start + SZ_8K - 1;
+ flags |= GICV2M_GRAVITON_ADDRESS_ONLY;
+ gicv2m_msi_domain_info.flags &= ~MSI_FLAG_MULTI_PCI_MSI;
+ }
+
if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
spi_start = m->spi_base;
nr_spis = m->spi_count;
@@ -480,7 +531,7 @@ acpi_parse_madt_msi(union acpi_subtable_headers *header,
return -EINVAL;
}
- ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res);
+ ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res, flags);
if (ret)
irq_domain_free_fwnode(fwnode);
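
With GICV2M_GRAVITON_ADDRESS_ONLY set, the SPI number is encoded in the doorbell address rather than in the MSI data, as gicv2m_get_msi_addr() shows above. A toy recomputation of that encoding, with an assumed frame base:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t frame_base = 0x100000000ULL;	/* hypothetical MSI frame */
	unsigned int hwirq = 100;		/* SPI number */
	uint64_t addr = frame_base | ((uint64_t)(hwirq - 32) << 3);

	printf("MSI addr=0x%llx data=0\n", (unsigned long long)addr);
	return 0;
}
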
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d29b44b677e4..35500801dc2b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -733,32 +733,43 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
}
static int its_wait_for_range_completion(struct its_node *its,
- struct its_cmd_block *from,
+ u64 prev_idx,
struct its_cmd_block *to)
{
- u64 rd_idx, from_idx, to_idx;
+ u64 rd_idx, to_idx, linear_idx;
u32 count = 1000000; /* 1s! */
- from_idx = its_cmd_ptr_to_offset(its, from);
+ /* Linearize to_idx if the command set has wrapped around */
to_idx = its_cmd_ptr_to_offset(its, to);
+ if (to_idx < prev_idx)
+ to_idx += ITS_CMD_QUEUE_SZ;
+
+ linear_idx = prev_idx;
while (1) {
+ s64 delta;
+
rd_idx = readl_relaxed(its->base + GITS_CREADR);
- /* Direct case */
- if (from_idx < to_idx && rd_idx >= to_idx)
- break;
+ /*
+ * Compute the read pointer progress, taking the
+ * potential wrap-around into account.
+ */
+ delta = rd_idx - prev_idx;
+ if (rd_idx < prev_idx)
+ delta += ITS_CMD_QUEUE_SZ;
- /* Wrapped case */
- if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
+ linear_idx += delta;
+ if (linear_idx >= to_idx)
break;
count--;
if (!count) {
- pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
- from_idx, to_idx, rd_idx);
+ pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
+ to_idx, linear_idx);
return -1;
}
+ prev_idx = rd_idx;
cpu_relax();
udelay(1);
}
@@ -775,6 +786,7 @@ void name(struct its_node *its, \
struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
synctype *sync_obj; \
unsigned long flags; \
+ u64 rd_idx; \
\
raw_spin_lock_irqsave(&its->lock, flags); \
\
@@ -796,10 +808,11 @@ void name(struct its_node *its, \
} \
\
post: \
+ rd_idx = readl_relaxed(its->base + GITS_CREADR); \
next_cmd = its_post_commands(its); \
raw_spin_unlock_irqrestore(&its->lock, flags); \
\
- if (its_wait_for_range_completion(its, cmd, next_cmd)) \
+ if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
}
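
The rewrite above tracks cumulative progress of the ITS read pointer instead of comparing raw queue offsets, so wrap-around falls out naturally. A standalone demo of the same linearization (the queue size and read-pointer samples are invented):

#include <stdio.h>
#include <stdint.h>

#define QUEUE_SZ 65536

int main(void)
{
	uint64_t prev_idx = 65024, to_idx = 256, linear_idx;
	uint64_t rd_samples[] = { 65280, 128, 384 };	/* HW read ptr */

	if (to_idx < prev_idx)		/* command set wrapped */
		to_idx += QUEUE_SZ;
	linear_idx = prev_idx;
	for (int i = 0; i < 3; i++) {
		int64_t delta = rd_samples[i] - prev_idx;

		if (rd_samples[i] < prev_idx)
			delta += QUEUE_SZ;	/* read ptr wrapped too */
		linear_idx += delta;
		prev_idx = rd_samples[i];
		printf("rd=%llu linear=%llu done=%d\n",
		       (unsigned long long)rd_samples[i],
		       (unsigned long long)linear_idx, linear_idx >= to_idx);
	}
	return 0;
}
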
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 6377cb864f4c..9bca4896fa6f 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -461,8 +461,12 @@ static void gic_deactivate_unhandled(u32 irqnr)
static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
+ bool irqs_enabled = interrupts_enabled(regs);
int err;
+ if (irqs_enabled)
+ nmi_enter();
+
if (static_branch_likely(&supports_deactivate_key))
gic_write_eoir(irqnr);
/*
@@ -474,6 +478,9 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
err = handle_domain_nmi(gic_data.domain, irqnr, regs);
if (err)
gic_deactivate_unhandled(irqnr);
+
+ if (irqs_enabled)
+ nmi_exit();
}
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
@@ -1332,6 +1339,9 @@ static int __init gic_init_bases(void __iomem *dist_base,
if (gic_dist_supports_lpis()) {
its_init(handle, &gic_data.rdists, gic_data.domain);
its_cpu_init();
+ } else {
+ if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
+ gicv2m_init(handle, gic_data.domain);
}
if (gic_prio_masking_enabled()) {
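
The change above brackets pseudo-NMI handling with nmi_enter()/nmi_exit() only when the NMI preempted a context that had interrupts enabled. A skeletal sketch of that conditional bracketing, with stubbed functions standing in for the kernel primitives:

#include <stdbool.h>
#include <stdio.h>

static void nmi_enter(void) { puts("nmi_enter"); }	/* stub */
static void nmi_exit(void)  { puts("nmi_exit"); }	/* stub */

static void handle_nmi(bool irqs_enabled)
{
	if (irqs_enabled)
		nmi_enter();
	puts("handle_domain_nmi");	/* the actual NMI handling */
	if (irqs_enabled)
		nmi_exit();
}

int main(void)
{
	handle_nmi(true);	/* preempted IRQ-enabled code */
	handle_nmi(false);	/* already in an IRQ-disabled section */
	return 0;
}
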
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index a89c693d5b90..3dd28382d5f5 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -344,8 +344,7 @@ static int mbigen_device_probe(struct platform_device *pdev)
err = -EINVAL;
if (err) {
- dev_err(&pdev->dev, "Failed to create mbi-gen@%p irqdomain",
- mgn_chip->base);
+ dev_err(&pdev->dev, "Failed to create mbi-gen irqdomain\n");
return err;
}
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 8eb92eb98f54..dcdc23b9dce6 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -60,6 +60,7 @@ static const struct of_device_id meson_irq_gpio_matches[] = {
{ .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
{ .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params },
{ .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params },
+ { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params },
{ }
};
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index d32268cc1174..f3985469c221 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -388,7 +388,7 @@ static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
cd = irq_data_get_irq_chip_data(d);
- write_gic_vl_map(intr, cd->map);
+ write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
if (cd->mask)
write_gic_vl_smask(BIT(intr));
}
@@ -517,7 +517,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
- write_gic_vo_map(intr, map);
+ write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
}
spin_unlock_irqrestore(&gic_lock, flags);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 04c05a18600c..f82bc60a6793 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -508,7 +508,8 @@ static int intc_irqpin_probe(struct platform_device *pdev)
}
irq_chip = &p->irq_chip;
- irq_chip->name = name;
+ irq_chip->name = "intc-irqpin";
+ irq_chip->parent_device = dev;
irq_chip->irq_mask = disable_fn;
irq_chip->irq_unmask = enable_fn;
irq_chip->irq_set_type = intc_irqpin_irq_set_type;
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index a449a7c839b3..11abc09ef76c 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -7,7 +7,6 @@
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
@@ -48,7 +47,7 @@ struct irqc_priv {
void __iomem *cpu_int_base;
struct irqc_irq irq[IRQC_IRQ_MAX];
unsigned int number_of_irqs;
- struct platform_device *pdev;
+ struct device *dev;
struct irq_chip_generic *gc;
struct irq_domain *irq_domain;
atomic_t wakeup_path;
@@ -61,8 +60,7 @@ static struct irqc_priv *irq_data_to_priv(struct irq_data *data)
static void irqc_dbg(struct irqc_irq *i, char *str)
{
- dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n",
- str, i->requested_irq, i->hw_irq);
+ dev_dbg(i->p->dev, "%s (%d:%d)\n", str, i->requested_irq, i->hw_irq);
}
static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
@@ -125,33 +123,22 @@ static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
static int irqc_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ const char *name = dev_name(dev);
struct irqc_priv *p;
- struct resource *io;
struct resource *irq;
- const char *name = dev_name(&pdev->dev);
int ret;
int k;
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p) {
- dev_err(&pdev->dev, "failed to allocate driver data\n");
- ret = -ENOMEM;
- goto err0;
- }
+ p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
- p->pdev = pdev;
+ p->dev = dev;
platform_set_drvdata(pdev, p);
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
-
- /* get hold of manadatory IOMEM */
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!io) {
- dev_err(&pdev->dev, "not enough IOMEM resources\n");
- ret = -EINVAL;
- goto err1;
- }
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
/* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
for (k = 0; k < IRQC_IRQ_MAX; k++) {
@@ -166,42 +153,41 @@ static int irqc_probe(struct platform_device *pdev)
p->number_of_irqs = k;
if (p->number_of_irqs < 1) {
- dev_err(&pdev->dev, "not enough IRQ resources\n");
+ dev_err(dev, "not enough IRQ resources\n");
ret = -EINVAL;
- goto err1;
+ goto err_runtime_pm_disable;
}
/* ioremap IOMEM and setup read/write callbacks */
- p->iomem = ioremap_nocache(io->start, resource_size(io));
- if (!p->iomem) {
- dev_err(&pdev->dev, "failed to remap IOMEM\n");
- ret = -ENXIO;
- goto err2;
+ p->iomem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(p->iomem)) {
+ ret = PTR_ERR(p->iomem);
+ goto err_runtime_pm_disable;
}
p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */
- p->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
- p->number_of_irqs,
+ p->irq_domain = irq_domain_add_linear(dev->of_node, p->number_of_irqs,
&irq_generic_chip_ops, p);
if (!p->irq_domain) {
ret = -ENXIO;
- dev_err(&pdev->dev, "cannot initialize irq domain\n");
- goto err2;
+ dev_err(dev, "cannot initialize irq domain\n");
+ goto err_runtime_pm_disable;
}
ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs,
- 1, name, handle_level_irq,
+ 1, "irqc", handle_level_irq,
0, 0, IRQ_GC_INIT_NESTED_LOCK);
if (ret) {
- dev_err(&pdev->dev, "cannot allocate generic chip\n");
- goto err3;
+ dev_err(dev, "cannot allocate generic chip\n");
+ goto err_remove_domain;
}
p->gc = irq_get_domain_generic_chip(p->irq_domain, 0);
p->gc->reg_base = p->cpu_int_base;
p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
+ p->gc->chip_types[0].chip.parent_device = dev;
p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type;
@@ -210,46 +196,33 @@ static int irqc_probe(struct platform_device *pdev)
/* request interrupts one by one */
for (k = 0; k < p->number_of_irqs; k++) {
- if (request_irq(p->irq[k].requested_irq, irqc_irq_handler,
- 0, name, &p->irq[k])) {
- dev_err(&pdev->dev, "failed to request IRQ\n");
+ if (devm_request_irq(dev, p->irq[k].requested_irq,
+ irqc_irq_handler, 0, name, &p->irq[k])) {
+ dev_err(dev, "failed to request IRQ\n");
ret = -ENOENT;
- goto err4;
+ goto err_remove_domain;
}
}
- dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs);
+ dev_info(dev, "driving %d irqs\n", p->number_of_irqs);
return 0;
-err4:
- while (--k >= 0)
- free_irq(p->irq[k].requested_irq, &p->irq[k]);
-err3:
+err_remove_domain:
irq_domain_remove(p->irq_domain);
-err2:
- iounmap(p->iomem);
-err1:
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- kfree(p);
-err0:
+err_runtime_pm_disable:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
return ret;
}
static int irqc_remove(struct platform_device *pdev)
{
struct irqc_priv *p = platform_get_drvdata(pdev);
- int k;
-
- for (k = 0; k < p->number_of_irqs; k++)
- free_irq(p->irq[k].requested_irq, &p->irq[k]);
irq_domain_remove(p->irq_domain);
- iounmap(p->iomem);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- kfree(p);
return 0;
}
diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c
new file mode 100644
index 000000000000..b1f19b210190
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-rza1.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/A1 IRQC Driver
+ *
+ * Copyright (C) 2019 Glider bvba
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define IRQC_NUM_IRQ 8
+
+#define ICR0 0 /* Interrupt Control Register 0 */
+
+#define ICR0_NMIL BIT(15) /* NMI Input Level (0=low, 1=high) */
+#define ICR0_NMIE BIT(8) /* Edge Select (0=falling, 1=rising) */
+#define ICR0_NMIF BIT(1) /* NMI Interrupt Request */
+
+#define ICR1 2 /* Interrupt Control Register 1 */
+
+#define ICR1_IRQS(n, sense) ((sense) << ((n) * 2)) /* IRQ Sense Select */
+#define ICR1_IRQS_LEVEL_LOW 0
+#define ICR1_IRQS_EDGE_FALLING 1
+#define ICR1_IRQS_EDGE_RISING 2
+#define ICR1_IRQS_EDGE_BOTH 3
+#define ICR1_IRQS_MASK(n) ICR1_IRQS((n), 3)
+
+#define IRQRR 4 /* IRQ Interrupt Request Register */
+
+
+struct rza1_irqc_priv {
+ struct device *dev;
+ void __iomem *base;
+ struct irq_chip chip;
+ struct irq_domain *irq_domain;
+ struct of_phandle_args map[IRQC_NUM_IRQ];
+};
+
+static struct rza1_irqc_priv *irq_data_to_priv(struct irq_data *data)
+{
+ return data->domain->host_data;
+}
+
+static void rza1_irqc_eoi(struct irq_data *d)
+{
+ struct rza1_irqc_priv *priv = irq_data_to_priv(d);
+ u16 bit = BIT(irqd_to_hwirq(d));
+ u16 tmp;
+
+ tmp = readw_relaxed(priv->base + IRQRR);
+ if (tmp & bit)
+ writew_relaxed(GENMASK(IRQC_NUM_IRQ - 1, 0) & ~bit,
+ priv->base + IRQRR);
+
+ irq_chip_eoi_parent(d);
+}
+
+static int rza1_irqc_set_type(struct irq_data *d, unsigned int type)
+{
+ struct rza1_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hw_irq = irqd_to_hwirq(d);
+ u16 sense, tmp;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_LEVEL_LOW:
+ sense = ICR1_IRQS_LEVEL_LOW;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ sense = ICR1_IRQS_EDGE_FALLING;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ sense = ICR1_IRQS_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ sense = ICR1_IRQS_EDGE_BOTH;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ tmp = readw_relaxed(priv->base + ICR1);
+ tmp &= ~ICR1_IRQS_MASK(hw_irq);
+ tmp |= ICR1_IRQS(hw_irq, sense);
+ writew_relaxed(tmp, priv->base + ICR1);
+ return 0;
+}
+
+static int rza1_irqc_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct rza1_irqc_priv *priv = domain->host_data;
+ struct irq_fwspec *fwspec = arg;
+ unsigned int hwirq = fwspec->param[0];
+ struct irq_fwspec spec;
+ unsigned int i;
+ int ret;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &priv->chip,
+ priv);
+ if (ret)
+ return ret;
+
+ spec.fwnode = &priv->dev->of_node->fwnode;
+ spec.param_count = priv->map[hwirq].args_count;
+ for (i = 0; i < spec.param_count; i++)
+ spec.param[i] = priv->map[hwirq].args[i];
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &spec);
+}
+
+static int rza1_irqc_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec, unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (fwspec->param_count != 2 || fwspec->param[0] >= IRQC_NUM_IRQ)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+}
+
+static const struct irq_domain_ops rza1_irqc_domain_ops = {
+ .alloc = rza1_irqc_alloc,
+ .translate = rza1_irqc_translate,
+};
+
+static int rza1_irqc_parse_map(struct rza1_irqc_priv *priv,
+ struct device_node *gic_node)
+{
+ unsigned int imaplen, i, j, ret;
+ struct device *dev = priv->dev;
+ struct device_node *ipar;
+ const __be32 *imap;
+ u32 intsize;
+
+ imap = of_get_property(dev->of_node, "interrupt-map", &imaplen);
+ if (!imap)
+ return -EINVAL;
+
+ for (i = 0; i < IRQC_NUM_IRQ; i++) {
+ if (imaplen < 3)
+ return -EINVAL;
+
+ /* Check interrupt number, ignore sense */
+ if (be32_to_cpup(imap) != i)
+ return -EINVAL;
+
+ ipar = of_find_node_by_phandle(be32_to_cpup(imap + 2));
+ if (ipar != gic_node) {
+ of_node_put(ipar);
+ return -EINVAL;
+ }
+
+ imap += 3;
+ imaplen -= 3;
+
+ ret = of_property_read_u32(ipar, "#interrupt-cells", &intsize);
+ of_node_put(ipar);
+ if (ret)
+ return ret;
+
+ if (imaplen < intsize)
+ return -EINVAL;
+
+ priv->map[i].args_count = intsize;
+ for (j = 0; j < intsize; j++)
+ priv->map[i].args[j] = be32_to_cpup(imap++);
+
+ imaplen -= intsize;
+ }
+
+ return 0;
+}
+
+static int rza1_irqc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct irq_domain *parent = NULL;
+ struct device_node *gic_node;
+ struct rza1_irqc_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+ priv->dev = dev;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ gic_node = of_irq_find_parent(np);
+ if (gic_node) {
+ parent = irq_find_host(gic_node);
+ of_node_put(gic_node);
+ }
+
+ if (!parent) {
+ dev_err(dev, "cannot find parent domain\n");
+ return -ENODEV;
+ }
+
+ ret = rza1_irqc_parse_map(priv, gic_node);
+ if (ret) {
+ dev_err(dev, "cannot parse %s: %d\n", "interrupt-map", ret);
+ return ret;
+ }
+
+ priv->chip.name = "rza1-irqc";
+ priv->chip.irq_mask = irq_chip_mask_parent;
+ priv->chip.irq_unmask = irq_chip_unmask_parent;
+ priv->chip.irq_eoi = rza1_irqc_eoi;
+ priv->chip.irq_retrigger = irq_chip_retrigger_hierarchy;
+ priv->chip.irq_set_type = rza1_irqc_set_type;
+ priv->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
+
+ priv->irq_domain = irq_domain_add_hierarchy(parent, 0, IRQC_NUM_IRQ,
+ np, &rza1_irqc_domain_ops,
+ priv);
+ if (!priv->irq_domain) {
+ dev_err(dev, "cannot initialize irq domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int rza1_irqc_remove(struct platform_device *pdev)
+{
+ struct rza1_irqc_priv *priv = platform_get_drvdata(pdev);
+
+ irq_domain_remove(priv->irq_domain);
+ return 0;
+}
+
+static const struct of_device_id rza1_irqc_dt_ids[] = {
+ { .compatible = "renesas,rza1-irqc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rza1_irqc_dt_ids);
+
+static struct platform_driver rza1_irqc_device_driver = {
+ .probe = rza1_irqc_probe,
+ .remove = rza1_irqc_remove,
+ .driver = {
+ .name = "renesas_rza1_irqc",
+ .of_match_table = rza1_irqc_dt_ids,
+ }
+};
+
+static int __init rza1_irqc_init(void)
+{
+ return platform_driver_register(&rza1_irqc_device_driver);
+}
+postcore_initcall(rza1_irqc_init);
+
+static void __exit rza1_irqc_exit(void)
+{
+ platform_driver_unregister(&rza1_irqc_device_driver);
+}
+module_exit(rza1_irqc_exit);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_DESCRIPTION("Renesas RZ/A1 IRQC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
index 4e983bc6cf93..1d027623c776 100644
--- a/drivers/irqchip/irq-sni-exiu.c
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -2,7 +2,7 @@
/*
* Driver for Socionext External Interrupt Unit (EXIU)
*
- * Copyright (c) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (c) 2017-2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
* Based on irq-tegra.c:
* Copyright (C) 2011 Google, Inc.
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -131,9 +132,13 @@ static int exiu_domain_translate(struct irq_domain *domain,
*hwirq = fwspec->param[1] - info->spi_base;
*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
- return 0;
+ } else {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
}
- return -EINVAL;
+ return 0;
}
static int exiu_domain_alloc(struct irq_domain *dom, unsigned int virq,
@@ -144,16 +149,21 @@ static int exiu_domain_alloc(struct irq_domain *dom, unsigned int virq,
struct exiu_irq_data *info = dom->host_data;
irq_hw_number_t hwirq;
- if (fwspec->param_count != 3)
- return -EINVAL; /* Not GIC compliant */
- if (fwspec->param[0] != GIC_SPI)
- return -EINVAL; /* No PPI should point to this domain */
+ parent_fwspec = *fwspec;
+ if (is_of_node(dom->parent->fwnode)) {
+ if (fwspec->param_count != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (fwspec->param[0] != GIC_SPI)
+ return -EINVAL; /* No PPI should point to this domain */
+ hwirq = fwspec->param[1] - info->spi_base;
+ } else {
+ hwirq = fwspec->param[0];
+ parent_fwspec.param[0] = hwirq + info->spi_base + 32;
+ }
WARN_ON(nr_irqs != 1);
- hwirq = fwspec->param[1] - info->spi_base;
irq_domain_set_hwirq_and_chip(dom, virq, hwirq, &exiu_irq_chip, info);
- parent_fwspec = *fwspec;
parent_fwspec.fwnode = dom->parent->fwnode;
return irq_domain_alloc_irqs_parent(dom, virq, nr_irqs, &parent_fwspec);
}
@@ -164,35 +174,23 @@ static const struct irq_domain_ops exiu_domain_ops = {
.free = irq_domain_free_irqs_common,
};
-static int __init exiu_init(struct device_node *node,
- struct device_node *parent)
+static struct exiu_irq_data *exiu_init(const struct fwnode_handle *fwnode,
+ struct resource *res)
{
- struct irq_domain *parent_domain, *domain;
struct exiu_irq_data *data;
int err;
- if (!parent) {
- pr_err("%pOF: no parent, giving up\n", node);
- return -ENODEV;
- }
-
- parent_domain = irq_find_host(parent);
- if (!parent_domain) {
- pr_err("%pOF: unable to obtain parent domain\n", node);
- return -ENXIO;
- }
-
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- if (of_property_read_u32(node, "socionext,spi-base", &data->spi_base)) {
- pr_err("%pOF: failed to parse 'spi-base' property\n", node);
+ if (fwnode_property_read_u32_array(fwnode, "socionext,spi-base",
+ &data->spi_base, 1)) {
err = -ENODEV;
goto out_free;
}
- data->base = of_iomap(node, 0);
+ data->base = ioremap(res->start, resource_size(res));
if (!data->base) {
err = -ENODEV;
goto out_free;
@@ -202,11 +200,44 @@ static int __init exiu_init(struct device_node *node,
writel_relaxed(0xFFFFFFFF, data->base + EIREQCLR);
writel_relaxed(0xFFFFFFFF, data->base + EIMASK);
+ return data;
+
+out_free:
+ kfree(data);
+ return ERR_PTR(err);
+}
+
+static int __init exiu_dt_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *domain;
+ struct exiu_irq_data *data;
+ struct resource res;
+
+ if (!parent) {
+ pr_err("%pOF: no parent, giving up\n", node);
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%pOF: unable to obtain parent domain\n", node);
+ return -ENXIO;
+ }
+
+ if (of_address_to_resource(node, 0, &res)) {
+ pr_err("%pOF: failed to parse memory resource\n", node);
+ return -ENXIO;
+ }
+
+ data = exiu_init(of_node_to_fwnode(node), &res);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_IRQS, node,
&exiu_domain_ops, data);
if (!domain) {
pr_err("%pOF: failed to allocate domain\n", node);
- err = -ENOMEM;
goto out_unmap;
}
@@ -217,8 +248,57 @@ static int __init exiu_init(struct device_node *node,
out_unmap:
iounmap(data->base);
-out_free:
kfree(data);
- return err;
+ return -ENOMEM;
}
-IRQCHIP_DECLARE(exiu, "socionext,synquacer-exiu", exiu_init);
+IRQCHIP_DECLARE(exiu, "socionext,synquacer-exiu", exiu_dt_init);
+
+#ifdef CONFIG_ACPI
+static int exiu_acpi_probe(struct platform_device *pdev)
+{
+ struct irq_domain *domain;
+ struct exiu_irq_data *data;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to parse memory resource\n");
+ return -ENXIO;
+ }
+
+ data = exiu_init(dev_fwnode(&pdev->dev), res);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ domain = acpi_irq_create_hierarchy(0, NUM_IRQS, dev_fwnode(&pdev->dev),
+ &exiu_domain_ops, data);
+ if (!domain) {
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ goto out_unmap;
+ }
+
+ dev_info(&pdev->dev, "%d interrupts forwarded\n", NUM_IRQS);
+
+ return 0;
+
+out_unmap:
+ iounmap(data->base);
+ kfree(data);
+ return -ENOMEM;
+}
+
+static const struct acpi_device_id exiu_acpi_ids[] = {
+ { "SCX0008" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, exiu_acpi_ids);
+
+static struct platform_driver exiu_driver = {
+ .driver = {
+ .name = "exiu",
+ .acpi_match_table = exiu_acpi_ids,
+ },
+ .probe = exiu_acpi_probe,
+};
+builtin_platform_driver(exiu_driver);
+#endif
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 011b60a49e3f..ef4d625d2d80 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -159,9 +159,9 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom
parent_fwspec.param[1] = vint_desc->vint_id;
parent_virq = irq_create_fwspec_mapping(&parent_fwspec);
- if (parent_virq <= 0) {
+ if (parent_virq == 0) {
kfree(vint_desc);
- return ERR_PTR(parent_virq);
+ return ERR_PTR(-EINVAL);
}
vint_desc->parent_virq = parent_virq;
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
index 067337ab3f20..d88e993aa66d 100644
--- a/drivers/irqchip/qcom-irq-combiner.c
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -229,7 +229,6 @@ static int get_registers(struct platform_device *pdev, struct combiner *comb)
static int __init combiner_probe(struct platform_device *pdev)
{
struct combiner *combiner;
- size_t alloc_sz;
int nregs;
int err;
@@ -239,8 +238,8 @@ static int __init combiner_probe(struct platform_device *pdev)
return -EINVAL;
}
- alloc_sz = sizeof(*combiner) + sizeof(struct combiner_reg) * nregs;
- combiner = devm_kzalloc(&pdev->dev, alloc_sz, GFP_KERNEL);
+ combiner = devm_kzalloc(&pdev->dev, struct_size(combiner, regs, nregs),
+ GFP_KERNEL);
if (!combiner)
return -ENOMEM;
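
The combiner allocation now uses struct_size() instead of open-coding the sizeof arithmetic. A self-contained illustration with a simplified, overflow-unchecked stand-in for the kernel helper (the real struct_size() saturates on overflow):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct combiner_reg { void *addr; unsigned long mask; };

struct combiner {
	int nregs;
	struct combiner_reg regs[];	/* flexible array member */
};

/* simplified stand-in for the kernel's overflow-safe struct_size() */
#define struct_size(p, member, n) \
	(sizeof(*(p)) + sizeof((p)->member[0]) * (n))

int main(void)
{
	struct combiner *c = calloc(1, struct_size(c, regs, 4));

	if (!c)
		return 1;
	c->nregs = 4;
	printf("allocated %zu bytes\n", struct_size(c, regs, 4));
	free(c);
	return 0;
}
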
diff --git a/drivers/leds/trigger/ledtrig-activity.c b/drivers/leds/trigger/ledtrig-activity.c
index 4c8b0c3cf284..6a72b7e13719 100644
--- a/drivers/leds/trigger/ledtrig-activity.c
+++ b/drivers/leds/trigger/ledtrig-activity.c
@@ -70,7 +70,7 @@ static void led_activity_function(struct timer_list *t)
* down to 16us, ensuring we won't overflow 32-bit computations below
* even up to 3k CPUs, while keeping divides cheap on smaller systems.
*/
- curr_boot = ktime_get_boot_ns() * cpus;
+ curr_boot = ktime_get_boottime_ns() * cpus;
diff_boot = (curr_boot - activity_data->last_boot) >> 16;
diff_used = (curr_used - activity_data->last_used) >> 16;
activity_data->last_boot = curr_boot;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 1b16d34bb785..0fd3ca9bfe54 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2035,7 +2035,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
return -ENOMEM;
key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
- key_desc + 1, NULL);
+ key_desc + 1, NULL, NULL);
if (IS_ERR(key)) {
kzfree(new_key_string);
return PTR_ERR(key);
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index 352e803f566e..728733a514c7 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -140,8 +140,8 @@ static char __init *dm_parse_table_entry(struct dm_device *dev, char *str)
return ERR_PTR(-EINVAL);
}
/* target_args */
- dev->target_args_array[n] = kstrndup(field[3], GFP_KERNEL,
- DM_MAX_STR_SIZE);
+ dev->target_args_array[n] = kstrndup(field[3], DM_MAX_STR_SIZE,
+ GFP_KERNEL);
if (!dev->target_args_array[n])
return ERR_PTR(-ENOMEM);
@@ -272,10 +272,10 @@ static int __init dm_init_init(void)
return 0;
if (strlen(create) >= DM_MAX_STR_SIZE) {
- DMERR("Argument is too big. Limit is %d\n", DM_MAX_STR_SIZE);
+ DMERR("Argument is too big. Limit is %d", DM_MAX_STR_SIZE);
return -EINVAL;
}
- str = kstrndup(create, GFP_KERNEL, DM_MAX_STR_SIZE);
+ str = kstrndup(create, DM_MAX_STR_SIZE, GFP_KERNEL);
if (!str)
return -ENOMEM;
@@ -283,7 +283,7 @@ static int __init dm_init_init(void)
if (r)
goto out;
- DMINFO("waiting for all devices to be available before creating mapped devices\n");
+ DMINFO("waiting for all devices to be available before creating mapped devices");
wait_for_device_probe();
list_for_each_entry(dev, &devices, list) {
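
The dm-init fixes above swap kstrndup()'s arguments into their correct order, kstrndup(s, max, gfp). A userspace echo of the same bounded-duplication contract via strndup(3):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

int main(void)
{
	const char *arg = "verity 1 /dev/sda1 /dev/sda2 4096";
	char *copy = strndup(arg, 16);	/* copy at most 16 bytes */

	if (!copy)
		return 1;
	printf("bounded copy: '%s'\n", copy);
	free(copy);
	return 0;
}
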
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 9ea2b0291f20..e549392e0ea5 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -60,6 +60,7 @@
#define WRITE_LOG_VERSION 1ULL
#define WRITE_LOG_MAGIC 0x6a736677736872ULL
+#define WRITE_LOG_SUPER_SECTOR 0
/*
* The disk format for this is braindead simple.
@@ -115,6 +116,7 @@ struct log_writes_c {
struct list_head logging_blocks;
wait_queue_head_t wait;
struct task_struct *log_kthread;
+ struct completion super_done;
};
struct pending_block {
@@ -180,6 +182,14 @@ static void log_end_io(struct bio *bio)
bio_put(bio);
}
+static void log_end_super(struct bio *bio)
+{
+ struct log_writes_c *lc = bio->bi_private;
+
+ complete(&lc->super_done);
+ log_end_io(bio);
+}
+
/*
* Meant to be called if there is an error, it will free all the pages
* associated with the block.
@@ -215,7 +225,8 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, lc->logdev->bdev);
- bio->bi_end_io = log_end_io;
+ bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
+ log_end_super : log_end_io;
bio->bi_private = lc;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -418,11 +429,18 @@ static int log_super(struct log_writes_c *lc)
super.nr_entries = cpu_to_le64(lc->logged_entries);
super.sectorsize = cpu_to_le32(lc->sectorsize);
- if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) {
+ if (write_metadata(lc, &super, sizeof(super), NULL, 0,
+ WRITE_LOG_SUPER_SECTOR)) {
DMERR("Couldn't write super");
return -1;
}
+ /*
+ * The super sector should be written in order, otherwise the
+ * nr_entries could be rewritten incorrectly by an old bio.
+ */
+ wait_for_completion_io(&lc->super_done);
+
return 0;
}
@@ -531,6 +549,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
INIT_LIST_HEAD(&lc->unflushed_blocks);
INIT_LIST_HEAD(&lc->logging_blocks);
init_waitqueue_head(&lc->wait);
+ init_completion(&lc->super_done);
atomic_set(&lc->io_blocks, 0);
atomic_set(&lc->pending_blocks, 0);
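
The completion added above forces the superblock write to finish before any later log write can overwrite nr_entries. A pthread sketch of the same submit-then-wait ordering, with a condition variable standing in for the kernel completion:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int super_done;

static void *super_end_io(void *arg)	/* stands in for log_end_super() */
{
	(void)arg;
	puts("super write completed");
	pthread_mutex_lock(&lock);
	super_done = 1;
	pthread_cond_signal(&cond);	/* complete(&lc->super_done) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t io;

	pthread_create(&io, NULL, super_end_io, NULL);
	pthread_mutex_lock(&lock);	/* wait_for_completion_io() */
	while (!super_done)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	puts("safe to issue further log writes");
	pthread_join(io, NULL);
	return 0;
}
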
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 350cf0451456..ec8b27e20de3 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -561,7 +561,7 @@ static char **realloc_argv(unsigned *size, char **old_argv)
gfp = GFP_NOIO;
}
argv = kmalloc_array(new_size, sizeof(*argv), gfp);
- if (argv) {
+ if (argv && old_argv) {
memcpy(argv, old_argv, *size * sizeof(*argv));
*size = new_size;
}
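
The realloc_argv() fix above guards the copy with old_argv, since the first growth starts from a NULL array. A simplified standalone sketch of that grow-and-copy pattern:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

static char **grow(unsigned *size, char **old)
{
	unsigned new_size = *size ? *size * 2 : 8;
	char **argv = malloc(new_size * sizeof(*argv));

	if (argv && old)		/* guard: never memcpy from NULL */
		memcpy(argv, old, *size * sizeof(*argv));
	if (argv)
		*size = new_size;
	free(old);
	return argv;
}

int main(void)
{
	unsigned size = 0;
	char **argv = grow(&size, NULL);	/* first call: old == NULL */

	printf("size=%u argv=%p\n", size, (void *)argv);
	free(argv);
	return 0;
}
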
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 720d06531aa3..ea24ff0612e3 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -235,8 +235,8 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
BUG();
}
- DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
- block);
+ DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
+ type_str, block);
if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
DMERR("%s: reached maximum errors", v->data_dev->name);
diff --git a/drivers/mtd/nand/raw/ingenic/Kconfig b/drivers/mtd/nand/raw/ingenic/Kconfig
index 19a96ce515c1..66b7cffdb0c2 100644
--- a/drivers/mtd/nand/raw/ingenic/Kconfig
+++ b/drivers/mtd/nand/raw/ingenic/Kconfig
@@ -16,7 +16,7 @@ config MTD_NAND_JZ4780
if MTD_NAND_JZ4780
config MTD_NAND_INGENIC_ECC
- tristate
+ bool
config MTD_NAND_JZ4740_ECC
tristate "Hardware BCH support for JZ4740 SoC"
diff --git a/drivers/mtd/nand/raw/ingenic/Makefile b/drivers/mtd/nand/raw/ingenic/Makefile
index 1ac4f455baea..b63d36889263 100644
--- a/drivers/mtd/nand/raw/ingenic/Makefile
+++ b/drivers/mtd/nand/raw/ingenic/Makefile
@@ -2,7 +2,9 @@
obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
obj-$(CONFIG_MTD_NAND_JZ4780) += ingenic_nand.o
-obj-$(CONFIG_MTD_NAND_INGENIC_ECC) += ingenic_ecc.o
+ingenic_nand-y += ingenic_nand_drv.o
+ingenic_nand-$(CONFIG_MTD_NAND_INGENIC_ECC) += ingenic_ecc.o
+
obj-$(CONFIG_MTD_NAND_JZ4740_ECC) += jz4740_ecc.o
obj-$(CONFIG_MTD_NAND_JZ4725B_BCH) += jz4725b_bch.o
obj-$(CONFIG_MTD_NAND_JZ4780_BCH) += jz4780_bch.o
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index d3e085c5685a..c954189606f6 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -30,7 +30,6 @@ int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
{
return ecc->ops->calculate(ecc, params, buf, ecc_code);
}
-EXPORT_SYMBOL(ingenic_ecc_calculate);
/**
* ingenic_ecc_correct() - detect and correct bit errors
@@ -51,7 +50,6 @@ int ingenic_ecc_correct(struct ingenic_ecc *ecc,
{
return ecc->ops->correct(ecc, params, buf, ecc_code);
}
-EXPORT_SYMBOL(ingenic_ecc_correct);
/**
* ingenic_ecc_get() - get the ECC controller device
@@ -111,7 +109,6 @@ struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *of_node)
}
return ecc;
}
-EXPORT_SYMBOL(of_ingenic_ecc_get);
/**
* ingenic_ecc_release() - release the ECC controller device
@@ -122,7 +119,6 @@ void ingenic_ecc_release(struct ingenic_ecc *ecc)
clk_disable_unprepare(ecc->clk);
put_device(ecc->dev);
}
-EXPORT_SYMBOL(ingenic_ecc_release);
int ingenic_ecc_probe(struct platform_device *pdev)
{
@@ -159,8 +155,3 @@ int ingenic_ecc_probe(struct platform_device *pdev)
return 0;
}
EXPORT_SYMBOL(ingenic_ecc_probe);
-
-MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
-MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
-MODULE_DESCRIPTION("Ingenic ECC common driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index d7b7c0f13909..d7b7c0f13909 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index b021a5720b42..89773293c64d 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -51,6 +51,7 @@
#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
#define NFC_REG_SPARE_AREA 0x00A0
#define NFC_REG_PAT_ID 0x00A4
+#define NFC_REG_MDMA_CNT 0x00C4
#define NFC_RAM0_BASE 0x0400
#define NFC_RAM1_BASE 0x0800
@@ -69,6 +70,7 @@
#define NFC_PAGE_SHIFT(x) (((x) < 10 ? 0 : (x) - 10) << 8)
#define NFC_SAM BIT(12)
#define NFC_RAM_METHOD BIT(14)
+#define NFC_DMA_TYPE_NORMAL BIT(15)
#define NFC_DEBUG_CTL BIT(31)
/* define bit use in NFC_ST */
@@ -205,14 +207,13 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
* NAND Controller capabilities structure: stores NAND controller capabilities
* for distinction between compatible strings.
*
- * @sram_through_ahb: On A23, we choose to access the internal RAM through AHB
- * instead of MBUS (less configuration). A10, A10s, A13 and
- * A20 use the MBUS but no extra configuration is needed.
+ * @extra_mbus_conf: Unlike the A10, A10s and A13, accessing internal RAM
+ * through MBUS on the A23/A33 requires extra configuration.
* @reg_io_data: I/O data register
* @dma_maxburst: DMA maxburst
*/
struct sunxi_nfc_caps {
- bool sram_through_ahb;
+ bool extra_mbus_conf;
unsigned int reg_io_data;
unsigned int dma_maxburst;
};
@@ -368,28 +369,12 @@ static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf,
goto err_unmap_buf;
}
- /*
- * On A23, we suppose the "internal RAM" (p.12 of the NFC user manual)
- * refers to the NAND controller's internal SRAM. This memory is mapped
- * and so is accessible from the AHB. It seems that it can also be
- * accessed by the MBUS. MBUS accesses are mandatory when using the
- * internal DMA instead of the external DMA engine.
- *
- * During DMA I/O operation, either we access this memory from the AHB
- * by clearing the NFC_RAM_METHOD bit, or we set the bit and use the
- * MBUS. In this case, we should also configure the MBUS DMA length
- * NFC_REG_MDMA_CNT(0xC4) to be chunksize * nchunks. NAND I/O over MBUS
- * are also limited to 32kiB pages.
- */
- if (nfc->caps->sram_through_ahb)
- writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
- nfc->regs + NFC_REG_CTL);
- else
- writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
- nfc->regs + NFC_REG_CTL);
-
+ writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
writel(chunksize, nfc->regs + NFC_REG_CNT);
+ if (nfc->caps->extra_mbus_conf)
+ writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT);
dmat = dmaengine_submit(dmad);
@@ -2151,6 +2136,11 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
dmac_cfg.src_maxburst = nfc->caps->dma_maxburst;
dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst;
dmaengine_slave_config(nfc->dmac, &dmac_cfg);
+
+ if (nfc->caps->extra_mbus_conf)
+ writel(readl(nfc->regs + NFC_REG_CTL) |
+ NFC_DMA_TYPE_NORMAL, nfc->regs + NFC_REG_CTL);
+
} else {
dev_warn(dev, "failed to request rxtx DMA channel\n");
}
@@ -2200,7 +2190,7 @@ static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = {
};
static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = {
- .sram_through_ahb = true,
+ .extra_mbus_conf = true,
.reg_io_data = NFC_REG_A23_IO_DATA,
.dma_maxburst = 8,
};
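Taken together, the two hunks mean the controller now always accesses its internal RAM through the MBUS, with the A23/A33 needing two extra writes: NFC_DMA_TYPE_NORMAL once at probe time, and the MBUS DMA length per operation. A condensed sketch of the resulting per-operation sequence, using the variables from sunxi_nfc_dma_op_prepare() above (a restatement of the hunks, not an additional change):

	/* route internal-RAM accesses through the MBUS */
	writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
	       nfc->regs + NFC_REG_CTL);
	writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
	writel(chunksize, nfc->regs + NFC_REG_CNT);
	/* A23/A33 only: program the MBUS DMA length as well */
	if (nfc->caps->extra_mbus_conf)
		writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT);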
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index e5586390026a..e6c646007cda 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -180,7 +180,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
SPINAND_INFO("GD5F4GQ4xA", 0xF4,
- NAND_MEMORG(1, 2048, 64, 64, 4096, 40, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 6502727049a8..21def3f8fb36 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -100,7 +100,7 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO("MX35LF1GE4AB", 0x12,
- NAND_MEMORG(1, 2048, 64, 64, 1024, 40, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -109,7 +109,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35LF2GE4AB", 0x22,
- NAND_MEMORG(1, 2048, 64, 64, 2048, 20, 2, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
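All three corrections here and in the gigadevice hunk above touch the sixth NAND_MEMORG() argument. For orientation, the macro's parameter order in include/linux/mtd/nand.h (quoted from memory, so verify against the header) is:

	/* NAND_MEMORG(bits_per_cell, pagesize, oobsize,
	 *             pages_per_eraseblock, eraseblocks_per_lun,
	 *             max_bad_eraseblocks_per_lun, planes_per_lun,
	 *             luns_per_target, ntargets)
	 */

Each fix therefore adjusts max_bad_eraseblocks_per_lun, keeping the bad-block budget proportional to the per-LUN block count: 4096 blocks -> 80 for the GD5F4GQ4xA, and 1024 -> 20, 2048 -> 40 for the Macronix parts.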
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 407f4095a37a..799fc38c5c34 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4320,12 +4320,12 @@ void bond_setup(struct net_device *bond_dev)
bond_dev->features |= NETIF_F_NETNS_LOCAL;
bond_dev->hw_features = BOND_VLAN_FEATURES |
- NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
bond_dev->features |= bond_dev->hw_features;
+ bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
}
/* Destroy a bonding device.
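The hw_features mask is what userspace may toggle with ethtool -K, while bits set only in features are fixed on; that split is the point of this hunk. A condensed sketch of the resulting bond setup (the assignments come from the hunk above, only the comments are added):

	/* toggleable offloads: RX tag stripping and filtering stay in
	 * hw_features, so ethtool -K can still flip them */
	bond_dev->hw_features = BOND_VLAN_FEATURES |
				NETIF_F_HW_VLAN_CTAG_RX |
				NETIF_F_HW_VLAN_CTAG_FILTER;
	/* fixed on: TX tag insertion is set only in features, so it can
	 * no longer be cleared from userspace */
	bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_STAG_TX;

The team driver further below receives the identical treatment.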
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index f46086fa9064..db91b213eae1 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -436,9 +436,9 @@ int ksz_switch_register(struct ksz_device *dev,
return PTR_ERR(dev->reset_gpio);
if (dev->reset_gpio) {
- gpiod_set_value(dev->reset_gpio, 1);
+ gpiod_set_value_cansleep(dev->reset_gpio, 1);
mdelay(10);
- gpiod_set_value(dev->reset_gpio, 0);
+ gpiod_set_value_cansleep(dev->reset_gpio, 0);
}
mutex_init(&dev->dev_mutex);
@@ -487,7 +487,7 @@ void ksz_switch_remove(struct ksz_device *dev)
dsa_unregister_switch(dev->ds);
if (dev->reset_gpio)
- gpiod_set_value(dev->reset_gpio, 1);
+ gpiod_set_value_cansleep(dev->reset_gpio, 1);
}
EXPORT_SYMBOL(ksz_switch_remove);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 18bc035da850..1fff462a4175 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -843,9 +843,14 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
return err;
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
- if (hweight < AQ_VLAN_MAX_FILTERS)
- err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, true);
+ if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) {
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
+ !(aq_nic->packet_filter & IFF_PROMISC));
+ aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
+ } else {
/* otherwise it is left in promiscuous mode */
+ aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
+ }
}
return err;
@@ -866,6 +871,7 @@ int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
return -EOPNOTSUPP;
+ aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
if (err)
return err;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 0da5e161ec5d..41172fbebddd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -126,6 +126,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
cfg->features = cfg->aq_hw_caps->hw_features;
+ cfg->is_vlan_force_promisc = true;
}
static int aq_nic_update_link_status(struct aq_nic_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index eb2e3c7c36f9..0f22f5d5691b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -35,6 +35,7 @@ struct aq_nic_cfg_s {
u32 flow_control;
u32 link_speed_msk;
u32 wol;
+ bool is_vlan_force_promisc;
u16 is_mc_list_enabled;
u16 mc_list_count;
bool is_autoneg;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 1c7593d54035..13ac2661a473 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -778,8 +778,15 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
unsigned int packet_filter)
{
unsigned int i = 0U;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+
+ hw_atl_rpfl2promiscuous_mode_en_set(self,
+ IS_FILTER_ENABLED(IFF_PROMISC));
+
+ hw_atl_rpf_vlan_prom_mode_en_set(self,
+ IS_FILTER_ENABLED(IFF_PROMISC) ||
+ cfg->is_vlan_force_promisc);
- hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
hw_atl_rpfl2multicast_flr_en_set(self,
IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
@@ -788,13 +795,13 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
- self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
+ cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
hw_atl_rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled &&
- (i <= self->aq_nic_cfg->mc_list_count)) ?
- 1U : 0U, i);
+ (cfg->is_mc_list_enabled &&
+ (i <= cfg->mc_list_count)) ?
+ 1U : 0U, i);
return aq_hw_err_from_flags(self);
}
@@ -1086,7 +1093,7 @@ static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
{
/* set promisc in case of disabling the vlan filter */
- hw_atl_rpf_vlan_prom_mode_en_set(self, !!!enable);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
return aq_hw_err_from_flags(self);
}
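With is_vlan_force_promisc in place, the B0 packet-filter path above boils down to one predicate. A sketch of it (the helper name is illustrative, not part of the patch):

	/* VLAN promiscuous mode must stay on whenever the interface is
	 * promiscuous or no usable VLAN filters are programmed, so tagged
	 * traffic is never silently dropped. */
	static bool aq_vlan_promisc_needed(const struct aq_nic_cfg_s *cfg,
					   unsigned int packet_filter)
	{
		return (packet_filter & IFF_PROMISC) ||
		       cfg->is_vlan_force_promisc;
	}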
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 2375a13bb446..262a28ff81fc 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4180,7 +4180,7 @@ static int macb_probe(struct platform_device *pdev)
if (PTR_ERR(mac) == -EPROBE_DEFER) {
err = -EPROBE_DEFER;
goto err_out_free_netdev;
- } else if (!IS_ERR(mac)) {
+ } else if (!IS_ERR_OR_NULL(mac)) {
ether_addr_copy(bp->dev->dev_addr, mac);
} else {
macb_get_hwaddr(bp);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 8a6785173228..492f8769ac12 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -891,7 +891,7 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
u64 *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
- int status;
+ int status, cnt;
u8 link_status = 0;
if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
@@ -902,6 +902,9 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
+ /* check link status before offline tests */
+ link_status = netif_carrier_ok(netdev);
+
if (test->flags & ETH_TEST_FL_OFFLINE) {
if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
@@ -922,13 +925,26 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
test->flags |= ETH_TEST_FL_FAILED;
}
- status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
- if (status) {
- test->flags |= ETH_TEST_FL_FAILED;
- data[4] = -1;
- } else if (!link_status) {
+ /* link status was down prior to test */
+ if (!link_status) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = 1;
+ return;
+ }
+
+ for (cnt = 10; cnt; cnt--) {
+ status = be_cmd_link_status_query(adapter, NULL, &link_status,
+ 0);
+ if (status) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ data[4] = -1;
+ break;
+ }
+
+ if (link_status)
+ break;
+
+ msleep_interruptible(500);
}
}
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 67f9bb6e941b..9b036c857b1d 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1057,7 +1057,7 @@ sis900_open(struct net_device *net_dev)
sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
/* Enable all known interrupts by setting the interrupt mask. */
- sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
sw32(cr, RxENA | sr32(cr));
sw32(ier, IE);
@@ -1578,7 +1578,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
sw32(txdp, sis_priv->tx_ring_dma);
/* Enable all known interrupts by setting the interrupt mask. */
- sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
}
/**
@@ -1618,7 +1618,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
spin_unlock_irqrestore(&sis_priv->lock, flags);
return NETDEV_TX_OK;
}
- sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
+ sis_priv->tx_ring[entry].cmdsts = (OWN | INTR | skb->len);
sw32(cr, TxENA | sr32(cr));
sis_priv->cur_tx ++;
@@ -1674,7 +1674,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
do {
status = sr32(isr);
- if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
+ if ((status & (HIBERR|TxURN|TxERR|TxIDLE|TxDESC|RxORN|RxERR|RxOK)) == 0)
/* nothing interesting happened */
break;
handled = 1;
@@ -1684,7 +1684,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
/* Rx interrupt */
sis900_rx(net_dev);
- if (status & (TxURN | TxERR | TxIDLE))
+ if (status & (TxURN | TxERR | TxIDLE | TxDESC))
/* Tx interrupt */
sis900_finish_xmit(net_dev);
@@ -1896,8 +1896,8 @@ static void sis900_finish_xmit (struct net_device *net_dev)
if (tx_status & OWN) {
/* The packet has not been transmitted yet (owned by hardware)!
- * Note: the interrupt is generated only when Tx Machine
- * is idle, so this is an almost impossible case */
+ * Note: this is an almost impossible condition
+ * in case of TxDESC ('descriptor interrupt') */
break;
}
@@ -2473,7 +2473,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
/* Enable all known interrupts by setting the interrupt mask. */
- sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
sw32(cr, RxENA | sr32(cr));
sw32(ier, IE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 2dcdf761d525..020159622559 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -112,7 +112,7 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
* programmed with (2^32 – <new_sec_value>)
*/
if (gmac4)
- sec = (100000000ULL - sec);
+ sec = -sec;
value = readl(ioaddr + PTP_TCR);
if (value & PTP_TCR_TSCTRLSSR)
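The one-liner works because the old literal was simply wrong: 100000000ULL is decimal 10^8, not 0x100000000 = 2^32. For an unsigned 32-bit value, two's-complement negation already yields 2^32 - sec, which is exactly what the comment requires. A standalone check of that identity:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sec = 12345;

	uint32_t by_negation = -sec;                           /* what the fix does */
	uint32_t by_subtraction = (uint32_t)(0x100000000ULL - sec);

	printf("%u %u\n", by_negation, by_subtraction);        /* identical */
	return 0;
}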
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 06dd51f47cfd..06358fe5b245 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2947,12 +2947,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* Manage tx mitigation */
tx_q->tx_count_frames += nfrags + 1;
- if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
+ if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
+ !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ stmmac_tx_timer_arm(priv, queue);
+ } else {
+ tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
- tx_q->tx_count_frames = 0;
- } else {
- stmmac_tx_timer_arm(priv, queue);
}
skb_tx_timestamp(skb);
@@ -3166,12 +3169,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* element in case of no SG.
*/
tx_q->tx_count_frames += nfrags + 1;
- if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
+ if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
+ !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ stmmac_tx_timer_arm(priv, queue);
+ } else {
+ tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
- tx_q->tx_count_frames = 0;
- } else {
- stmmac_tx_timer_arm(priv, queue);
}
skb_tx_timestamp(skb);
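Both hunks encode the same decision: defer the Tx interrupt via the coalescing timer only while the frame count is below the threshold and the descriptor carries no hardware timestamp request; otherwise set the interrupt-on-completion bit so the timestamp is delivered promptly. A condensed sketch of the predicate (the helper name is illustrative, not part of the patch):

	static bool stmmac_can_defer_tx_irq(struct stmmac_priv *priv,
					    struct stmmac_tx_queue *tx_q,
					    struct sk_buff *skb)
	{
		bool hw_tstamp = priv->synopsys_id >= DWMAC_CORE_4_00 &&
				 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
				 priv->hwts_tx_en;

		return priv->tx_coal_frames > tx_q->tx_count_frames &&
		       !hw_tstamp;
	}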
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
index bf395df3bb37..1a2e2f7629f3 100644
--- a/drivers/net/ppp/Kconfig
+++ b/drivers/net/ppp/Kconfig
@@ -87,8 +87,7 @@ config PPP_MPPE
depends on PPP
select CRYPTO
select CRYPTO_SHA1
- select CRYPTO_ARC4
- select CRYPTO_ECB
+ select CRYPTO_LIB_ARC4
---help---
Support for the MPPE Encryption protocol, as employed by the
Microsoft Point-to-Point Tunneling Protocol.
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index ff61dd8748de..bd3c80b0bc77 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -42,9 +42,10 @@
* deprecated in 2.6
*/
+#include <crypto/arc4.h>
#include <crypto/hash.h>
-#include <crypto/skcipher.h>
#include <linux/err.h>
+#include <linux/fips.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -63,15 +64,9 @@ MODULE_AUTHOR("Frank Cusack <fcusack@fcusack.com>");
MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
+MODULE_SOFTDEP("pre: arc4");
MODULE_VERSION("1.0.2");
-static unsigned int
-setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
-{
- sg_set_buf(sg, address, length);
- return length;
-}
-
#define SHA1_PAD_SIZE 40
/*
@@ -95,7 +90,7 @@ static inline void sha_pad_init(struct sha_pad *shapad)
* State for an MPPE (de)compressor.
*/
struct ppp_mppe_state {
- struct crypto_sync_skcipher *arc4;
+ struct arc4_ctx arc4;
struct shash_desc *sha1;
unsigned char *sha1_digest;
unsigned char master_key[MPPE_MAX_KEY_LEN];
@@ -154,24 +149,11 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
*/
static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
{
- struct scatterlist sg_in[1], sg_out[1];
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
-
- skcipher_request_set_sync_tfm(req, state->arc4);
- skcipher_request_set_callback(req, 0, NULL, NULL);
-
get_new_key_from_sha(state);
if (!initial_key) {
- crypto_sync_skcipher_setkey(state->arc4, state->sha1_digest,
- state->keylen);
- sg_init_table(sg_in, 1);
- sg_init_table(sg_out, 1);
- setup_sg(sg_in, state->sha1_digest, state->keylen);
- setup_sg(sg_out, state->session_key, state->keylen);
- skcipher_request_set_crypt(req, sg_in, sg_out, state->keylen,
- NULL);
- if (crypto_skcipher_encrypt(req))
- printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
+ arc4_setkey(&state->arc4, state->sha1_digest, state->keylen);
+ arc4_crypt(&state->arc4, state->session_key, state->sha1_digest,
+ state->keylen);
} else {
memcpy(state->session_key, state->sha1_digest, state->keylen);
}
@@ -181,9 +163,7 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
state->session_key[1] = 0x26;
state->session_key[2] = 0x9e;
}
- crypto_sync_skcipher_setkey(state->arc4, state->session_key,
- state->keylen);
- skcipher_request_zero(req);
+ arc4_setkey(&state->arc4, state->session_key, state->keylen);
}
/*
@@ -196,7 +176,8 @@ static void *mppe_alloc(unsigned char *options, int optlen)
unsigned int digestsize;
if (optlen != CILEN_MPPE + sizeof(state->master_key) ||
- options[0] != CI_MPPE || options[1] != CILEN_MPPE)
+ options[0] != CI_MPPE || options[1] != CILEN_MPPE ||
+ fips_enabled)
goto out;
state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -204,12 +185,6 @@ static void *mppe_alloc(unsigned char *options, int optlen)
goto out;
- state->arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
- if (IS_ERR(state->arc4)) {
- state->arc4 = NULL;
- goto out_free;
- }
-
shash = crypto_alloc_shash("sha1", 0, 0);
if (IS_ERR(shash))
goto out_free;
@@ -250,7 +225,6 @@ out_free:
crypto_free_shash(state->sha1->tfm);
kzfree(state->sha1);
}
- crypto_free_sync_skcipher(state->arc4);
kfree(state);
out:
return NULL;
@@ -266,8 +240,7 @@ static void mppe_free(void *arg)
kfree(state->sha1_digest);
crypto_free_shash(state->sha1->tfm);
kzfree(state->sha1);
- crypto_free_sync_skcipher(state->arc4);
- kfree(state);
+ kzfree(state);
}
}
@@ -366,10 +339,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
int isize, int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
int proto;
- int err;
- struct scatterlist sg_in[1], sg_out[1];
/*
* Check that the protocol is in the range we handle.
@@ -420,21 +390,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
ibuf += 2; /* skip to proto field */
isize -= 2;
- /* Encrypt packet */
- sg_init_table(sg_in, 1);
- sg_init_table(sg_out, 1);
- setup_sg(sg_in, ibuf, isize);
- setup_sg(sg_out, obuf, osize);
-
- skcipher_request_set_sync_tfm(req, state->arc4);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL);
- err = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
- if (err) {
- printk(KERN_DEBUG "crypto_cypher_encrypt failed\n");
- return -1;
- }
+ arc4_crypt(&state->arc4, obuf, ibuf, isize);
state->stats.unc_bytes += isize;
state->stats.unc_packets++;
@@ -480,10 +436,8 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
unsigned ccount;
int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
- struct scatterlist sg_in[1], sg_out[1];
if (isize <= PPP_HDRLEN + MPPE_OVHD) {
if (state->debug)
@@ -610,19 +564,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
* Decrypt the first byte in order to check if it is
* a compressed or uncompressed protocol field.
*/
- sg_init_table(sg_in, 1);
- sg_init_table(sg_out, 1);
- setup_sg(sg_in, ibuf, 1);
- setup_sg(sg_out, obuf, 1);
-
- skcipher_request_set_sync_tfm(req, state->arc4);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL);
- if (crypto_skcipher_decrypt(req)) {
- printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
- osize = DECOMP_ERROR;
- goto out_zap_req;
- }
+ arc4_crypt(&state->arc4, obuf, ibuf, 1);
/*
* Do PFC decompression.
@@ -637,14 +579,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
}
/* And finally, decrypt the rest of the packet. */
- setup_sg(sg_in, ibuf + 1, isize - 1);
- setup_sg(sg_out, obuf + 1, osize - 1);
- skcipher_request_set_crypt(req, sg_in, sg_out, isize - 1, NULL);
- if (crypto_skcipher_decrypt(req)) {
- printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
- osize = DECOMP_ERROR;
- goto out_zap_req;
- }
+ arc4_crypt(&state->arc4, obuf + 1, ibuf + 1, isize - 1);
state->stats.unc_bytes += osize;
state->stats.unc_packets++;
@@ -654,8 +589,6 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
/* good packet credit */
state->sanity_errors >>= 1;
-out_zap_req:
- skcipher_request_zero(req);
return osize;
sanity_error:
@@ -728,8 +661,7 @@ static struct compressor ppp_mppe = {
static int __init ppp_mppe_init(void)
{
int answer;
- if (!(crypto_has_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
- crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC)))
+ if (fips_enabled || !crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC))
return -ENODEV;
sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
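After the conversion, MPPE drives RC4 through the small synchronous library selected by CRYPTO_LIB_ARC4 instead of the skcipher API, which removes the scatterlist and request plumbing entirely. A minimal sketch of that library interface as used above (kernel context assumed; the wrapper is illustrative):

#include <crypto/arc4.h>	/* struct arc4_ctx, arc4_setkey(), arc4_crypt() */
#include <linux/string.h>	/* memzero_explicit() */

static void example_arc4(const u8 *key, unsigned int keylen,
			 const u8 *in, u8 *out, unsigned int len)
{
	struct arc4_ctx ctx;

	arc4_setkey(&ctx, key, keylen);		/* expand the key schedule */
	arc4_crypt(&ctx, out, in, len);		/* RC4 is its own inverse */
	memzero_explicit(&ctx, sizeof(ctx));	/* scrub the key schedule */
}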
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index b48006e7fa2f..36916bf51ee6 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2128,12 +2128,12 @@ static void team_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
dev->hw_features = TEAM_VLAN_FEATURES |
- NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
dev->features |= dev->hw_features;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
}
static int team_newlink(struct net *src_net, struct net_device *dev,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d080f8048e52..8b4ad10cf940 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1482,7 +1482,7 @@ static int qmi_wwan_probe(struct usb_interface *intf,
* different. Ignore the current interface if the number of endpoints
* equals the number for the diag interface (two).
*/
- info = (void *)&id->driver_info;
+ info = (void *)id->driver_info;
if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
if (desc->bNumEndpoints == 2)
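The fix works because driver_info in struct usb_device_id is an unsigned-long field that stores a pointer value; the old code took the address of that field instead of using the value. A minimal userspace illustration of the difference (types simplified, names hypothetical):

#include <stdio.h>

struct fake_device_id { unsigned long driver_info; };
struct fake_info { int data; };

int main(void)
{
	static const struct fake_info quirk = { .data = 1 };
	struct fake_device_id id = { .driver_info = (unsigned long)&quirk };

	const struct fake_info *ok  = (void *)id.driver_info;  /* stored value */
	const struct fake_info *bad = (void *)&id.driver_info; /* field address */

	printf("ok->data = %d\n", ok->data);                   /* prints 1 */
	printf("bad aliases the field itself: %p\n", (void *)bad);
	return 0;
}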
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 11b9525dff27..311b0cc6eb98 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -350,8 +350,8 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
{
struct dst_entry *dst = skb_dst(skb);
struct net_device *dev = dst->dev;
+ const struct in6_addr *nexthop;
struct neighbour *neigh;
- struct in6_addr *nexthop;
int ret;
nf_reset(skb);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index fec38a47696e..9f4b117db9d7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -93,7 +93,7 @@ void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
struct cfg80211_pmsr_result result = {
.status = NL80211_PMSR_STATUS_FAILURE,
.final = 1,
- .host_time = ktime_get_boot_ns(),
+ .host_time = ktime_get_boottime_ns(),
.type = NL80211_PMSR_TYPE_FTM,
};
int i;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index fbd3014e8b82..160b0db27103 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -555,7 +555,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control)))
- rx_status->boottime_ns = ktime_get_boot_ns();
+ rx_status->boottime_ns = ktime_get_boottime_ns();
/* Take a reference briefly to kick off a d0i3 entry delay so
* we can handle bursts of RX packets without toggling the
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 1824566d08fc..64f950501287 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -1684,7 +1684,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control)))
- rx_status->boottime_ns = ktime_get_boot_ns();
+ rx_status->boottime_ns = ktime_get_boottime_ns();
}
if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index cc56ab88fb43..72cd5b3f2d8d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1445,7 +1445,7 @@ void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
}
*gp2 = iwl_mvm_get_systime(mvm);
- *boottime = ktime_get_boot_ns();
+ *boottime = ktime_get_boottime_ns();
if (!ps_disabled) {
mvm->ps_disabled = ps_disabled;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 1c699a9fa866..a7bf6519d7aa 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1271,7 +1271,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
*/
if (ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control)) {
- rx_status.boottime_ns = ktime_get_boot_ns();
+ rx_status.boottime_ns = ktime_get_boottime_ns();
now = data->abs_bcn_ts;
} else {
now = mac80211_hwsim_get_tsf_raw();
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index c9a485ecee7b..b74dc8bc9755 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -483,7 +483,7 @@ static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
}
/* update the host-chipset time offset */
- wl->time_offset = (ktime_get_boot_ns() >> 10) -
+ wl->time_offset = (ktime_get_boottime_ns() >> 10) -
(s64)(status->fw_localtime);
wl->fw_fast_lnk_map = status->link_fast_bitmap;
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index d96bb602fae6..307fab21050b 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -93,7 +93,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
}
if (beacon || probe_rsp)
- status->boottime_ns = ktime_get_boot_ns();
+ status->boottime_ns = ktime_get_boottime_ns();
if (beacon)
wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 057c6be330e7..90e56d4c3df3 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -273,7 +273,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
}
/* configure packet life time */
- hosttime = (ktime_get_boot_ns() >> 10);
+ hosttime = (ktime_get_boottime_ns() >> 10);
desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
is_dummy = wl12xx_is_dummy_packet(wl, skb);
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 606999f102eb..be92e1220284 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -172,7 +172,7 @@ static void virt_wifi_scan_result(struct work_struct *work)
informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
CFG80211_BSS_FTYPE_PRESP,
fake_router_bssid,
- ktime_get_boot_ns(),
+ ktime_get_boottime_ns(),
WLAN_CAPABILITY_ESS, 0,
(void *)&ssid, sizeof(ssid),
DBM_TO_MBM(-50), GFP_KERNEL);
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index a570f2263a42..99a5708b37e3 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -55,7 +55,7 @@ static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
struct device *dev = &nvdimm->dev;
sprintf(desc, "%s%s", NVDIMM_PREFIX, nvdimm->dimm_id);
- key = request_key(&key_type_encrypted, desc, "");
+ key = request_key(&key_type_encrypted, desc, "", NULL);
if (IS_ERR(key)) {
if (PTR_ERR(key) == -ENOKEY)
dev_dbg(dev, "request_key() found no key\n");
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 98af9ecd4a90..ca3793002e2f 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -859,7 +859,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
pci_dev->bus->self->skip_bus_pm = true;
}
- if (pci_dev->skip_bus_pm && !pm_suspend_via_firmware()) {
+ if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) {
dev_dbg(dev, "PCI PM: Skipped\n");
goto Fixup;
}
@@ -914,10 +914,10 @@ static int pci_pm_resume_noirq(struct device *dev)
/*
* In the suspend-to-idle case, devices left in D0 during suspend will
* stay in D0, so it is not necessary to restore or update their
- * configuration here and attempting to put them into D0 again may
- * confuse some firmware, so avoid doing that.
+ * configuration here and attempting to put them into D0 again is
+ * pointless, so avoid doing that.
*/
- if (!pci_dev->skip_bus_pm || pm_suspend_via_firmware())
+ if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform()))
pci_pm_default_resume_early(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
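Suspend and resume now test one condition, negated on the resume side, so the two paths cannot drift apart. A sketch of the shared predicate (hypothetical helper, not part of the patch):

	static bool pci_dev_stayed_in_d0(struct pci_dev *pci_dev)
	{
		/* true only for suspend-to-idle with no platform firmware
		 * involvement, per pm_suspend_no_platform() */
		return pci_dev->skip_bus_pm && pm_suspend_no_platform();
	}

	/* suspend_noirq: if (pci_dev_stayed_in_d0(dev))  -> skip bus PM    */
	/* resume_noirq:  if (!pci_dev_stayed_in_d0(dev)) -> resume early   */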
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index e4221a107dca..09ae8a970880 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -71,6 +71,14 @@ config ARM_DSU_PMU
system, control logic. The PMU allows counting various events related
to DSU.
+config FSL_IMX8_DDR_PMU
+ tristate "Freescale i.MX8 DDR perf monitor"
+ depends on ARCH_MXC
+ help
+ Provides support for the DDR performance monitor in i.MX8, which
+ can give information about memory throughput and other related
+ events.
+
config HISI_PMU
bool "HiSilicon SoC PMU"
depends on ARM64 && ACPI
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 30489941f3d6..2ebb4de17815 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
+obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index d2c2978409d2..acce8781c456 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -71,6 +71,76 @@ static void arm_pmu_acpi_unregister_irq(int cpu)
acpi_unregister_gsi(gsi);
}
+#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
+static struct resource spe_resources[] = {
+ {
+ /* irq */
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device spe_dev = {
+ .name = ARMV8_SPE_PDEV_NAME,
+ .id = -1,
+ .resource = spe_resources,
+ .num_resources = ARRAY_SIZE(spe_resources)
+};
+
+/*
+ * For lack of a better place, hook the normal PMU MADT walk
+ * and create an SPE device if we detect a recent MADT with
+ * a homogeneous PPI mapping.
+ */
+static void arm_spe_acpi_register_device(void)
+{
+ int cpu, hetid, irq, ret;
+ bool first = true;
+ u16 gsi = 0;
+
+ /*
+ * Sanity check all the GICC tables for the same interrupt number.
+ * For now, we only support homogeneous ACPI/SPE machines.
+ */
+ for_each_possible_cpu(cpu) {
+ struct acpi_madt_generic_interrupt *gicc;
+
+ gicc = acpi_cpu_get_madt_gicc(cpu);
+ if (gicc->header.length < ACPI_MADT_GICC_SPE)
+ return;
+
+ if (first) {
+ gsi = gicc->spe_interrupt;
+ if (!gsi)
+ return;
+ hetid = find_acpi_cpu_topology_hetero_id(cpu);
+ first = false;
+ } else if ((gsi != gicc->spe_interrupt) ||
+ (hetid != find_acpi_cpu_topology_hetero_id(cpu))) {
+ pr_warn("ACPI: SPE must be homogeneous\n");
+ return;
+ }
+ }
+
+ irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
+ ACPI_ACTIVE_HIGH);
+ if (irq < 0) {
+ pr_warn("ACPI: SPE Unable to register interrupt: %d\n", gsi);
+ return;
+ }
+
+ spe_resources[0].start = irq;
+ ret = platform_device_register(&spe_dev);
+ if (ret < 0) {
+ pr_warn("ACPI: SPE: Unable to register device\n");
+ acpi_unregister_gsi(gsi);
+ }
+}
+#else
+static inline void arm_spe_acpi_register_device(void)
+{
+}
+#endif /* CONFIG_ARM_SPE_PMU */
+
static int arm_pmu_acpi_parse_irqs(void)
{
int irq, cpu, irq_cpu, err;
@@ -276,6 +346,8 @@ static int arm_pmu_acpi_init(void)
if (acpi_disabled)
return 0;
+ arm_spe_acpi_register_device();
+
ret = arm_pmu_acpi_parse_irqs();
if (ret)
return ret;
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 49b490925255..4e4984a55cd1 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -27,6 +27,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
@@ -1157,7 +1158,13 @@ static const struct of_device_id arm_spe_pmu_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_spe_pmu_of_match);
-static int arm_spe_pmu_device_dt_probe(struct platform_device *pdev)
+static const struct platform_device_id arm_spe_match[] = {
+ { ARMV8_SPE_PDEV_NAME, 0},
+ { }
+};
+MODULE_DEVICE_TABLE(platform, arm_spe_match);
+
+static int arm_spe_pmu_device_probe(struct platform_device *pdev)
{
int ret;
struct arm_spe_pmu *spe_pmu;
@@ -1217,11 +1224,12 @@ static int arm_spe_pmu_device_remove(struct platform_device *pdev)
}
static struct platform_driver arm_spe_pmu_driver = {
+ .id_table = arm_spe_match,
.driver = {
.name = DRVNAME,
.of_match_table = of_match_ptr(arm_spe_pmu_of_match),
},
- .probe = arm_spe_pmu_device_dt_probe,
+ .probe = arm_spe_pmu_device_probe,
.remove = arm_spe_pmu_device_remove,
};
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
new file mode 100644
index 000000000000..63fe21600072
--- /dev/null
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 NXP
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+
+#define COUNTER_CNTL 0x0
+#define COUNTER_READ 0x20
+
+#define COUNTER_DPCR1 0x30
+
+#define CNTL_OVER 0x1
+#define CNTL_CLEAR 0x2
+#define CNTL_EN 0x4
+#define CNTL_EN_MASK 0xFFFFFFFB
+#define CNTL_CLEAR_MASK 0xFFFFFFFD
+#define CNTL_OVER_MASK 0xFFFFFFFE
+
+#define CNTL_CSV_SHIFT 24
+#define CNTL_CSV_MASK (0xFF << CNTL_CSV_SHIFT)
+
+#define EVENT_CYCLES_ID 0
+#define EVENT_CYCLES_COUNTER 0
+#define NUM_COUNTERS 4
+
+#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
+
+#define DDR_PERF_DEV_NAME "imx8_ddr"
+#define DDR_CPUHP_CB_NAME DDR_PERF_DEV_NAME "_perf_pmu"
+
+static DEFINE_IDA(ddr_ida);
+
+static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
+ { .compatible = "fsl,imx8-ddr-pmu",},
+ { .compatible = "fsl,imx8m-ddr-pmu",},
+ { /* sentinel */ }
+};
+
+struct ddr_pmu {
+ struct pmu pmu;
+ void __iomem *base;
+ unsigned int cpu;
+ struct hlist_node node;
+ struct device *dev;
+ struct perf_event *events[NUM_COUNTERS];
+ int active_events;
+ enum cpuhp_state cpuhp_state;
+ int irq;
+ int id;
+};
+
+static ssize_t ddr_perf_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ddr_pmu *pmu = dev_get_drvdata(dev);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
+}
+
+static struct device_attribute ddr_perf_cpumask_attr =
+ __ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);
+
+static struct attribute *ddr_perf_cpumask_attrs[] = {
+ &ddr_perf_cpumask_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ddr_perf_cpumask_attr_group = {
+ .attrs = ddr_perf_cpumask_attrs,
+};
+
+static ssize_t
+ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
+ .id = _id, } \
+ })[0].attr.attr)
+
+static struct attribute *ddr_perf_events_attrs[] = {
+ IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
+ IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
+ IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
+ IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
+ IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
+ IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
+ IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
+ IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
+ IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
+ IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
+ IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
+ IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
+ IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
+ IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
+ IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
+ IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
+ IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
+ IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
+ IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
+ IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
+ IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
+ IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
+ IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
+ IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
+ IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
+ IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
+ IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
+ IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
+ IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
+ IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
+ NULL,
+};
+
+static struct attribute_group ddr_perf_events_attr_group = {
+ .name = "events",
+ .attrs = ddr_perf_events_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-7");
+
+static struct attribute *ddr_perf_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group ddr_perf_format_attr_group = {
+ .name = "format",
+ .attrs = ddr_perf_format_attrs,
+};
+
+static const struct attribute_group *attr_groups[] = {
+ &ddr_perf_events_attr_group,
+ &ddr_perf_format_attr_group,
+ &ddr_perf_cpumask_attr_group,
+ NULL,
+};
+
+static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
+{
+ int i;
+
+ /*
+ * Always map the cycle event to counter 0: the cycles counter is
+ * dedicated to the cycle event and can't be used for other events.
+ */
+ if (event == EVENT_CYCLES_ID) {
+ if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
+ return EVENT_CYCLES_COUNTER;
+ else
+ return -ENOENT;
+ }
+
+ for (i = 1; i < NUM_COUNTERS; i++) {
+ if (pmu->events[i] == NULL)
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
+{
+ pmu->events[counter] = NULL;
+}
+
+static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
+{
+ return readl_relaxed(pmu->base + COUNTER_READ + counter * 4);
+}
+
+static int ddr_perf_event_init(struct perf_event *event)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct perf_event *sibling;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+
+ if (event->cpu < 0) {
+ dev_warn(pmu->dev, "Can't provide per-task data!\n");
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * We must NOT create groups containing mixed PMUs, although software
+ * events are acceptable (for example to create a CCN group
+ * periodically read when a hrtimer aka cpu-clock leader triggers).
+ */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader))
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (sibling->pmu != event->pmu &&
+ !is_software_event(sibling))
+ return -EINVAL;
+ }
+
+ event->cpu = pmu->cpu;
+ hwc->idx = -1;
+
+ return 0;
+}
+
+static void ddr_perf_event_update(struct perf_event *event)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 delta, prev_raw_count, new_raw_count;
+ int counter = hwc->idx;
+
+ do {
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = ddr_perf_read_counter(pmu, counter);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count);
+
+ delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;
+
+ local64_add(delta, &event->count);
+}
+
+static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
+ int counter, bool enable)
+{
+ u8 reg = counter * 4 + COUNTER_CNTL;
+ int val;
+
+ if (enable) {
+ /*
+ * We must disable the counter first and then enable it again;
+ * otherwise the cycle counter will not work if it was
+ * previously enabled.
+ */
+ writel(0, pmu->base + reg);
+ val = CNTL_EN | CNTL_CLEAR;
+ val |= FIELD_PREP(CNTL_CSV_MASK, config);
+ writel(val, pmu->base + reg);
+ } else {
+ /* Disable counter */
+ writel(0, pmu->base + reg);
+ }
+}
+
+static void ddr_perf_event_start(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ local64_set(&hwc->prev_count, 0);
+
+ ddr_perf_counter_enable(pmu, event->attr.config, counter, true);
+
+ hwc->state = 0;
+}
+
+static int ddr_perf_event_add(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter;
+ int cfg = event->attr.config;
+
+ counter = ddr_perf_alloc_counter(pmu, cfg);
+ if (counter < 0) {
+ dev_dbg(pmu->dev, "There are not enough counters\n");
+ return -EOPNOTSUPP;
+ }
+
+ pmu->events[counter] = event;
+ pmu->active_events++;
+ hwc->idx = counter;
+
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ ddr_perf_event_start(event, flags);
+
+ return 0;
+}
+
+static void ddr_perf_event_stop(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
+ ddr_perf_event_update(event);
+
+ hwc->state |= PERF_HES_STOPPED;
+}
+
+static void ddr_perf_event_del(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ ddr_perf_event_stop(event, PERF_EF_UPDATE);
+
+ ddr_perf_free_counter(pmu, counter);
+ pmu->active_events--;
+ hwc->idx = -1;
+}
+
+static void ddr_perf_pmu_enable(struct pmu *pmu)
+{
+ struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
+
+ /* enable cycle counter if the cycle event is not in the active event list */
+ if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
+ ddr_perf_counter_enable(ddr_pmu,
+ EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER,
+ true);
+}
+
+static void ddr_perf_pmu_disable(struct pmu *pmu)
+{
+ struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
+
+ if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
+ ddr_perf_counter_enable(ddr_pmu,
+ EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER,
+ false);
+}
+
+static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
+ struct device *dev)
+{
+ *pmu = (struct ddr_pmu) {
+ .pmu = (struct pmu) {
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = attr_groups,
+ .event_init = ddr_perf_event_init,
+ .add = ddr_perf_event_add,
+ .del = ddr_perf_event_del,
+ .start = ddr_perf_event_start,
+ .stop = ddr_perf_event_stop,
+ .read = ddr_perf_event_update,
+ .pmu_enable = ddr_perf_pmu_enable,
+ .pmu_disable = ddr_perf_pmu_disable,
+ },
+ .base = base,
+ .dev = dev,
+ };
+
+ pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
+ return pmu->id;
+}
+
+static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
+{
+ int i;
+ struct ddr_pmu *pmu = (struct ddr_pmu *) p;
+ struct perf_event *event, *cycle_event = NULL;
+
+ /* all counters will stop if the cycle counter is disabled */
+ ddr_perf_counter_enable(pmu,
+ EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER,
+ false);
+ /*
+ * When the cycle counter overflows, all counters are stopped,
+ * and an IRQ is raised. If any other counter overflows, it
+ * continues counting, and no IRQ is raised.
+ *
+ * Cycles occur at least 4 times as often as other events, so we
+ * can update all events on a cycle counter overflow and not
+ * lose events.
+ */
+ for (i = 0; i < NUM_COUNTERS; i++) {
+ if (!pmu->events[i])
+ continue;
+
+ event = pmu->events[i];
+
+ ddr_perf_event_update(event);
+
+ if (event->hw.idx == EVENT_CYCLES_COUNTER)
+ cycle_event = event;
+ }
+
+ ddr_perf_counter_enable(pmu,
+ EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER,
+ true);
+ if (cycle_event)
+ ddr_perf_event_update(cycle_event);
+
+ return IRQ_HANDLED;
+}
+
+static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
+ int target;
+
+ if (cpu != pmu->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&pmu->pmu, cpu, target);
+ pmu->cpu = target;
+
+ WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));
+
+ return 0;
+}
+
+static int ddr_perf_probe(struct platform_device *pdev)
+{
+ struct ddr_pmu *pmu;
+ struct device_node *np;
+ void __iomem *base;
+ char *name;
+ int num;
+ int ret;
+ int irq;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ np = pdev->dev.of_node;
+
+ pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ num = ddr_perf_init(pmu, base, &pdev->dev);
+
+ platform_set_drvdata(pdev, pmu);
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
+ num);
+ if (!name)
+ return -ENOMEM;
+
+ pmu->cpu = raw_smp_processor_id();
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ DDR_CPUHP_CB_NAME,
+ NULL,
+ ddr_perf_offline_cpu);
+
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
+ goto ddr_perf_err;
+ }
+
+ pmu->cpuhp_state = ret;
+
+ /* Register the pmu instance for cpu hotplug */
+ cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+
+ /* Request irq */
+ irq = of_irq_get(np, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get irq: %d", irq);
+ ret = irq;
+ goto ddr_perf_err;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq,
+ ddr_perf_irq_handler,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ DDR_CPUHP_CB_NAME,
+ pmu);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Request irq failed: %d", ret);
+ goto ddr_perf_err;
+ }
+
+ pmu->irq = irq;
+ ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
+ if (ret) {
+ dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
+ goto ddr_perf_err;
+ }
+
+ ret = perf_pmu_register(&pmu->pmu, name, -1);
+ if (ret)
+ goto ddr_perf_err;
+
+ return 0;
+
+ddr_perf_err:
+ if (pmu->cpuhp_state)
+ cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+
+ ida_simple_remove(&ddr_ida, pmu->id);
+ dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
+ return ret;
+}
+
+static int ddr_perf_remove(struct platform_device *pdev)
+{
+ struct ddr_pmu *pmu = platform_get_drvdata(pdev);
+
+ cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ irq_set_affinity_hint(pmu->irq, NULL);
+
+ perf_pmu_unregister(&pmu->pmu);
+
+ ida_simple_remove(&ddr_ida, pmu->id);
+ return 0;
+}
+
+static struct platform_driver imx_ddr_pmu_driver = {
+ .driver = {
+ .name = "imx-ddr-pmu",
+ .of_match_table = imx_ddr_pmu_dt_ids,
+ },
+ .probe = ddr_perf_probe,
+ .remove = ddr_perf_remove,
+};
+
+module_platform_driver(imx_ddr_pmu_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index f464f8cd274b..7e526bcf5e0b 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -113,6 +113,8 @@ static void mtk_eint_mask(struct irq_data *d)
void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
eint->regs->mask_set);
+ eint->cur_mask[d->hwirq >> 5] &= ~mask;
+
writel(mask, reg);
}
@@ -123,6 +125,8 @@ static void mtk_eint_unmask(struct irq_data *d)
void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
eint->regs->mask_clr);
+ eint->cur_mask[d->hwirq >> 5] |= mask;
+
writel(mask, reg);
if (eint->dual_edge[d->hwirq])
@@ -217,19 +221,6 @@ static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
}
}
-static void mtk_eint_chip_read_mask(const struct mtk_eint *eint,
- void __iomem *base, u32 *buf)
-{
- int port;
- void __iomem *reg;
-
- for (port = 0; port < eint->hw->ports; port++) {
- reg = base + eint->regs->mask + (port << 2);
- buf[port] = ~readl_relaxed(reg);
- /* Mask is 0 when irq is enabled, and 1 when disabled. */
- }
-}
-
static int mtk_eint_irq_request_resources(struct irq_data *d)
{
struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
@@ -318,7 +309,7 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
struct mtk_eint *eint = irq_desc_get_handler_data(desc);
unsigned int status, eint_num;
- int offset, index, virq;
+ int offset, mask_offset, index, virq;
void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat);
int dual_edge, start_level, curr_level;
@@ -328,10 +319,24 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
status = readl(reg);
while (status) {
offset = __ffs(status);
+ mask_offset = eint_num >> 5;
index = eint_num + offset;
virq = irq_find_mapping(eint->domain, index);
status &= ~BIT(offset);
+ /*
+ * If we get an interrupt on a pin that was only required
+ * for wake (but no real interrupt requested), mask the
+ * interrupt (as mtk_eint_resume would do anyway later
+ * in the resume sequence).
+ */
+ if (eint->wake_mask[mask_offset] & BIT(offset) &&
+ !(eint->cur_mask[mask_offset] & BIT(offset))) {
+ writel_relaxed(BIT(offset), reg -
+ eint->regs->stat +
+ eint->regs->mask_set);
+ }
+
dual_edge = eint->dual_edge[index];
if (dual_edge) {
/*
@@ -370,7 +375,6 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
int mtk_eint_do_suspend(struct mtk_eint *eint)
{
- mtk_eint_chip_read_mask(eint, eint->base, eint->cur_mask);
mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);
return 0;
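cur_mask is now maintained as a software mirror of the hardware mask registers rather than read back on suspend, so the interrupt handler can consult it cheaply. The indexing convention used throughout is one u32 word per 32-pin port (a hypothetical helper, not in the patch):

	/* pin state lives at word hwirq >> 5, bit hwirq & 31 */
	static inline bool mtk_eint_is_unmasked(const struct mtk_eint *eint,
						u32 hwirq)
	{
		return eint->cur_mask[hwirq >> 5] & BIT(hwirq & 31);
	}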
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 568ca96cdb6d..3a235487e38d 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -771,6 +771,10 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
if (ret < 0)
goto fail;
+ ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
+ if (ret < 0)
+ goto fail;
+
mcp->irq_controller =
device_property_read_bool(dev, "interrupt-controller");
if (mcp->irq && mcp->irq_controller) {
@@ -812,10 +816,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
goto fail;
}
- ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
- if (ret < 0)
- goto fail;
-
if (one_regmap_config) {
mcp->pinctrl_desc.name = devm_kasprintf(dev, GFP_KERNEL,
"mcp23xxx-pinctrl.%d", raw_chip_address);
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 3b4ca52d2456..fb76fb2e9ea5 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -396,7 +396,7 @@ static int ocelot_pin_function_idx(struct ocelot_pinctrl *info,
return -1;
}
-#define REG(r, info, p) ((r) * (info)->stride + (4 * ((p) / 32)))
+#define REG_ALT(msb, info, p) (OCELOT_GPIO_ALT0 * (info)->stride + 4 * ((msb) + ((info)->stride * ((p) / 32))))
static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int selector, unsigned int group)
@@ -412,19 +412,21 @@ static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
/*
* f is encoded on two bits.
- * bit 0 of f goes in BIT(pin) of ALT0, bit 1 of f goes in BIT(pin) of
- * ALT1
+ * bit 0 of f goes in BIT(pin) of ALT[0], bit 1 of f goes in BIT(pin) of
+ * ALT[1]
* This is racy because both registers can't be updated at the same time
* but it doesn't matter much for now.
*/
- regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT0, info, pin->pin),
+ regmap_update_bits(info->map, REG_ALT(0, info, pin->pin),
BIT(p), f << p);
- regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT1, info, pin->pin),
+ regmap_update_bits(info->map, REG_ALT(1, info, pin->pin),
BIT(p), f << (p - 1));
return 0;
}
+#define REG(r, info, p) ((r) * (info)->stride + (4 * ((p) / 32)))
+
static int ocelot_gpio_set_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned int pin, bool input)
@@ -432,7 +434,7 @@ static int ocelot_gpio_set_direction(struct pinctrl_dev *pctldev,
struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
unsigned int p = pin % 32;
- regmap_update_bits(info->map, REG(OCELOT_GPIO_OE, info, p), BIT(p),
+ regmap_update_bits(info->map, REG(OCELOT_GPIO_OE, info, pin), BIT(p),
input ? 0 : BIT(p));
return 0;
@@ -445,9 +447,9 @@ static int ocelot_gpio_request_enable(struct pinctrl_dev *pctldev,
struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
unsigned int p = offset % 32;
- regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT0, info, offset),
+ regmap_update_bits(info->map, REG_ALT(0, info, offset),
BIT(p), 0);
- regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT1, info, offset),
+ regmap_update_bits(info->map, REG_ALT(1, info, offset),
BIT(p), 0);
return 0;
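REG_ALT() exists because ALT0 and ALT1 are each a block of stride consecutive 32-bit words on devices with more than 32 pins, so the old REG() arithmetic picked the wrong word inside the ALT1 block. A standalone check of the new offset math (the base register index and stride here are illustrative values, not taken from the driver):

#include <stdio.h>

#define GPIO_ALT0 5	/* assumed base register index, illustration only */
#define REG_ALT(msb, stride, p) \
	(GPIO_ALT0 * (stride) + 4 * ((msb) + ((stride) * ((p) / 32))))

int main(void)
{
	int stride = 2;	/* e.g. a 64-GPIO variant: two words per register */

	/* pin 40 sits in word 1 of each ALT block */
	printf("ALT0 word offset: 0x%x\n", REG_ALT(0, stride, 40));
	printf("ALT1 word offset: 0x%x\n", REG_ALT(1, stride, 40));
	return 0;
}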
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index f888117b0efc..8692f6b79f93 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -166,12 +166,15 @@ struct rapl_domain {
#define power_zone_to_rapl_domain(_zone) \
container_of(_zone, struct rapl_domain, power_zone)
+/* maximum rapl package domain name: package-%d-die-%d */
+#define PACKAGE_DOMAIN_NAME_LENGTH 30
-/* Each physical package contains multiple domains, these are the common
+
+/* Each rapl package contains multiple domains, these are the common
* data across RAPL domains within a package.
*/
struct rapl_package {
- unsigned int id; /* physical package/socket id */
+ unsigned int id; /* logical die id; equals the physical id on 1-die systems */
unsigned int nr_domains;
unsigned long domain_map; /* bit map of active domains */
unsigned int power_unit;
@@ -186,6 +189,7 @@ struct rapl_package {
int lead_cpu; /* one active cpu per package for access */
/* Track active cpus */
struct cpumask cpumask;
+ char name[PACKAGE_DOMAIN_NAME_LENGTH];
};
struct rapl_defaults {
@@ -252,8 +256,9 @@ static struct powercap_control_type *control_type; /* PowerCap Controller */
static struct rapl_domain *platform_rapl_domain; /* Platform (PSys) domain */
/* caller to ensure CPU hotplug lock is held */
-static struct rapl_package *find_package_by_id(int id)
+static struct rapl_package *rapl_find_package_domain(int cpu)
{
+ int id = topology_logical_die_id(cpu);
struct rapl_package *rp;
list_for_each_entry(rp, &rapl_packages, plist) {
@@ -913,8 +918,8 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
rp->time_unit = 1000000 / (1 << value);
- pr_debug("Core CPU package %d energy=%dpJ, time=%dus, power=%duW\n",
- rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
+ pr_debug("Core CPU %s energy=%dpJ, time=%dus, power=%duW\n",
+ rp->name, rp->energy_unit, rp->time_unit, rp->power_unit);
return 0;
}
@@ -938,8 +943,8 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
rp->time_unit = 1000000 / (1 << value);
- pr_debug("Atom package %d energy=%dpJ, time=%dus, power=%duW\n",
- rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
+ pr_debug("Atom %s energy=%dpJ, time=%dus, power=%duW\n",
+ rp->name, rp->energy_unit, rp->time_unit, rp->power_unit);
return 0;
}
@@ -1168,7 +1173,7 @@ static void rapl_update_domain_data(struct rapl_package *rp)
u64 val;
for (dmn = 0; dmn < rp->nr_domains; dmn++) {
- pr_debug("update package %d domain %s data\n", rp->id,
+ pr_debug("update %s domain %s data\n", rp->name,
rp->domains[dmn].name);
/* exclude non-raw primitives */
for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++) {
@@ -1193,7 +1198,6 @@ static void rapl_unregister_powercap(void)
static int rapl_package_register_powercap(struct rapl_package *rp)
{
struct rapl_domain *rd;
- char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null*/
struct powercap_zone *power_zone = NULL;
int nr_pl, ret;
@@ -1204,20 +1208,16 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
if (rd->id == RAPL_DOMAIN_PACKAGE) {
nr_pl = find_nr_power_limit(rd);
- pr_debug("register socket %d package domain %s\n",
- rp->id, rd->name);
- memset(dev_name, 0, sizeof(dev_name));
- snprintf(dev_name, sizeof(dev_name), "%s-%d",
- rd->name, rp->id);
+ pr_debug("register package domain %s\n", rp->name);
power_zone = powercap_register_zone(&rd->power_zone,
control_type,
- dev_name, NULL,
+ rp->name, NULL,
&zone_ops[rd->id],
nr_pl,
&constraint_ops);
if (IS_ERR(power_zone)) {
- pr_debug("failed to register package, %d\n",
- rp->id);
+ pr_debug("failed to register power zone %s\n",
+ rp->name);
return PTR_ERR(power_zone);
}
/* track parent zone in per package/socket data */
@@ -1243,8 +1243,8 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
&constraint_ops);
if (IS_ERR(power_zone)) {
- pr_debug("failed to register power_zone, %d:%s:%s\n",
- rp->id, rd->name, dev_name);
+ pr_debug("failed to register power_zone, %s:%s\n",
+ rp->name, rd->name);
ret = PTR_ERR(power_zone);
goto err_cleanup;
}
@@ -1257,7 +1257,7 @@ err_cleanup:
* failed after the first domain setup.
*/
while (--rd >= rp->domains) {
- pr_debug("unregister package %d domain %s\n", rp->id, rd->name);
+ pr_debug("unregister %s domain %s\n", rp->name, rd->name);
powercap_unregister_zone(control_type, &rd->power_zone);
}
@@ -1288,7 +1288,7 @@ static int __init rapl_register_psys(void)
rd->rpl[0].name = pl1_name;
rd->rpl[1].prim_id = PL2_ENABLE;
rd->rpl[1].name = pl2_name;
- rd->rp = find_package_by_id(0);
+ rd->rp = rapl_find_package_domain(0);
power_zone = powercap_register_zone(&rd->power_zone, control_type,
"psys", NULL,
@@ -1367,8 +1367,8 @@ static void rapl_detect_powerlimit(struct rapl_domain *rd)
/* check if the domain is locked by BIOS, ignore if MSR doesn't exist */
if (!rapl_read_data_raw(rd, FW_LOCK, false, &val64)) {
if (val64) {
- pr_info("RAPL package %d domain %s locked by BIOS\n",
- rd->rp->id, rd->name);
+ pr_info("RAPL %s domain %s locked by BIOS\n",
+ rd->rp->name, rd->name);
rd->state |= DOMAIN_STATE_BIOS_LOCKED;
}
}
@@ -1397,10 +1397,10 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
}
rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
if (!rp->nr_domains) {
- pr_debug("no valid rapl domains found in package %d\n", rp->id);
+ pr_debug("no valid rapl domains found in %s\n", rp->name);
return -ENODEV;
}
- pr_debug("found %d domains on package %d\n", rp->nr_domains, rp->id);
+ pr_debug("found %d domains on %s\n", rp->nr_domains, rp->name);
rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain),
GFP_KERNEL);
@@ -1433,8 +1433,8 @@ static void rapl_remove_package(struct rapl_package *rp)
rd_package = rd;
continue;
}
- pr_debug("remove package, undo power limit on %d: %s\n",
- rp->id, rd->name);
+ pr_debug("remove package, undo power limit on %s: %s\n",
+ rp->name, rd->name);
powercap_unregister_zone(control_type, &rd->power_zone);
}
/* do parent zone last */
@@ -1444,9 +1444,11 @@ static void rapl_remove_package(struct rapl_package *rp)
}
/* called from CPU hotplug notifier, hotplug lock held */
-static struct rapl_package *rapl_add_package(int cpu, int pkgid)
+static struct rapl_package *rapl_add_package(int cpu)
{
+ int id = topology_logical_die_id(cpu);
struct rapl_package *rp;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
int ret;
rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
@@ -1454,9 +1456,16 @@ static struct rapl_package *rapl_add_package(int cpu, int pkgid)
return ERR_PTR(-ENOMEM);
/* add the new package to the list */
- rp->id = pkgid;
+ rp->id = id;
rp->lead_cpu = cpu;
+ if (topology_max_die_per_package() > 1)
+ snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH,
+ "package-%d-die-%d", c->phys_proc_id, c->cpu_die_id);
+ else
+ snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d",
+ c->phys_proc_id);
+
/* check if the package contains valid domains */
if (rapl_detect_domains(rp, cpu) ||
rapl_defaults->check_unit(rp, cpu)) {
@@ -1485,12 +1494,11 @@ err_free_package:
*/
static int rapl_cpu_online(unsigned int cpu)
{
- int pkgid = topology_physical_package_id(cpu);
struct rapl_package *rp;
- rp = find_package_by_id(pkgid);
+ rp = rapl_find_package_domain(cpu);
if (!rp) {
- rp = rapl_add_package(cpu, pkgid);
+ rp = rapl_add_package(cpu);
if (IS_ERR(rp))
return PTR_ERR(rp);
}
@@ -1500,11 +1508,10 @@ static int rapl_cpu_online(unsigned int cpu)
static int rapl_cpu_down_prep(unsigned int cpu)
{
- int pkgid = topology_physical_package_id(cpu);
struct rapl_package *rp;
int lead_cpu;
- rp = find_package_by_id(pkgid);
+ rp = rapl_find_package_domain(cpu);
if (!rp)
return 0;
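
The name buffer added above is sized for the worst case of the two formats. A quick standalone sketch (plain C, topology values made up) of the naming logic:

#include <stdio.h>

#define PACKAGE_DOMAIN_NAME_LENGTH 30

int main(void)
{
	char name[PACKAGE_DOMAIN_NAME_LENGTH];
	int phys_proc_id = 1, cpu_die_id = 0, max_die_per_package = 2;

	if (max_die_per_package > 1)
		snprintf(name, sizeof(name), "package-%d-die-%d",
			 phys_proc_id, cpu_die_id);
	else
		snprintf(name, sizeof(name), "package-%d", phys_proc_id);

	printf("%s\n", name);	/* -> package-1-die-0 */
	return 0;
}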
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index 673f8a128397..5d545806d930 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -1,4 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2019 Borislav Petkov, SUSE Labs.
+ */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
@@ -37,9 +40,9 @@
* thus emulate an LRU-like behavior when deleting elements to free up space
* in the page.
*
- * When an element reaches it's max count of count_threshold, we try to poison
- * it by assuming that errors triggered count_threshold times in a single page
- * are excessive and that page shouldn't be used anymore. count_threshold is
+ * When an element reaches its max count of action_threshold, we try to poison
+ * it by assuming that errors triggered action_threshold times in a single page
+ * are excessive and that page shouldn't be used anymore. action_threshold is
* initialized to COUNT_MASK which is the maximum.
*
* That error event entry causes cec_add_elem() to return !0 value and thus
@@ -122,7 +125,7 @@ static DEFINE_MUTEX(ce_mutex);
static u64 dfs_pfn;
/* Amount of errors after which we offline */
-static unsigned int count_threshold = COUNT_MASK;
+static u64 action_threshold = COUNT_MASK;
/* Each element "decays" each decay_interval which is 24hrs by default. */
#define CEC_DECAY_DEFAULT_INTERVAL 24 * 60 * 60 /* 24 hrs */
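
For orientation: each slot in the cec array packs a PFN, a two-bit decay generation, and a saturating error count into a single u64, which is what the PFN(), DECAY(), COUNT() and FULL_COUNT() accessors used below extract. A standalone sketch of that layout; the PAGE_SHIFT/DECAY_BITS values here are assumptions in the spirit of cec.c, not quoted from this patch:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12	/* assumed */
#define DECAY_BITS	2	/* assumed */
#define COUNT_BITS	(PAGE_SHIFT - DECAY_BITS)
#define COUNT_MASK	((1ULL << COUNT_BITS) - 1)
#define DECAY_MASK	((1ULL << DECAY_BITS) - 1)

#define PFN(e)		((e) >> PAGE_SHIFT)
#define COUNT(e)	((unsigned int)((e) & COUNT_MASK))
#define DECAY(e)	(((e) >> COUNT_BITS) & DECAY_MASK)

int main(void)
{
	uint64_t elem = (uint64_t)0xabcd << PAGE_SHIFT;	/* fresh slot, count 0 */

	elem |= DECAY_MASK << COUNT_BITS;	/* refresh the generation bits */
	elem++;					/* one more error seen */

	printf("pfn=%llx decay=%llu count=%u\n",
	       (unsigned long long)PFN(elem),
	       (unsigned long long)DECAY(elem), COUNT(elem));
	return 0;
}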
@@ -276,11 +279,39 @@ static u64 __maybe_unused del_lru_elem(void)
return pfn;
}
+static bool sanity_check(struct ce_array *ca)
+{
+ bool ret = false;
+ u64 prev = 0;
+ int i;
+
+ for (i = 0; i < ca->n; i++) {
+ u64 this = PFN(ca->array[i]);
+
+ if (WARN(prev > this, "prev: 0x%016llx <-> this: 0x%016llx\n", prev, this))
+ ret = true;
+
+ prev = this;
+ }
+
+ if (!ret)
+ return ret;
+
+ pr_info("Sanity check dump:\n{ n: %d\n", ca->n);
+ for (i = 0; i < ca->n; i++) {
+ u64 this = PFN(ca->array[i]);
+
+ pr_info(" %03d: [%016llx|%03llx]\n", i, this, FULL_COUNT(ca->array[i]));
+ }
+ pr_info("}\n");
+
+ return ret;
+}
int cec_add_elem(u64 pfn)
{
struct ce_array *ca = &ce_arr;
- unsigned int to;
+ unsigned int to = 0;
int count, ret = 0;
/*
@@ -294,6 +325,7 @@ int cec_add_elem(u64 pfn)
ca->ces_entered++;
+ /* Array full, free the LRU slot. */
if (ca->n == MAX_ELEMS)
WARN_ON(!del_lru_elem_unlocked(ca));
@@ -306,24 +338,17 @@ int cec_add_elem(u64 pfn)
(void *)&ca->array[to],
(ca->n - to) * sizeof(u64));
- ca->array[to] = (pfn << PAGE_SHIFT) |
- (DECAY_MASK << COUNT_BITS) | 1;
-
+ ca->array[to] = pfn << PAGE_SHIFT;
ca->n++;
-
- ret = 0;
-
- goto decay;
}
- count = COUNT(ca->array[to]);
-
- if (count < count_threshold) {
- ca->array[to] |= (DECAY_MASK << COUNT_BITS);
- ca->array[to]++;
+ /* Add/refresh element generation and increment count */
+ ca->array[to] |= DECAY_MASK << COUNT_BITS;
+ ca->array[to]++;
- ret = 0;
- } else {
+ /* Check action threshold and soft-offline, if reached. */
+ count = COUNT(ca->array[to]);
+ if (count >= action_threshold) {
u64 pfn = ca->array[to] >> PAGE_SHIFT;
if (!pfn_valid(pfn)) {
@@ -338,20 +363,21 @@ int cec_add_elem(u64 pfn)
del_elem(ca, to);
/*
- * Return a >0 value to denote that we've reached the offlining
- * threshold.
+ * Return a >0 value to callers, to denote that we've reached
+ * the offlining threshold.
*/
ret = 1;
goto unlock;
}
-decay:
ca->decay_count++;
if (ca->decay_count >= CLEAN_ELEMS)
do_spring_cleaning(ca);
+ WARN_ON_ONCE(sanity_check(ca));
+
unlock:
mutex_unlock(&ce_mutex);
@@ -369,45 +395,48 @@ static int pfn_set(void *data, u64 val)
{
*(u64 *)data = val;
- return cec_add_elem(val);
+ cec_add_elem(val);
+
+ return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(pfn_ops, u64_get, pfn_set, "0x%llx\n");
static int decay_interval_set(void *data, u64 val)
{
- *(u64 *)data = val;
-
if (val < CEC_DECAY_MIN_INTERVAL)
return -EINVAL;
if (val > CEC_DECAY_MAX_INTERVAL)
return -EINVAL;
+ *(u64 *)data = val;
decay_interval = val;
cec_mod_work(decay_interval);
+
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(decay_interval_ops, u64_get, decay_interval_set, "%lld\n");
-static int count_threshold_set(void *data, u64 val)
+static int action_threshold_set(void *data, u64 val)
{
*(u64 *)data = val;
if (val > COUNT_MASK)
val = COUNT_MASK;
- count_threshold = val;
+ action_threshold = val;
return 0;
}
-DEFINE_DEBUGFS_ATTRIBUTE(count_threshold_ops, u64_get, count_threshold_set, "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(action_threshold_ops, u64_get, action_threshold_set, "%lld\n");
+
+static const char * const bins[] = { "00", "01", "10", "11" };
static int array_dump(struct seq_file *m, void *v)
{
struct ce_array *ca = &ce_arr;
- u64 prev = 0;
int i;
mutex_lock(&ce_mutex);
@@ -416,11 +445,8 @@ static int array_dump(struct seq_file *m, void *v)
for (i = 0; i < ca->n; i++) {
u64 this = PFN(ca->array[i]);
- seq_printf(m, " %03d: [%016llx|%03llx]\n", i, this, FULL_COUNT(ca->array[i]));
-
- WARN_ON(prev > this);
-
- prev = this;
+ seq_printf(m, " %3d: [%016llx|%s|%03llx]\n",
+ i, this, bins[DECAY(ca->array[i])], COUNT(ca->array[i]));
}
seq_printf(m, "}\n");
@@ -433,7 +459,7 @@ static int array_dump(struct seq_file *m, void *v)
seq_printf(m, "Decay interval: %lld seconds\n", decay_interval);
seq_printf(m, "Decays: %lld\n", ca->decays_done);
- seq_printf(m, "Action threshold: %d\n", count_threshold);
+ seq_printf(m, "Action threshold: %lld\n", action_threshold);
mutex_unlock(&ce_mutex);
@@ -463,18 +489,6 @@ static int __init create_debugfs_nodes(void)
return -1;
}
- pfn = debugfs_create_file("pfn", S_IRUSR | S_IWUSR, d, &dfs_pfn, &pfn_ops);
- if (!pfn) {
- pr_warn("Error creating pfn debugfs node!\n");
- goto err;
- }
-
- array = debugfs_create_file("array", S_IRUSR, d, NULL, &array_ops);
- if (!array) {
- pr_warn("Error creating array debugfs node!\n");
- goto err;
- }
-
decay = debugfs_create_file("decay_interval", S_IRUSR | S_IWUSR, d,
&decay_interval, &decay_interval_ops);
if (!decay) {
@@ -482,13 +496,27 @@ static int __init create_debugfs_nodes(void)
goto err;
}
- count = debugfs_create_file("count_threshold", S_IRUSR | S_IWUSR, d,
- &count_threshold, &count_threshold_ops);
+ count = debugfs_create_file("action_threshold", S_IRUSR | S_IWUSR, d,
+ &action_threshold, &action_threshold_ops);
if (!count) {
- pr_warn("Error creating count_threshold debugfs node!\n");
+ pr_warn("Error creating action_threshold debugfs node!\n");
+ goto err;
+ }
+
+ if (!IS_ENABLED(CONFIG_RAS_CEC_DEBUG))
+ return 0;
+
+ pfn = debugfs_create_file("pfn", S_IRUSR | S_IWUSR, d, &dfs_pfn, &pfn_ops);
+ if (!pfn) {
+ pr_warn("Error creating pfn debugfs node!\n");
goto err;
}
+ array = debugfs_create_file("array", S_IRUSR, d, NULL, &array_ops);
+ if (!array) {
+ pr_warn("Error creating array debugfs node!\n");
+ goto err;
+ }
return 0;
@@ -509,8 +537,10 @@ void __init cec_init(void)
return;
}
- if (create_debugfs_nodes())
+ if (create_debugfs_nodes()) {
+ free_page((unsigned long)ce_arr.array);
return;
+ }
INIT_DELAYED_WORK(&cec_work, cec_work_fn);
schedule_delayed_work(&cec_work, CEC_DECAY_DEFAULT_INTERVAL);
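
Note how decay_interval_set() above now validates before storing, so a rejected write leaves the old value intact. The same pattern, as a minimal self-contained module sketch with a hypothetical my_knob tunable (assumed names, not from this patch):

#include <linux/debugfs.h>
#include <linux/module.h>

static struct dentry *dir;
static u64 my_knob = 10;	/* hypothetical tunable */

static int my_knob_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int my_knob_set(void *data, u64 val)
{
	if (val < 1 || val > 100)	/* reject before touching the state */
		return -EINVAL;

	*(u64 *)data = val;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(my_knob_ops, my_knob_get, my_knob_set, "%llu\n");

static int __init my_init(void)
{
	dir = debugfs_create_dir("my_dir", NULL);
	debugfs_create_file("my_knob", 0600, dir, &my_knob, &my_knob_ops);
	return 0;
}

static void __exit my_exit(void)
{
	debugfs_remove_recursive(dir);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");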
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 9ac7574e3cfb..a8682f69effc 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -38,7 +38,7 @@ config DASD_PROFILE
depends on DASD
help
Enable this option if you want to see profiling information
- in /proc/dasd/statistics.
+ in /proc/dasd/statistics.
config DASD_ECKD
def_tristate y
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index fab35c6170cc..245f33c2f71e 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -203,7 +203,7 @@ static int __init dasd_feature_list(char *str)
else if (len == 8 && !strncmp(str, "failfast", 8))
features |= DASD_FEATURE_FAILFAST;
else {
- pr_warn("%*s is not a supported device option\n",
+ pr_warn("%.*s is not a supported device option\n",
len, str);
rc = -EINVAL;
}
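
The dasd_devmap change is a single character but worth spelling out: %*s consumes the int argument as a field width (it pads, never truncates), while %.*s consumes it as a precision (an upper bound on bytes printed), which is what a token that is not NUL-terminated needs. A standalone illustration:

#include <stdio.h>

int main(void)
{
	const char *str = "badopt,ro";	/* token "badopt" is not NUL-terminated */
	int len = 6;

	printf("[%*s]\n", len, str);	/* width: pads only, whole string leaks out */
	printf("[%.*s]\n", len, str);	/* precision: prints exactly len bytes */
	return 0;
}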
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index ab0b243a947d..6cc4b19acf85 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -79,27 +79,6 @@ config SCLP_VT220_CONSOLE
Include support for using an IBM SCLP VT220-compatible terminal as a
Linux system console.
-config SCLP_ASYNC
- def_tristate m
- prompt "Support for Call Home via Asynchronous SCLP Records"
- depends on S390
- help
- This option enables the call home function, which is able to inform
- the service element and connected organisations about a kernel panic.
- You should only select this option if you know what you are doing,
- want for inform other people about your kernel panics,
- need this feature and intend to run your kernel in LPAR.
-
-config SCLP_ASYNC_ID
- string "Component ID for Call Home"
- depends on SCLP_ASYNC
- default "000000000"
- help
- The Component ID for Call Home is used to identify the correct
- problem reporting queue the call home records should be sent to.
-
- If your are unsure, please use the default value "000000000".
-
config HMC_DRV
def_tristate m
prompt "Support for file transfers from HMC drive CD/DVD-ROM"
@@ -205,4 +184,3 @@ config S390_VMUR
depends on S390
help
Character device driver for z/VM reader, puncher and printer.
-
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 3072b89785dd..b8a8816d94e7 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_TN3215) += con3215.o
obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
-obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
obj-$(CONFIG_PCI) += sclp_pci.o
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
deleted file mode 100644
index e69b12a40636..000000000000
--- a/drivers/s390/char/sclp_async.c
+++ /dev/null
@@ -1,189 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Enable Asynchronous Notification via SCLP.
- *
- * Copyright IBM Corp. 2009
- * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/stat.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/kmod.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/proc_fs.h>
-#include <linux/sysctl.h>
-#include <linux/utsname.h>
-#include "sclp.h"
-
-static int callhome_enabled;
-static struct sclp_req *request;
-static struct sclp_async_sccb *sccb;
-static int sclp_async_send_wait(char *message);
-static struct ctl_table_header *callhome_sysctl_header;
-static DEFINE_SPINLOCK(sclp_async_lock);
-#define SCLP_NORMAL_WRITE 0x00
-
-struct async_evbuf {
- struct evbuf_header header;
- u64 reserved;
- u8 rflags;
- u8 empty;
- u8 rtype;
- u8 otype;
- char comp_id[12];
- char data[3000]; /* there is still some space left */
-} __attribute__((packed));
-
-struct sclp_async_sccb {
- struct sccb_header header;
- struct async_evbuf evbuf;
-} __attribute__((packed));
-
-static struct sclp_register sclp_async_register = {
- .send_mask = EVTYP_ASYNC_MASK,
-};
-
-static int call_home_on_panic(struct notifier_block *self,
- unsigned long event, void *data)
-{
- strncat(data, init_utsname()->nodename,
- sizeof(init_utsname()->nodename));
- sclp_async_send_wait(data);
- return NOTIFY_DONE;
-}
-
-static struct notifier_block call_home_panic_nb = {
- .notifier_call = call_home_on_panic,
- .priority = INT_MAX,
-};
-
-static int zero;
-static int one = 1;
-
-static struct ctl_table callhome_table[] = {
- {
- .procname = "callhome",
- .data = &callhome_enabled,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
- },
- {}
-};
-
-static struct ctl_table kern_dir_table[] = {
- {
- .procname = "kernel",
- .maxlen = 0,
- .mode = 0555,
- .child = callhome_table,
- },
- {}
-};
-
-/*
- * Function used to transfer asynchronous notification
- * records which waits for send completion
- */
-static int sclp_async_send_wait(char *message)
-{
- struct async_evbuf *evb;
- int rc;
- unsigned long flags;
-
- if (!callhome_enabled)
- return 0;
- sccb->evbuf.header.type = EVTYP_ASYNC;
- sccb->evbuf.rtype = 0xA5;
- sccb->evbuf.otype = 0x00;
- evb = &sccb->evbuf;
- request->command = SCLP_CMDW_WRITE_EVENT_DATA;
- request->sccb = sccb;
- request->status = SCLP_REQ_FILLED;
- strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
- /*
- * Retain Queue
- * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
- */
- strncpy(sccb->evbuf.comp_id, CONFIG_SCLP_ASYNC_ID,
- sizeof(sccb->evbuf.comp_id));
- sccb->evbuf.header.length = sizeof(sccb->evbuf);
- sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
- sccb->header.function_code = SCLP_NORMAL_WRITE;
- rc = sclp_add_request(request);
- if (rc)
- return rc;
- spin_lock_irqsave(&sclp_async_lock, flags);
- while (request->status != SCLP_REQ_DONE &&
- request->status != SCLP_REQ_FAILED) {
- sclp_sync_wait();
- }
- spin_unlock_irqrestore(&sclp_async_lock, flags);
- if (request->status != SCLP_REQ_DONE)
- return -EIO;
- rc = ((struct sclp_async_sccb *)
- request->sccb)->header.response_code;
- if (rc != 0x0020)
- return -EIO;
- if (evb->header.flags != 0x80)
- return -EIO;
- return rc;
-}
-
-static int __init sclp_async_init(void)
-{
- int rc;
-
- rc = sclp_register(&sclp_async_register);
- if (rc)
- return rc;
- rc = -EOPNOTSUPP;
- if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
- goto out_sclp;
- rc = -ENOMEM;
- callhome_sysctl_header = register_sysctl_table(kern_dir_table);
- if (!callhome_sysctl_header)
- goto out_sclp;
- request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
- sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!request || !sccb)
- goto out_mem;
- rc = atomic_notifier_chain_register(&panic_notifier_list,
- &call_home_panic_nb);
- if (!rc)
- goto out;
-out_mem:
- kfree(request);
- free_page((unsigned long) sccb);
- unregister_sysctl_table(callhome_sysctl_header);
-out_sclp:
- sclp_unregister(&sclp_async_register);
-out:
- return rc;
-}
-module_init(sclp_async_init);
-
-static void __exit sclp_async_exit(void)
-{
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &call_home_panic_nb);
- unregister_sysctl_table(callhome_sysctl_header);
- sclp_unregister(&sclp_async_register);
- free_page((unsigned long) sccb);
- kfree(request);
-}
-module_exit(sclp_async_exit);
-
-MODULE_AUTHOR("Copyright IBM Corp. 2009");
-MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 405a60538630..08f812475f5e 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -4,7 +4,7 @@
* dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
* dump format as s390 standalone dumps.
*
- * For more information please refer to Documentation/s390/zfcpdump.txt
+ * For more information please refer to Documentation/s390/zfcpdump.rst
*
* Copyright IBM Corp. 2003, 2008
* Author(s): Michael Holzheu
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 4534afc63591..427b2e24a8ce 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -16,9 +16,11 @@
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/slab.h>
+#include <linux/dmapool.h>
#include <asm/airq.h>
#include <asm/isc.h>
+#include <asm/cio.h>
#include "cio.h"
#include "cio_debug.h"
@@ -27,7 +29,7 @@
static DEFINE_SPINLOCK(airq_lists_lock);
static struct hlist_head airq_lists[MAX_ISC+1];
-static struct kmem_cache *airq_iv_cache;
+static struct dma_pool *airq_iv_cache;
/**
* register_adapter_interrupt() - register adapter interrupt handler
@@ -115,6 +117,11 @@ void __init init_airq_interrupts(void)
setup_irq(THIN_INTERRUPT, &airq_interrupt);
}
+static inline unsigned long iv_size(unsigned long bits)
+{
+ return BITS_TO_LONGS(bits) * sizeof(unsigned long);
+}
+
/**
* airq_iv_create - create an interrupt vector
* @bits: number of bits in the interrupt vector
@@ -132,17 +139,19 @@ struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
goto out;
iv->bits = bits;
iv->flags = flags;
- size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
+ size = iv_size(bits);
if (flags & AIRQ_IV_CACHELINE) {
- if ((cache_line_size() * BITS_PER_BYTE) < bits)
+ if ((cache_line_size() * BITS_PER_BYTE) < bits
+ || !airq_iv_cache)
goto out_free;
- iv->vector = kmem_cache_zalloc(airq_iv_cache, GFP_KERNEL);
+ iv->vector = dma_pool_zalloc(airq_iv_cache, GFP_KERNEL,
+ &iv->vector_dma);
if (!iv->vector)
goto out_free;
} else {
- iv->vector = kzalloc(size, GFP_KERNEL);
+ iv->vector = cio_dma_zalloc(size);
if (!iv->vector)
goto out_free;
}
@@ -178,10 +187,10 @@ out_free:
kfree(iv->ptr);
kfree(iv->bitlock);
kfree(iv->avail);
- if (iv->flags & AIRQ_IV_CACHELINE)
- kmem_cache_free(airq_iv_cache, iv->vector);
+ if (iv->flags & AIRQ_IV_CACHELINE && iv->vector)
+ dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
else
- kfree(iv->vector);
+ cio_dma_free(iv->vector, size);
kfree(iv);
out:
return NULL;
@@ -198,9 +207,9 @@ void airq_iv_release(struct airq_iv *iv)
kfree(iv->ptr);
kfree(iv->bitlock);
if (iv->flags & AIRQ_IV_CACHELINE)
- kmem_cache_free(airq_iv_cache, iv->vector);
+ dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
else
- kfree(iv->vector);
+ cio_dma_free(iv->vector, iv_size(iv->bits));
kfree(iv->avail);
kfree(iv);
}
@@ -295,12 +304,12 @@ unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
}
EXPORT_SYMBOL(airq_iv_scan);
-static int __init airq_init(void)
+int __init airq_init(void)
{
- airq_iv_cache = kmem_cache_create("airq_iv_cache", cache_line_size(),
- cache_line_size(), 0, NULL);
+ airq_iv_cache = dma_pool_create("airq_iv_cache", cio_get_dma_css_dev(),
+ cache_line_size(),
+ cache_line_size(), PAGE_SIZE);
if (!airq_iv_cache)
return -ENOMEM;
return 0;
}
-subsys_initcall(airq_init);
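
For readers following the airq.c conversion: a dma_pool hands out small fixed-size coherent buffers and returns both the CPU and the bus address of each allocation. A hedged sketch of the create/zalloc/free lifecycle against a hypothetical device (not the css code itself):

#include <linux/dmapool.h>
#include <linux/gfp.h>

/* dev is assumed to be a registered struct device with a DMA mask set. */
static int dma_pool_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	pool = dma_pool_create("demo", dev, 64, 64, PAGE_SIZE);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &dma);
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* hand dma to the hardware, touch the buffer through vaddr ... */

	dma_pool_free(pool, vaddr, dma);
	dma_pool_destroy(pool);
	return 0;
}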
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 603268a33ea1..73582a0a2622 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -63,7 +63,7 @@ static void ccwreq_stop(struct ccw_device *cdev, int rc)
return;
req->done = 1;
ccw_device_set_timeout(cdev, 0);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
if (rc && rc != -ENODEV && req->drc)
rc = req->drc;
req->callback(cdev, req->data, rc);
@@ -86,7 +86,7 @@ static void ccwreq_do(struct ccw_device *cdev)
continue;
}
/* Perform start function. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
rc = cio_start(sch, cp, (u8) req->mask);
if (rc == 0) {
/* I/O started successfully. */
@@ -169,7 +169,7 @@ int ccw_request_cancel(struct ccw_device *cdev)
*/
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
- struct irb *irb = &cdev->private->irb;
+ struct irb *irb = &cdev->private->dma_area->irb;
struct cmd_scsw *scsw = &irb->scsw.cmd;
enum uc_todo todo;
@@ -187,7 +187,8 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
CIO_TRACE_EVENT(2, "sensedata");
CIO_HEX_EVENT(2, &cdev->private->dev_id,
sizeof(struct ccw_dev_id));
- CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
+ CIO_HEX_EVENT(2, &cdev->private->dma_area->irb.ecw,
+ SENSE_MAX_COUNT);
/* Check for command reject. */
if (irb->ecw[0] & SNS0_CMD_REJECT)
return IO_REJECTED;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index a835b31aad99..6392a1b95b02 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -323,36 +323,6 @@ struct chsc_sei {
} __packed __aligned(PAGE_SIZE);
/*
- * Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
- */
-
-#define ND_VALIDITY_VALID 0
-#define ND_VALIDITY_OUTDATED 1
-#define ND_VALIDITY_INVALID 2
-
-struct node_descriptor {
- /* Flags. */
- union {
- struct {
- u32 validity:3;
- u32 reserved:5;
- } __packed;
- u8 byte0;
- } __packed;
-
- /* Node parameters. */
- u32 params:24;
-
- /* Node ID. */
- char type[6];
- char model[3];
- char manufacturer[3];
- char plant[2];
- char seq[12];
- u16 tag;
-} __packed;
-
-/*
* Link Incident Record as defined in SA22-7202, "ESCON I/O Interface"
*/
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 06a91743335a..ba7d2480613b 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -113,6 +113,7 @@ struct subchannel {
enum sch_todo todo;
struct work_struct todo_work;
struct schib_config config;
+ char *driver_override; /* Driver name to force a match */
} __attribute__ ((aligned(8)));
DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
@@ -135,6 +136,8 @@ extern int cio_commit_config(struct subchannel *sch);
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch);
+extern int __init airq_init(void);
+
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index aea502922646..e1f2d0eed544 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -20,6 +20,8 @@
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
+#include <linux/genalloc.h>
+#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>
@@ -165,6 +167,7 @@ static void css_subchannel_release(struct device *dev)
sch->config.intparm = 0;
cio_commit_config(sch);
+ kfree(sch->driver_override);
kfree(sch->lock);
kfree(sch);
}
@@ -224,6 +227,12 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
INIT_WORK(&sch->todo_work, css_sch_todo);
sch->dev.release = &css_subchannel_release;
device_initialize(&sch->dev);
+ /*
+ * The physical addresses of some of the dma structures that can
+ * belong to a subchannel need to fit 31 bit width (e.g. ccw).
+ */
+ sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
+ sch->dev.dma_mask = &sch->dev.coherent_dma_mask;
return sch;
err:
@@ -315,9 +324,57 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(modalias);
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ char *driver_override, *old, *cp;
+
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ device_lock(dev);
+ old = sch->driver_override;
+ if (strlen(driver_override)) {
+ sch->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ sch->driver_override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
+ device_unlock(dev);
+ return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
static struct attribute *subch_attrs[] = {
&dev_attr_type.attr,
&dev_attr_modalias.attr,
+ &dev_attr_driver_override.attr,
NULL,
};
@@ -899,6 +956,13 @@ static int __init setup_css(int nr)
dev_set_name(&css->device, "css%x", nr);
css->device.groups = cssdev_attr_groups;
css->device.release = channel_subsystem_release;
+ /*
+ * We currently allocate notifier bits with this (using
+ * css->device as the device argument with the DMA API)
+ * and are fine with 64 bit addresses.
+ */
+ css->device.coherent_dma_mask = DMA_BIT_MASK(64);
+ css->device.dma_mask = &css->device.coherent_dma_mask;
mutex_init(&css->mutex);
css->cssid = chsc_get_cssid(nr);
@@ -1018,6 +1082,111 @@ static struct notifier_block css_power_notifier = {
.notifier_call = css_power_event,
};
+#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
+static struct gen_pool *cio_dma_pool;
+
+/* Currently cio supports only a single css */
+struct device *cio_get_dma_css_dev(void)
+{
+ return &channel_subsystems[0]->device;
+}
+
+struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
+{
+ struct gen_pool *gp_dma;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ int i;
+
+ gp_dma = gen_pool_create(3, -1);
+ if (!gp_dma)
+ return NULL;
+ for (i = 0; i < nr_pages; ++i) {
+ cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
+ CIO_DMA_GFP);
+ if (!cpu_addr)
+ return gp_dma;
+ gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
+ dma_addr, PAGE_SIZE, -1);
+ }
+ return gp_dma;
+}
+
+static void __gp_dma_free_dma(struct gen_pool *pool,
+ struct gen_pool_chunk *chunk, void *data)
+{
+ size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
+
+ dma_free_coherent((struct device *) data, chunk_size,
+ (void *) chunk->start_addr,
+ (dma_addr_t) chunk->phys_addr);
+}
+
+void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
+{
+ if (!gp_dma)
+ return;
+ /* this is quite ugly but no better idea */
+ gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
+ gen_pool_destroy(gp_dma);
+}
+
+static int cio_dma_pool_init(void)
+{
+ /* No need to free up the resources: compiled in */
+ cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
+ if (!cio_dma_pool)
+ return -ENOMEM;
+ return 0;
+}
+
+void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size)
+{
+ dma_addr_t dma_addr;
+ unsigned long addr;
+ size_t chunk_size;
+
+ if (!gp_dma)
+ return NULL;
+ addr = gen_pool_alloc(gp_dma, size);
+ while (!addr) {
+ chunk_size = round_up(size, PAGE_SIZE);
+ addr = (unsigned long) dma_alloc_coherent(dma_dev,
+ chunk_size, &dma_addr, CIO_DMA_GFP);
+ if (!addr)
+ return NULL;
+ gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
+ addr = gen_pool_alloc(gp_dma, size);
+ }
+ return (void *) addr;
+}
+
+void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
+{
+ if (!cpu_addr)
+ return;
+ memset(cpu_addr, 0, size);
+ gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
+}
+
+/*
+ * Allocate dma memory from the css global pool. Intended for memory not
+ * specific to any single device within the css. The allocated memory
+ * is not guaranteed to be 31-bit addressable.
+ *
+ * Caution: Not suitable for early stuff like console.
+ */
+void *cio_dma_zalloc(size_t size)
+{
+ return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
+}
+
+void cio_dma_free(void *cpu_addr, size_t size)
+{
+ cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
+}
+
/*
* Now that the driver core is running, we can set up our channel subsystem.
* The struct subchannels are created during probing.
@@ -1059,16 +1228,22 @@ static int __init css_bus_init(void)
if (ret)
goto out_unregister;
ret = register_pm_notifier(&css_power_notifier);
- if (ret) {
- unregister_reboot_notifier(&css_reboot_notifier);
- goto out_unregister;
- }
+ if (ret)
+ goto out_unregister_rn;
+ ret = cio_dma_pool_init();
+ if (ret)
+ goto out_unregister_pmn;
+ airq_init();
css_init_done = 1;
/* Enable default isc for I/O subchannels. */
isc_register(IO_SCH_ISC);
return 0;
+out_unregister_pmn:
+ unregister_pm_notifier(&css_power_notifier);
+out_unregister_rn:
+ unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
while (i-- > 0) {
struct channel_subsystem *css = channel_subsystems[i];
@@ -1222,6 +1397,10 @@ static int css_bus_match(struct device *dev, struct device_driver *drv)
struct css_driver *driver = to_cssdriver(drv);
struct css_device_id *id;
+ /* When driver_override is set, only bind to the matching driver */
+ if (sch->driver_override && strcmp(sch->driver_override, drv->name))
+ return 0;
+
for (id = driver->subchannel_type; id->match_flags; id++) {
if (sch->st == id->type)
return 1;
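
The loop in cio_gp_dma_zalloc() above is the core idea of the new allocator: gen_pool_alloc() fails when the pool is exhausted, so the pool is refilled with another coherent chunk and the allocation retried. A condensed sketch of that grow-on-demand idiom using the same gen_pool calls:

#include <linux/genalloc.h>
#include <linux/dma-mapping.h>

static void *grow_and_alloc(struct gen_pool *gp, struct device *dev,
			    size_t size)
{
	unsigned long addr = gen_pool_alloc(gp, size);

	while (!addr) {
		size_t chunk = round_up(size, PAGE_SIZE);
		dma_addr_t dma;
		void *cpu = dma_alloc_coherent(dev, chunk, &dma, GFP_KERNEL);

		if (!cpu)
			return NULL;
		/* make the new chunk available to the pool, then retry */
		if (gen_pool_add_virt(gp, (unsigned long)cpu, dma, chunk, -1)) {
			dma_free_coherent(dev, chunk, cpu, dma);
			return NULL;
		}
		addr = gen_pool_alloc(gp, size);
	}
	return (void *)addr;
}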
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 1540229a37bb..9985b7484a6b 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -24,6 +24,7 @@
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>
+#include <linux/dma-mapping.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
@@ -687,6 +688,9 @@ ccw_device_release(struct device *dev)
struct ccw_device *cdev;
cdev = to_ccwdev(dev);
+ cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
+ sizeof(*cdev->private->dma_area));
+ cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
/* Release reference of parent subchannel. */
put_device(cdev->dev.parent);
kfree(cdev->private);
@@ -696,15 +700,33 @@ ccw_device_release(struct device *dev)
static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
struct ccw_device *cdev;
+ struct gen_pool *dma_pool;
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
- if (cdev) {
- cdev->private = kzalloc(sizeof(struct ccw_device_private),
- GFP_KERNEL | GFP_DMA);
- if (cdev->private)
- return cdev;
- }
+ if (!cdev)
+ goto err_cdev;
+ cdev->private = kzalloc(sizeof(struct ccw_device_private),
+ GFP_KERNEL | GFP_DMA);
+ if (!cdev->private)
+ goto err_priv;
+ cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
+ cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask;
+ dma_pool = cio_gp_dma_create(&cdev->dev, 1);
+ if (!dma_pool)
+ goto err_dma_pool;
+ cdev->private->dma_pool = dma_pool;
+ cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
+ sizeof(*cdev->private->dma_area));
+ if (!cdev->private->dma_area)
+ goto err_dma_area;
+ return cdev;
+err_dma_area:
+ cio_gp_dma_destroy(dma_pool, &cdev->dev);
+err_dma_pool:
+ kfree(cdev->private);
+err_priv:
kfree(cdev);
+err_cdev:
return ERR_PTR(-ENOMEM);
}
@@ -884,7 +906,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
wake_up(&ccw_device_init_wq);
break;
case DEV_STATE_OFFLINE:
- /*
+ /*
* We can't register the device in interrupt context so
* we schedule a work item.
*/
@@ -1062,6 +1084,14 @@ static int io_subchannel_probe(struct subchannel *sch)
if (!io_priv)
goto out_schedule;
+ io_priv->dma_area = dma_alloc_coherent(&sch->dev,
+ sizeof(*io_priv->dma_area),
+ &io_priv->dma_area_dma, GFP_KERNEL);
+ if (!io_priv->dma_area) {
+ kfree(io_priv);
+ goto out_schedule;
+ }
+
set_io_private(sch, io_priv);
css_schedule_eval(sch->schid);
return 0;
@@ -1088,6 +1118,8 @@ static int io_subchannel_remove(struct subchannel *sch)
set_io_private(sch, NULL);
spin_unlock_irq(sch->lock);
out_free:
+ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+ io_priv->dma_area, io_priv->dma_area_dma);
kfree(io_priv);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return 0;
@@ -1593,13 +1625,19 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
return ERR_CAST(sch);
io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
- if (!io_priv) {
- put_device(&sch->dev);
- return ERR_PTR(-ENOMEM);
- }
+ if (!io_priv)
+ goto err_priv;
+ io_priv->dma_area = dma_alloc_coherent(&sch->dev,
+ sizeof(*io_priv->dma_area),
+ &io_priv->dma_area_dma, GFP_KERNEL);
+ if (!io_priv->dma_area)
+ goto err_dma_area;
set_io_private(sch, io_priv);
cdev = io_subchannel_create_ccwdev(sch);
if (IS_ERR(cdev)) {
+ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+ io_priv->dma_area, io_priv->dma_area_dma);
+ set_io_private(sch, NULL);
put_device(&sch->dev);
kfree(io_priv);
return cdev;
@@ -1607,6 +1645,12 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
cdev->drv = drv;
ccw_device_set_int_class(cdev);
return cdev;
+
+err_dma_area:
+ kfree(io_priv);
+err_priv:
+ put_device(&sch->dev);
+ return ERR_PTR(-ENOMEM);
}
void __init ccw_device_destroy_console(struct ccw_device *cdev)
@@ -1617,6 +1661,8 @@ void __init ccw_device_destroy_console(struct ccw_device *cdev)
set_io_private(sch, NULL);
put_device(&sch->dev);
put_device(&cdev->dev);
+ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+ io_priv->dma_area, io_priv->dma_area_dma);
kfree(io_priv);
}
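
io_subchannel_allocate_dev() above also shows the kernel's goto-unwind convention: each failure label releases exactly what was acquired before the jump, in reverse order of acquisition. A generic standalone sketch of the shape (plain C, placeholder allocations):

#include <stdlib.h>
#include <errno.h>

struct ctx { void *a; void *b; };

static int setup(struct ctx *c)
{
	c->a = malloc(64);
	if (!c->a)
		goto err_a;
	c->b = malloc(64);
	if (!c->b)
		goto err_b;
	return 0;		/* success: caller now owns a and b */

err_b:
	free(c->a);		/* unwind in reverse order of acquisition */
err_a:
	return -ENOMEM;
}

int main(void)
{
	struct ctx c;

	if (setup(&c))
		return 1;
	free(c.b);
	free(c.a);
	return 0;
}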
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 9169af7dbb43..8fc267324ebb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -67,8 +67,10 @@ static void ccw_timeout_log(struct ccw_device *cdev)
sizeof(struct tcw), 0);
} else {
printk(KERN_WARNING "cio: orb indicates command mode\n");
- if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
- (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
+ if ((void *)(addr_t)orb->cmd.cpa ==
+ &private->dma_area->sense_ccw ||
+ (void *)(addr_t)orb->cmd.cpa ==
+ cdev->private->dma_area->iccws)
printk(KERN_WARNING "cio: last channel program "
"(intern):\n");
else
@@ -143,18 +145,22 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
void ccw_device_update_sense_data(struct ccw_device *cdev)
{
memset(&cdev->id, 0, sizeof(cdev->id));
- cdev->id.cu_type = cdev->private->senseid.cu_type;
- cdev->id.cu_model = cdev->private->senseid.cu_model;
- cdev->id.dev_type = cdev->private->senseid.dev_type;
- cdev->id.dev_model = cdev->private->senseid.dev_model;
+ cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type;
+ cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model;
+ cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type;
+ cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model;
}
int ccw_device_test_sense_data(struct ccw_device *cdev)
{
- return cdev->id.cu_type == cdev->private->senseid.cu_type &&
- cdev->id.cu_model == cdev->private->senseid.cu_model &&
- cdev->id.dev_type == cdev->private->senseid.dev_type &&
- cdev->id.dev_model == cdev->private->senseid.dev_model;
+ return cdev->id.cu_type ==
+ cdev->private->dma_area->senseid.cu_type &&
+ cdev->id.cu_model ==
+ cdev->private->dma_area->senseid.cu_model &&
+ cdev->id.dev_type ==
+ cdev->private->dma_area->senseid.dev_type &&
+ cdev->id.dev_model ==
+ cdev->private->dma_area->senseid.dev_model;
}
/*
@@ -342,7 +348,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
cio_disable_subchannel(sch);
/* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
cdev->private->state = state;
@@ -509,13 +515,14 @@ callback:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
- create_fake_irb(&cdev->private->irb,
+ create_fake_irb(&cdev->private->dma_area->irb,
cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
- &cdev->private->irb);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ &cdev->private->dma_area->irb);
+ memset(&cdev->private->dma_area->irb, 0,
+ sizeof(struct irb));
}
ccw_device_report_path_events(cdev);
ccw_device_handle_broken_paths(cdev);
@@ -672,7 +679,8 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
if (scsw_actl(&sch->schib.scsw) != 0 ||
(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
- (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
+ (scsw_stctl(&cdev->private->dma_area->irb.scsw) &
+ SCSW_STCTL_STATUS_PEND)) {
/*
* No final status yet or final status not yet delivered
* to the device driver. Can't do path verification now,
@@ -719,7 +727,7 @@ static int ccw_device_call_handler(struct ccw_device *cdev)
* - fast notification was requested (primary status)
* - unsolicited interrupts
*/
- stctl = scsw_stctl(&cdev->private->irb.scsw);
+ stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw);
ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
(stctl == SCSW_STCTL_STATUS_PEND);
@@ -735,9 +743,9 @@ static int ccw_device_call_handler(struct ccw_device *cdev)
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
- &cdev->private->irb);
+ &cdev->private->dma_area->irb);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
return 1;
}
@@ -759,7 +767,8 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
/* Unit check but no sense data. Need basic sense. */
if (ccw_device_do_sense(cdev, irb) != 0)
goto call_handler_unsol;
- memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+ memcpy(&cdev->private->dma_area->irb, irb,
+ sizeof(struct irb));
cdev->private->state = DEV_STATE_W4SENSE;
cdev->private->intparm = 0;
return;
@@ -842,7 +851,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
if (scsw_fctl(&irb->scsw) &
(SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
cdev->private->flags.dosense = 0;
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
ccw_device_accumulate_irb(cdev, irb);
goto call_handler;
}
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index f6df83a9dfbb..740996d0dc8c 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -99,7 +99,7 @@ static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
static int diag210_get_dev_info(struct ccw_device *cdev)
{
struct ccw_dev_id *dev_id = &cdev->private->dev_id;
- struct senseid *senseid = &cdev->private->senseid;
+ struct senseid *senseid = &cdev->private->dma_area->senseid;
struct diag210 diag_data;
int rc;
@@ -134,8 +134,10 @@ err_failed:
static void snsid_init(struct ccw_device *cdev)
{
cdev->private->flags.esid = 0;
- memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
- cdev->private->senseid.cu_type = 0xffff;
+
+ memset(&cdev->private->dma_area->senseid, 0,
+ sizeof(cdev->private->dma_area->senseid));
+ cdev->private->dma_area->senseid.cu_type = 0xffff;
}
/*
@@ -143,16 +145,16 @@ static void snsid_init(struct ccw_device *cdev)
*/
static int snsid_check(struct ccw_device *cdev, void *data)
{
- struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
+ struct cmd_scsw *scsw = &cdev->private->dma_area->irb.scsw.cmd;
int len = sizeof(struct senseid) - scsw->count;
/* Check for incomplete SENSE ID data. */
if (len < SENSE_ID_MIN_LEN)
goto out_restart;
- if (cdev->private->senseid.cu_type == 0xffff)
+ if (cdev->private->dma_area->senseid.cu_type == 0xffff)
goto out_restart;
/* Check for incompatible SENSE ID data. */
- if (cdev->private->senseid.reserved != 0xff)
+ if (cdev->private->dma_area->senseid.reserved != 0xff)
return -EOPNOTSUPP;
/* Check for extended-identification information. */
if (len > SENSE_ID_BASIC_LEN)
@@ -170,7 +172,7 @@ out_restart:
static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
{
struct ccw_dev_id *id = &cdev->private->dev_id;
- struct senseid *senseid = &cdev->private->senseid;
+ struct senseid *senseid = &cdev->private->dma_area->senseid;
int vm = 0;
if (rc && MACHINE_IS_VM) {
@@ -200,7 +202,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
CIO_TRACE_EVENT(4, "snsid");
CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
@@ -208,7 +210,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev)
snsid_init(cdev);
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_ID;
- cp->cda = (u32) (addr_t) &cdev->private->senseid;
+ cp->cda = (u32) (addr_t) &cdev->private->dma_area->senseid;
cp->count = sizeof(struct senseid);
cp->flags = CCW_FLAG_SLI;
/* Request setup. */
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 4435ae0b3027..d722458c5928 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -429,8 +429,8 @@ struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
if (cdev->private->flags.esid == 0)
return NULL;
for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
- if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
- return cdev->private->senseid.ciw + ciw_cnt;
+ if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct)
+ return cdev->private->dma_area->senseid.ciw + ciw_cnt;
return NULL;
}
@@ -699,6 +699,23 @@ void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
}
EXPORT_SYMBOL_GPL(ccw_device_get_schid);
+/*
+ * Allocate zeroed dma coherent 31 bit addressable memory using
+ * the subchannels dma pool. Maximal size of allocation supported
+ * is PAGE_SIZE.
+ */
+void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size)
+{
+ return cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
+}
+EXPORT_SYMBOL(ccw_device_dma_zalloc);
+
+void ccw_device_dma_free(struct ccw_device *cdev, void *cpu_addr, size_t size)
+{
+ cio_gp_dma_free(cdev->private->dma_pool, cpu_addr, size);
+}
+EXPORT_SYMBOL(ccw_device_dma_free);
+
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index d30a3babf176..767a85635a0f 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -57,7 +57,7 @@ out:
static void nop_build_cp(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
cp->cmd_code = CCW_CMD_NOOP;
cp->cda = 0;
@@ -134,9 +134,9 @@ err:
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
int i = pathmask_to_pos(req->lpm);
- struct pgid *pgid = &cdev->private->pgid[i];
+ struct pgid *pgid = &cdev->private->dma_area->pgid[i];
pgid->inf.fc = fn;
cp->cmd_code = CCW_CMD_SET_PGID;
@@ -300,7 +300,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
int *mismatch, u8 *reserved, u8 *reset)
{
- struct pgid *pgid = &cdev->private->pgid[0];
+ struct pgid *pgid = &cdev->private->dma_area->pgid[0];
struct pgid *first = NULL;
int lpm;
int i;
@@ -342,7 +342,7 @@ static u8 pgid_to_donepm(struct ccw_device *cdev)
lpm = 0x80 >> i;
if ((cdev->private->pgid_valid_mask & lpm) == 0)
continue;
- pgid = &cdev->private->pgid[i];
+ pgid = &cdev->private->dma_area->pgid[i];
if (sch->opm & lpm) {
if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
continue;
@@ -368,7 +368,8 @@ static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
int i;
for (i = 0; i < 8; i++)
- memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
+ memcpy(&cdev->private->dma_area->pgid[i], pgid,
+ sizeof(struct pgid));
}
/*
@@ -435,12 +436,12 @@ out:
static void snid_build_cp(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
int i = pathmask_to_pos(req->lpm);
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_PGID;
- cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
+ cp->cda = (u32) (addr_t) &cdev->private->dma_area->pgid[i];
cp->count = sizeof(struct pgid);
cp->flags = CCW_FLAG_SLI;
req->cp = cp;
@@ -516,7 +517,8 @@ static void verify_start(struct ccw_device *cdev)
sch->lpm = sch->schib.pmcw.pam;
/* Initialize PGID data. */
- memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+ memset(cdev->private->dma_area->pgid, 0,
+ sizeof(cdev->private->dma_area->pgid));
cdev->private->pgid_valid_mask = 0;
cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
cdev->private->path_notoper_mask = 0;
@@ -626,7 +628,7 @@ struct stlck_data {
static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
cp[0].cmd_code = CCW_CMD_STLCK;
cp[0].cda = (u32) (addr_t) buf1;
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 7d5c7892b2c4..0bd8f2642732 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -79,15 +79,15 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
* are conditions that have to be met for the extended control
* bit to have meaning. Sick.
*/
- cdev->private->irb.scsw.cmd.ectl = 0;
+ cdev->private->dma_area->irb.scsw.cmd.ectl = 0;
if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
!(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
- cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
+ cdev->private->dma_area->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
/* Check if extended control word is valid. */
- if (!cdev->private->irb.scsw.cmd.ectl)
+ if (!cdev->private->dma_area->irb.scsw.cmd.ectl)
return;
/* Copy concurrent sense / model dependent information. */
- memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
+ memcpy(&cdev->private->dma_area->irb.ecw, irb->ecw, sizeof(irb->ecw));
}
/*
@@ -118,7 +118,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
if (!ccw_device_accumulate_esw_valid(irb))
return;
- cdev_irb = &cdev->private->irb;
+ cdev_irb = &cdev->private->dma_area->irb;
/* Copy last path used mask. */
cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
@@ -210,7 +210,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
ccw_device_path_notoper(cdev);
/* No irb accumulation for transport mode irbs. */
if (scsw_is_tm(&irb->scsw)) {
- memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+ memcpy(&cdev->private->dma_area->irb, irb, sizeof(struct irb));
return;
}
/*
@@ -219,7 +219,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
if (!scsw_is_solicited(&irb->scsw))
return;
- cdev_irb = &cdev->private->irb;
+ cdev_irb = &cdev->private->dma_area->irb;
/*
* If the clear function had been performed, all formerly pending
@@ -227,7 +227,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
* intermediate accumulated status to the device driver.
*/
if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
/* Copy bits which are valid only for the start function. */
if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
@@ -329,9 +329,9 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
/*
* We have ending status but no sense information. Do a basic sense.
*/
- sense_ccw = &to_io_private(sch)->sense_ccw;
+ sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
- sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
+ sense_ccw->cda = (__u32) __pa(cdev->private->dma_area->irb.ecw);
sense_ccw->count = SENSE_MAX_COUNT;
sense_ccw->flags = CCW_FLAG_SLI;
@@ -364,7 +364,7 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
(irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
- cdev->private->irb.esw.esw0.erw.cons = 1;
+ cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
}
/* Check if path verification is required. */
@@ -386,7 +386,7 @@ ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
/* Check for basic sense. */
if (cdev->private->flags.dosense &&
!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
- cdev->private->irb.esw.esw0.erw.cons = 1;
+ cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
return 0;
}
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 90e4e3a7841b..c03b4a19974e 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -9,15 +9,20 @@
#include "css.h"
#include "orb.h"
+struct io_subchannel_dma_area {
+ struct ccw1 sense_ccw; /* static ccw for sense command */
+};
+
struct io_subchannel_private {
union orb orb; /* operation request block */
- struct ccw1 sense_ccw; /* static ccw for sense command */
struct ccw_device *cdev;/* pointer to the child ccw device */
struct {
unsigned int suspend:1; /* allow suspend */
unsigned int prefetch:1;/* deny prefetch */
unsigned int inter:1; /* suppress intermediate interrupts */
} __packed options;
+ struct io_subchannel_dma_area *dma_area;
+ dma_addr_t dma_area_dma;
} __aligned(8);
#define to_io_private(n) ((struct io_subchannel_private *) \
@@ -115,6 +120,13 @@ enum cdev_todo {
#define FAKE_CMD_IRB 1
#define FAKE_TM_IRB 2
+struct ccw_device_dma_area {
+ struct senseid senseid; /* SenseID info */
+ struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
+ struct irb irb; /* device status */
+ struct pgid pgid[8]; /* path group IDs per chpid*/
+};
+
struct ccw_device_private {
struct ccw_device *cdev;
struct subchannel *sch;
@@ -156,11 +168,7 @@ struct ccw_device_private {
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
struct qdio_irq *qdio_data;
- struct irb irb; /* device status */
int async_kill_io_rc;
- struct senseid senseid; /* SenseID info */
- struct pgid pgid[8]; /* path group IDs per chpid*/
- struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
struct work_struct todo_work;
enum cdev_todo todo;
wait_queue_head_t wait_q;
@@ -169,6 +177,8 @@ struct ccw_device_private {
struct list_head cmb_list; /* list of measured devices */
u64 cmb_start_time; /* clock value of cmb reset */
void *cmb_wait; /* deferred cmb enable/disable */
+ struct gen_pool *dma_pool;
+ struct ccw_device_dma_area *dma_area;
enum interruption_class int_class;
};
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 7b7620de2acd..730c4e68094b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -736,6 +736,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
switch (state) {
case SLSB_P_OUTPUT_EMPTY:
+ case SLSB_P_OUTPUT_PENDING:
/* the adapter got it */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
"out empty:%1d %02x", q->nr, count);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 99d7d2566a3a..d4101cecdc8d 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -150,6 +150,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
return -ENOMEM;
}
irq_ptr_qs[i] = q;
+ INIT_LIST_HEAD(&q->entry);
}
return 0;
}
@@ -178,6 +179,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
q->mask = 1 << (31 - i);
q->nr = i;
q->handler = handler;
+ INIT_LIST_HEAD(&q->entry);
}
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 28d59ac2204c..93ee067c10ca 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -79,7 +79,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
mutex_lock(&tiq_list_lock);
list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
- xchg(irq_ptr->dsci, 1 << 7);
}
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
@@ -87,14 +86,14 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
struct qdio_q *q;
q = irq_ptr->input_qs[0];
- /* if establish triggered an error */
- if (!q || !q->entry.prev || !q->entry.next)
+ if (!q)
return;
mutex_lock(&tiq_list_lock);
list_del_rcu(&q->entry);
mutex_unlock(&tiq_list_lock);
synchronize_rcu();
+ INIT_LIST_HEAD(&q->entry);
}
static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
@@ -178,6 +177,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @airq: pointer to adapter interrupt descriptor
+ * @floating: flag to recognize floating vs. directed interrupts (unused)
*/
static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
{
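The qdio changes above replace the fragile ->prev/->next probing in tiqdio_remove_input_queues() with a q->entry that is kept in a defined state: the setup paths initialize it, and removal re-initializes it after list_del_rcu(). A minimal sketch of the underlying idiom, with a hypothetical detach() helper that is not part of the patch:

#include <linux/list.h>

/* Hypothetical helper illustrating the idiom: keep a node's list_head
 * initialized whenever it is off-list, so membership can be tested
 * with list_empty() instead of inspecting raw pointers. */
static void detach(struct list_head *node)
{
	if (list_empty(node))		/* already detached */
		return;
	list_del(node);
	INIT_LIST_HEAD(node);		/* back to a defined off-list state */
}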
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 0e79799e9a71..1d4c893ead23 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -16,12 +16,6 @@
#include "vfio_ccw_cp.h"
-/*
- * Max length for ccw chain.
- * XXX: Limit to 256, need to check more?
- */
-#define CCWCHAIN_LEN_MAX 256
-
struct pfn_array {
/* Starting guest physical I/O address. */
unsigned long pa_iova;
@@ -33,11 +27,6 @@ struct pfn_array {
int pa_nr;
};
-struct pfn_array_table {
- struct pfn_array *pat_pa;
- int pat_nr;
-};
-
struct ccwchain {
struct list_head next;
struct ccw1 *ch_ccw;
@@ -46,35 +35,29 @@ struct ccwchain {
/* Count of the valid ccws in chain. */
int ch_len;
/* Pinned PAGEs for the original data. */
- struct pfn_array_table *ch_pat;
+ struct pfn_array *ch_pa;
};
/*
- * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
+ * pfn_array_alloc() - alloc memory for PFNs
* @pa: pfn_array on which to perform the operation
- * @mdev: the mediated device to perform pin/unpin operations
* @iova: target guest physical address
* @len: number of bytes that should be pinned from @iova
*
- * Attempt to allocate memory for PFNs, and pin user pages in memory.
+ * Attempt to allocate memory for PFNs.
*
* Usage of pfn_array:
* We expect (pa_nr == 0) and (pa_iova_pfn == NULL); any field in
* this structure will be filled in by this function.
*
* Returns:
- * Number of pages pinned on success.
- * If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
- * returns -EINVAL.
- * If no pages were pinned, returns -errno.
+ * 0 if PFNs are allocated
+ * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova_pfn is not NULL
+ * -ENOMEM if alloc failed
*/
-static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
- u64 iova, unsigned int len)
+static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
{
- int i, ret = 0;
-
- if (!len)
- return 0;
+ int i;
if (pa->pa_nr || pa->pa_iova_pfn)
return -EINVAL;
@@ -94,8 +77,27 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
- for (i = 1; i < pa->pa_nr; i++)
+ pa->pa_pfn[0] = -1ULL;
+ for (i = 1; i < pa->pa_nr; i++) {
pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
+ pa->pa_pfn[i] = -1ULL;
+ }
+
+ return 0;
+}
+
+/*
+ * pfn_array_pin() - Pin user pages in memory
+ * @pa: pfn_array on which to perform the operation
+ * @mdev: the mediated device to perform pin operations
+ *
+ * Returns number of pages pinned upon success.
+ * If the pin request partially succeeds, or fails completely,
+ * all pages are left unpinned and a negative error value is returned.
+ */
+static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
+{
+ int ret = 0;
ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
@@ -112,8 +114,6 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
err_out:
pa->pa_nr = 0;
- kfree(pa->pa_iova_pfn);
- pa->pa_iova_pfn = NULL;
return ret;
}
@@ -121,60 +121,30 @@ err_out:
/* Unpin the pages before releasing the memory. */
static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
{
- vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+ /* Only unpin if any pages were pinned to begin with */
+ if (pa->pa_nr)
+ vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
pa->pa_nr = 0;
kfree(pa->pa_iova_pfn);
}
-static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
-{
- pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
- if (unlikely(ZERO_OR_NULL_PTR(pat->pat_pa))) {
- pat->pat_nr = 0;
- return -ENOMEM;
- }
-
- pat->pat_nr = nr;
-
- return 0;
-}
-
-static void pfn_array_table_unpin_free(struct pfn_array_table *pat,
- struct device *mdev)
+static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
{
- int i;
-
- for (i = 0; i < pat->pat_nr; i++)
- pfn_array_unpin_free(pat->pat_pa + i, mdev);
-
- if (pat->pat_nr) {
- kfree(pat->pat_pa);
- pat->pat_pa = NULL;
- pat->pat_nr = 0;
- }
-}
-
-static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
- unsigned long iova)
-{
- struct pfn_array *pa = pat->pat_pa;
unsigned long iova_pfn = iova >> PAGE_SHIFT;
- int i, j;
+ int i;
- for (i = 0; i < pat->pat_nr; i++, pa++)
- for (j = 0; j < pa->pa_nr; j++)
- if (pa->pa_iova_pfn[j] == iova_pfn)
- return true;
+ for (i = 0; i < pa->pa_nr; i++)
+ if (pa->pa_iova_pfn[i] == iova_pfn)
+ return true;
return false;
}
-/* Create the list idal words for a pfn_array_table. */
-static inline void pfn_array_table_idal_create_words(
- struct pfn_array_table *pat,
+/* Create the list of IDAL words for a pfn_array. */
+static inline void pfn_array_idal_create_words(
+ struct pfn_array *pa,
unsigned long *idaws)
{
- struct pfn_array *pa;
- int i, j, k;
+ int i;
/*
* Idal words (except the first one) rely on the memory being 4k
@@ -183,19 +153,36 @@ static inline void pfn_array_table_idal_create_words(
* there will be no problem here to simply use the phys to create an
* idaw.
*/
- k = 0;
- for (i = 0; i < pat->pat_nr; i++) {
- pa = pat->pat_pa + i;
- for (j = 0; j < pa->pa_nr; j++) {
- idaws[k] = pa->pa_pfn[j] << PAGE_SHIFT;
- if (k == 0)
- idaws[k] += pa->pa_iova & (PAGE_SIZE - 1);
- k++;
+
+ for (i = 0; i < pa->pa_nr; i++)
+ idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
+
+ /* Adjust the first IDAW, since it may not start on a page boundary */
+ idaws[0] += pa->pa_iova & (PAGE_SIZE - 1);
+}
+
+static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
+{
+ struct ccw0 ccw0;
+ struct ccw1 *pccw1 = source;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ ccw0 = *(struct ccw0 *)pccw1;
+ if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
+ pccw1->cmd_code = CCW_CMD_TIC;
+ pccw1->flags = 0;
+ pccw1->count = 0;
+ } else {
+ pccw1->cmd_code = ccw0.cmd_code;
+ pccw1->flags = ccw0.flags;
+ pccw1->count = ccw0.count;
}
+ pccw1->cda = ccw0.cda;
+ pccw1++;
}
}
-
/*
* Within the domain (@mdev), copy @n bytes from a guest physical
* address (@iova) to a host physical address (@to).
@@ -209,9 +196,15 @@ static long copy_from_iova(struct device *mdev,
int i, ret;
unsigned long l, m;
- ret = pfn_array_alloc_pin(&pa, mdev, iova, n);
- if (ret <= 0)
+ ret = pfn_array_alloc(&pa, iova, n);
+ if (ret < 0)
+ return ret;
+
+ ret = pfn_array_pin(&pa, mdev);
+ if (ret < 0) {
+ pfn_array_unpin_free(&pa, mdev);
return ret;
+ }
l = n;
for (i = 0; i < pa.pa_nr; i++) {
@@ -235,55 +228,60 @@ static long copy_from_iova(struct device *mdev,
return l;
}
-static long copy_ccw_from_iova(struct channel_program *cp,
- struct ccw1 *to, u64 iova,
- unsigned long len)
-{
- struct ccw0 ccw0;
- struct ccw1 *pccw1;
- int ret;
- int i;
-
- ret = copy_from_iova(cp->mdev, to, iova, len * sizeof(struct ccw1));
- if (ret)
- return ret;
-
- if (!cp->orb.cmd.fmt) {
- pccw1 = to;
- for (i = 0; i < len; i++) {
- ccw0 = *(struct ccw0 *)pccw1;
- if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
- pccw1->cmd_code = CCW_CMD_TIC;
- pccw1->flags = 0;
- pccw1->count = 0;
- } else {
- pccw1->cmd_code = ccw0.cmd_code;
- pccw1->flags = ccw0.flags;
- pccw1->count = ccw0.count;
- }
- pccw1->cda = ccw0.cda;
- pccw1++;
- }
- }
-
- return ret;
-}
-
/*
* Helpers to operate ccwchain.
*/
-#define ccw_is_test(_ccw) (((_ccw)->cmd_code & 0x0F) == 0)
+#define ccw_is_read(_ccw) (((_ccw)->cmd_code & 0x03) == 0x02)
+#define ccw_is_read_backward(_ccw) (((_ccw)->cmd_code & 0x0F) == 0x0C)
+#define ccw_is_sense(_ccw) (((_ccw)->cmd_code & 0x0F) == CCW_CMD_BASIC_SENSE)
#define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP)
#define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC)
#define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA)
-
+#define ccw_is_skip(_ccw) ((_ccw)->flags & CCW_FLAG_SKIP)
#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))
/*
+ * ccw_does_data_transfer()
+ *
+ * Determine whether a CCW will move any data, such that the guest pages
+ * would need to be pinned before performing the I/O.
+ *
+ * Returns 1 if yes, 0 if no.
+ */
+static inline int ccw_does_data_transfer(struct ccw1 *ccw)
+{
+ /* If the count field is zero, then no data will be transferred */
+ if (ccw->count == 0)
+ return 0;
+
+ /* If the command is a NOP, then no data will be transferred */
+ if (ccw_is_noop(ccw))
+ return 0;
+
+ /* If the skip flag is off, then data will be transferred */
+ if (!ccw_is_skip(ccw))
+ return 1;
+
+ /*
+ * If the skip flag is on, it is only meaningful if the command
+ * code is a read, read backward, sense, or sense ID. In those
+ * cases, no data will be transferred.
+ */
+ if (ccw_is_read(ccw) || ccw_is_read_backward(ccw))
+ return 0;
+
+ if (ccw_is_sense(ccw))
+ return 0;
+
+ /* The skip flag is on, but it is ignored for this command code. */
+ return 1;
+}
+
+/*
* is_cpa_within_range()
*
* @cpa: channel program address being questioned
@@ -319,7 +317,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
/* Make ccw address aligned to 8. */
size = ((sizeof(*chain) + 7L) & -8L) +
sizeof(*chain->ch_ccw) * len +
- sizeof(*chain->ch_pat) * len;
+ sizeof(*chain->ch_pa) * len;
chain = kzalloc(size, GFP_DMA | GFP_KERNEL);
if (!chain)
return NULL;
@@ -328,7 +326,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
chain->ch_ccw = (struct ccw1 *)data;
data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
- chain->ch_pat = (struct pfn_array_table *)data;
+ chain->ch_pa = (struct pfn_array *)data;
chain->ch_len = len;
@@ -348,31 +346,12 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{
struct ccw1 *ccw = chain->ch_ccw + idx;
- if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw))
- return;
- if (!ccw->count)
+ if (ccw_is_tic(ccw))
return;
kfree((void *)(u64)ccw->cda);
}
-/* Unpin the pages then free the memory resources. */
-static void cp_unpin_free(struct channel_program *cp)
-{
- struct ccwchain *chain, *temp;
- int i;
-
- cp->initialized = false;
- list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
- for (i = 0; i < chain->ch_len; i++) {
- pfn_array_table_unpin_free(chain->ch_pat + i,
- cp->mdev);
- ccwchain_cda_free(chain, i);
- }
- ccwchain_free(chain);
- }
-}
-
/**
* ccwchain_calc_length - calculate the length of the ccw chain.
* @iova: guest physical address of the target ccw chain
@@ -388,25 +367,9 @@ static void cp_unpin_free(struct channel_program *cp)
*/
static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
{
- struct ccw1 *ccw, *p;
- int cnt;
-
- /*
- * Copy current chain from guest to host kernel.
- * Currently the chain length is limited to CCWCHAIN_LEN_MAX (256).
- * So copying 2K is enough (safe).
- */
- p = ccw = kcalloc(CCWCHAIN_LEN_MAX, sizeof(*ccw), GFP_KERNEL);
- if (!ccw)
- return -ENOMEM;
-
- cnt = copy_ccw_from_iova(cp, ccw, iova, CCWCHAIN_LEN_MAX);
- if (cnt) {
- kfree(ccw);
- return cnt;
- }
+ struct ccw1 *ccw = cp->guest_cp;
+ int cnt = 0;
- cnt = 0;
do {
cnt++;
@@ -415,10 +378,8 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
* orb specified one of the unsupported formats, we defer
* checking for IDAWs in unsupported formats to here.
*/
- if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) {
- kfree(p);
+ if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
return -EOPNOTSUPP;
- }
/*
* We want to keep counting if the current CCW has the
@@ -437,7 +398,6 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
if (cnt == CCWCHAIN_LEN_MAX + 1)
cnt = -EINVAL;
- kfree(p);
return cnt;
}
@@ -458,17 +418,23 @@ static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
static int ccwchain_loop_tic(struct ccwchain *chain,
struct channel_program *cp);
-static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp)
+static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
{
struct ccwchain *chain;
- int len, ret;
+ int len;
- /* May transfer to an existing chain. */
- if (tic_target_chain_exists(tic, cp))
- return 0;
+ /* Copy 2K (the most we support today) of possible CCWs */
+ len = copy_from_iova(cp->mdev, cp->guest_cp, cda,
+ CCWCHAIN_LEN_MAX * sizeof(struct ccw1));
+ if (len)
+ return len;
- /* Get chain length. */
- len = ccwchain_calc_length(tic->cda, cp);
+ /* Convert any Format-0 CCWs to Format-1 */
+ if (!cp->orb.cmd.fmt)
+ convert_ccw0_to_ccw1(cp->guest_cp, CCWCHAIN_LEN_MAX);
+
+ /* Count the CCWs in the current chain */
+ len = ccwchain_calc_length(cda, cp);
if (len < 0)
return len;
@@ -476,14 +442,10 @@ static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp)
chain = ccwchain_alloc(cp, len);
if (!chain)
return -ENOMEM;
- chain->ch_iova = tic->cda;
+ chain->ch_iova = cda;
- /* Copy the new chain from user. */
- ret = copy_ccw_from_iova(cp, chain->ch_ccw, tic->cda, len);
- if (ret) {
- ccwchain_free(chain);
- return ret;
- }
+ /* Copy the actual CCWs into the new chain */
+ memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1));
/* Loop for tics on this new chain. */
return ccwchain_loop_tic(chain, cp);
@@ -501,7 +463,12 @@ static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
if (!ccw_is_tic(tic))
continue;
- ret = ccwchain_handle_tic(tic, cp);
+ /* May transfer to an existing chain. */
+ if (tic_target_chain_exists(tic, cp))
+ continue;
+
+ /* Build a ccwchain for the next segment */
+ ret = ccwchain_handle_ccw(tic->cda, cp);
if (ret)
return ret;
}
@@ -534,115 +501,90 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
struct channel_program *cp)
{
struct ccw1 *ccw;
- struct pfn_array_table *pat;
+ struct pfn_array *pa;
+ u64 iova;
unsigned long *idaws;
int ret;
+ int bytes = 1;
+ int idaw_nr, idal_len;
+ int i;
ccw = chain->ch_ccw + idx;
- if (!ccw->count) {
- /*
- * We just want the translation result of any direct ccw
- * to be an IDA ccw, so let's add the IDA flag for it.
- * Although the flag will be ignored by firmware.
- */
- ccw->flags |= CCW_FLAG_IDA;
- return 0;
- }
-
- /*
- * Pin data page(s) in memory.
- * The number of pages actually is the count of the idaws which will be
- * needed when translating a direct ccw to a idal ccw.
- */
- pat = chain->ch_pat + idx;
- ret = pfn_array_table_init(pat, 1);
- if (ret)
- goto out_init;
-
- ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
- if (ret < 0)
- goto out_unpin;
+ if (ccw->count)
+ bytes = ccw->count;
- /* Translate this direct ccw to a idal ccw. */
- idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
- if (!idaws) {
- ret = -ENOMEM;
- goto out_unpin;
+ /* Calculate size of IDAL */
+ if (ccw_is_idal(ccw)) {
+ /* Read first IDAW to see if it's 4K-aligned or not. */
+ /* All subsequent IDAWs will be 4K-aligned. */
+ ret = copy_from_iova(cp->mdev, &iova, ccw->cda, sizeof(iova));
+ if (ret)
+ return ret;
+ } else {
+ iova = ccw->cda;
}
- ccw->cda = (__u32) virt_to_phys(idaws);
- ccw->flags |= CCW_FLAG_IDA;
-
- pfn_array_table_idal_create_words(pat, idaws);
-
- return 0;
-
-out_unpin:
- pfn_array_table_unpin_free(pat, cp->mdev);
-out_init:
- ccw->cda = 0;
- return ret;
-}
-
-static int ccwchain_fetch_idal(struct ccwchain *chain,
- int idx,
- struct channel_program *cp)
-{
- struct ccw1 *ccw;
- struct pfn_array_table *pat;
- unsigned long *idaws;
- u64 idaw_iova;
- unsigned int idaw_nr, idaw_len;
- int i, ret;
-
- ccw = chain->ch_ccw + idx;
-
- if (!ccw->count)
- return 0;
-
- /* Calculate size of idaws. */
- ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova));
- if (ret)
- return ret;
- idaw_nr = idal_nr_words((void *)(idaw_iova), ccw->count);
- idaw_len = idaw_nr * sizeof(*idaws);
-
- /* Pin data page(s) in memory. */
- pat = chain->ch_pat + idx;
- ret = pfn_array_table_init(pat, idaw_nr);
- if (ret)
- goto out_init;
+ idaw_nr = idal_nr_words((void *)iova, bytes);
+ idal_len = idaw_nr * sizeof(*idaws);
- /* Translate idal ccw to use new allocated idaws. */
- idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
+ /* Allocate an IDAL from host storage */
+ idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
if (!idaws) {
ret = -ENOMEM;
- goto out_unpin;
+ goto out_init;
}
- ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idaw_len);
- if (ret)
+ /*
+ * Allocate an array of PFNs for the pages to pin/translate.
+ * The number of pages is actually the count of the IDAWs
+ * required for the data transfer, since we only support
+ * 4K IDAWs today.
+ */
+ pa = chain->ch_pa + idx;
+ ret = pfn_array_alloc(pa, iova, bytes);
+ if (ret < 0)
goto out_free_idaws;
- ccw->cda = virt_to_phys(idaws);
+ if (ccw_is_idal(ccw)) {
+ /* Copy guest IDAL into host IDAL */
+ ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idal_len);
+ if (ret)
+ goto out_unpin;
- for (i = 0; i < idaw_nr; i++) {
- idaw_iova = *(idaws + i);
+ /*
+ * Copy guest IDAWs into pfn_array, in case the memory they
+ * occupy is not contiguous.
+ */
+ for (i = 0; i < idaw_nr; i++)
+ pa->pa_iova_pfn[i] = idaws[i] >> PAGE_SHIFT;
+ } else {
+ /*
+ * No action is required here; the iova addresses in pfn_array
+ * were initialized sequentially in pfn_array_alloc() beginning
+ * with the contents of ccw->cda.
+ */
+ }
- ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev,
- idaw_iova, 1);
+ if (ccw_does_data_transfer(ccw)) {
+ ret = pfn_array_pin(pa, cp->mdev);
if (ret < 0)
- goto out_free_idaws;
+ goto out_unpin;
+ } else {
+ pa->pa_nr = 0;
}
- pfn_array_table_idal_create_words(pat, idaws);
+ ccw->cda = (__u32) virt_to_phys(idaws);
+ ccw->flags |= CCW_FLAG_IDA;
+
+ /* Populate the IDAL with pinned/translated addresses from pfn */
+ pfn_array_idal_create_words(pa, idaws);
return 0;
+out_unpin:
+ pfn_array_unpin_free(pa, cp->mdev);
out_free_idaws:
kfree(idaws);
-out_unpin:
- pfn_array_table_unpin_free(pat, cp->mdev);
out_init:
ccw->cda = 0;
return ret;
@@ -660,15 +602,9 @@ static int ccwchain_fetch_one(struct ccwchain *chain,
{
struct ccw1 *ccw = chain->ch_ccw + idx;
- if (ccw_is_test(ccw) || ccw_is_noop(ccw))
- return 0;
-
if (ccw_is_tic(ccw))
return ccwchain_fetch_tic(chain, idx, cp);
- if (ccw_is_idal(ccw))
- return ccwchain_fetch_idal(chain, idx, cp);
-
return ccwchain_fetch_direct(chain, idx, cp);
}
@@ -691,9 +627,7 @@ static int ccwchain_fetch_one(struct ccwchain *chain,
*/
int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
{
- u64 iova = orb->cmd.cpa;
- struct ccwchain *chain;
- int len, ret;
+ int ret;
/*
* XXX:
@@ -706,28 +640,11 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
memcpy(&cp->orb, orb, sizeof(*orb));
cp->mdev = mdev;
- /* Get chain length. */
- len = ccwchain_calc_length(iova, cp);
- if (len < 0)
- return len;
-
- /* Alloc mem for the head chain. */
- chain = ccwchain_alloc(cp, len);
- if (!chain)
- return -ENOMEM;
- chain->ch_iova = iova;
-
- /* Copy the head chain from guest. */
- ret = copy_ccw_from_iova(cp, chain->ch_ccw, iova, len);
- if (ret) {
- ccwchain_free(chain);
- return ret;
- }
-
- /* Now loop for its TICs. */
- ret = ccwchain_loop_tic(chain, cp);
+ /* Build a ccwchain for the first CCW segment */
+ ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);
if (ret)
- cp_unpin_free(cp);
+ cp_free(cp);
+
/* It is safe to force: if it is not set but IDALs are used,
* ccwchain_calc_length() returns an error.
*/
@@ -750,8 +667,20 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
*/
void cp_free(struct channel_program *cp)
{
- if (cp->initialized)
- cp_unpin_free(cp);
+ struct ccwchain *chain, *temp;
+ int i;
+
+ if (!cp->initialized)
+ return;
+
+ cp->initialized = false;
+ list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
+ for (i = 0; i < chain->ch_len; i++) {
+ pfn_array_unpin_free(chain->ch_pa + i, cp->mdev);
+ ccwchain_cda_free(chain, i);
+ }
+ ccwchain_free(chain);
+ }
}
/**
@@ -886,7 +815,11 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
*/
list_for_each_entry(chain, &cp->ccwchain_list, next) {
ccw_head = (u32)(u64)chain->ch_ccw;
- if (is_cpa_within_range(cpa, ccw_head, chain->ch_len)) {
+ /*
+ * On successful execution, cpa points just beyond the end
+ * of the chain.
+ */
+ if (is_cpa_within_range(cpa, ccw_head, chain->ch_len + 1)) {
/*
* (cpa - ccw_head) is the offset value of the host
* physical ccw to its chain head.
@@ -919,8 +852,7 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova)
list_for_each_entry(chain, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++)
- if (pfn_array_table_iova_pinned(chain->ch_pat + i,
- iova))
+ if (pfn_array_iova_pinned(chain->ch_pa + i, iova))
return true;
}
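The pfn_array refactor above splits the old pfn_array_alloc_pin() into an allocation step and a pin step that callers sequence themselves. A condensed sketch of the new calling convention, mirroring copy_from_iova() above (the wrapper function itself is hypothetical):

/* Sketch of the new calling convention; error handling condensed. */
static int map_guest_range(struct device *mdev, u64 iova, unsigned int len)
{
	struct pfn_array pa = {};	/* pa_nr == 0, pa_iova_pfn == NULL */
	int ret;

	ret = pfn_array_alloc(&pa, iova, len);	/* fill pa_iova_pfn[], pfn[] = -1 */
	if (ret < 0)
		return ret;

	ret = pfn_array_pin(&pa, mdev);		/* translate and pin the pages */
	if (ret < 0) {
		pfn_array_unpin_free(&pa, mdev); /* safe: unpins only if pa_nr */
		return ret;
	}

	/* ... use pa.pa_pfn[] to build IDAWs ... */

	pfn_array_unpin_free(&pa, mdev);
	return 0;
}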
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index 3c20cd208da5..7cdc38049033 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -16,6 +16,12 @@
#include "orb.h"
+/*
+ * Max length for ccw chain.
+ * XXX: Limit to 256, need to check more?
+ */
+#define CCWCHAIN_LEN_MAX 256
+
/**
* struct channel_program - manage information for channel program
* @ccwchain_list: list head of ccwchains
@@ -32,6 +38,7 @@ struct channel_program {
union orb orb;
struct device *mdev;
bool initialized;
+ struct ccw1 *guest_cp;
};
extern int cp_init(struct channel_program *cp, struct device *mdev,
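With CCWCHAIN_LEN_MAX moved into the header next to guest_cp, the "Copy 2K" comment in ccwchain_handle_ccw() follows directly from the constants: a format-1 CCW is 8 bytes, so 256 of them are exactly 2048 bytes. An illustrative compile-time check of that arithmetic, not part of the patch:

#include <linux/build_bug.h>
#include <asm/cio.h>	/* struct ccw1 */

/* Illustrative only: the 2K guest copy buffer holds exactly
 * CCWCHAIN_LEN_MAX (256) format-1 CCWs of 8 bytes each. */
static inline void check_guest_cp_size(void)
{
	BUILD_BUG_ON(CCWCHAIN_LEN_MAX * sizeof(struct ccw1) != 2048);
}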
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 9125f7f4e64c..2b90a5ecaeb9 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -95,11 +95,11 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
memcpy(private->io_region->irb_area, irb, sizeof(*irb));
mutex_unlock(&private->io_mutex);
- if (private->io_trigger)
- eventfd_signal(private->io_trigger, 1);
-
if (private->mdev && is_final)
private->state = VFIO_CCW_STATE_IDLE;
+
+ if (private->io_trigger)
+ eventfd_signal(private->io_trigger, 1);
}
/*
@@ -129,6 +129,11 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
if (!private)
return -ENOMEM;
+ private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
+ GFP_KERNEL);
+ if (!private->cp.guest_cp)
+ goto out_free;
+
private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
GFP_KERNEL | GFP_DMA);
if (!private->io_region)
@@ -169,6 +174,7 @@ out_free:
kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
if (private->io_region)
kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ kfree(private->cp.guest_cp);
kfree(private);
return ret;
}
@@ -185,6 +191,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ kfree(private->cp.guest_cp);
kfree(private);
return 0;
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 45eb0c14b880..7f418d2d8cdf 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -690,7 +690,7 @@ int pkey_clr2protkey(u32 keytype,
*/
if (!cpacf_test_func(&pckmo_functions, fc)) {
DEBUG_ERR("%s pckmo functions not available\n", __func__);
- return -EOPNOTSUPP;
+ return -ENODEV;
}
/* prepare param block */
@@ -1695,15 +1695,15 @@ static int __init pkey_init(void)
* are able to work with protected keys.
*/
if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
- return -EOPNOTSUPP;
+ return -ENODEV;
/* check for kmc instructions available */
if (!cpacf_query(CPACF_KMC, &kmc_functions))
- return -EOPNOTSUPP;
+ return -ENODEV;
if (!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256))
- return -EOPNOTSUPP;
+ return -ENODEV;
pkey_debug_init();
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index e9824c35c34f..003662aa8060 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -5,6 +5,7 @@
* Copyright IBM Corp. 2018
*
* Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
*/
#include <linux/module.h>
@@ -40,14 +41,45 @@ static struct ap_device_id ap_queue_ids[] = {
MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids);
+/**
+ * vfio_ap_queue_dev_probe:
+ *
+ * Allocate a vfio_ap_queue structure and associate it
+ * with the device as driver_data.
+ */
static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
{
+ struct vfio_ap_queue *q;
+
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+ dev_set_drvdata(&apdev->device, q);
+ q->apqn = to_ap_queue(&apdev->device)->qid;
+ q->saved_isc = VFIO_AP_ISC_INVALID;
return 0;
}
+/**
+ * vfio_ap_queue_dev_remove:
+ *
+ * Takes the matrix lock to avoid actions on this device while it is
+ * being removed, then frees the associated vfio_ap_queue structure.
+ */
static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
{
- /* Nothing to do yet */
+ struct vfio_ap_queue *q;
+ int apid, apqi;
+
+ mutex_lock(&matrix_dev->lock);
+ q = dev_get_drvdata(&apdev->device);
+ dev_set_drvdata(&apdev->device, NULL);
+ apid = AP_QID_CARD(q->apqn);
+ apqi = AP_QID_QUEUE(q->apqn);
+ vfio_ap_mdev_reset_queue(apid, apqi, 1);
+ vfio_ap_irq_disable(q);
+ kfree(q);
+ mutex_unlock(&matrix_dev->lock);
}
static void vfio_ap_matrix_dev_release(struct device *dev)
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 900b9cf20ca5..2c9fb1423a39 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -24,6 +24,296 @@
#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
+static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
+
+static int match_apqn(struct device *dev, void *data)
+{
+ struct vfio_ap_queue *q = dev_get_drvdata(dev);
+
+ return (q->apqn == *(int *)(data)) ? 1 : 0;
+}
+
+/**
+ * vfio_ap_get_queue: Retrieve a queue with a specific APQN from a list
+ * @matrix_mdev: the associated mediated matrix
+ * @apqn: The queue APQN
+ *
+ * Retrieve a queue with a specific APQN from the list of devices
+ * attached to the vfio_ap driver.
+ * Verify that the APID and the APQI are set in the matrix.
+ *
+ * Returns the pointer to the associated vfio_ap_queue
+ */
+static struct vfio_ap_queue *vfio_ap_get_queue(
+ struct ap_matrix_mdev *matrix_mdev,
+ int apqn)
+{
+ struct vfio_ap_queue *q;
+ struct device *dev;
+
+ if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
+ return NULL;
+ if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
+ return NULL;
+
+ dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
+ &apqn, match_apqn);
+ if (!dev)
+ return NULL;
+ q = dev_get_drvdata(dev);
+ q->matrix_mdev = matrix_mdev;
+ put_device(dev);
+
+ return q;
+}
+
+/**
+ * vfio_ap_wait_for_irqclear
+ * @apqn: The AP Queue number
+ *
+ * Checks the IRQ bit for the status of this APQN using ap_tapq.
+ * Returns when ap_tapq succeeds and the IRQ bit is clear, or when
+ * ap_tapq fails because the AP is invalid, deconfigured or
+ * checkstopped.
+ * Otherwise retries up to 5 times, waiting 20ms between attempts.
+ *
+ */
+static void vfio_ap_wait_for_irqclear(int apqn)
+{
+ struct ap_queue_status status;
+ int retry = 5;
+
+ do {
+ status = ap_tapq(apqn, NULL);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ if (!status.irq_enabled)
+ return;
+ /* Fall through */
+ case AP_RESPONSE_BUSY:
+ msleep(20);
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ default:
+ WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
+ status.response_code, apqn);
+ return;
+ }
+ } while (--retry);
+
+ WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
+ __func__, status.response_code, apqn);
+}
+
+/**
+ * vfio_ap_free_aqic_resources
+ * @q: The vfio_ap_queue
+ *
+ * Unregisters the ISC in the GIB when the saved ISC is not invalid.
+ * Unpins the guest's page holding the NIB when it exists.
+ * Resets saved_pfn and saved_isc to invalid values.
+ * Clears the pointer to the matrix mediated device.
+ *
+ */
+static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
+{
+ if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev)
+ kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
+ if (q->saved_pfn && q->matrix_mdev)
+ vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
+ &q->saved_pfn, 1);
+ q->saved_pfn = 0;
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+ q->matrix_mdev = NULL;
+}
+
+/**
+ * vfio_ap_irq_disable
+ * @q: The vfio_ap_queue
+ *
+ * Uses ap_aqic to disable the interruption. On success, or when a reset
+ * is in progress or the IRQ disable was already processed, calls
+ * vfio_ap_wait_for_irqclear() to check that the IRQ bit is clear
+ * and calls vfio_ap_free_aqic_resources() to free the resources associated
+ * with the AP interrupt handling.
+ *
+ * In the case the AP is busy, or a reset is in progress,
+ * retries after 20ms, up to 5 times.
+ *
+ * Gives up when ap_aqic fails because the AP is invalid, deconfigured
+ * or checkstopped.
+ */
+struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
+{
+ struct ap_qirq_ctrl aqic_gisa = {};
+ struct ap_queue_status status;
+ int retries = 5;
+
+ do {
+ status = ap_aqic(q->apqn, aqic_gisa, NULL);
+ switch (status.response_code) {
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ case AP_RESPONSE_NORMAL:
+ vfio_ap_wait_for_irqclear(q->apqn);
+ goto end_free;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ msleep(20);
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_INVALID_ADDRESS:
+ default:
+ /* All cases in default mean AP not operational */
+ WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
+ status.response_code);
+ goto end_free;
+ }
+ } while (retries--);
+
+ WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
+ status.response_code);
+end_free:
+ vfio_ap_free_aqic_resources(q);
+ return status;
+}
+
+/**
+ * vfio_ap_irq_enable: Enable interruption for an APQN
+ *
+ * @dev: the device associated with the ap_queue
+ * @q: the vfio_ap_queue holding AQIC parameters
+ *
+ * Pin the NIB saved in *q
+ * Register the guest ISC to GIB interface and retrieve the
+ * host ISC to issue the host side PQAP/AQIC
+ *
+ * Response.status may be set to AP_RESPONSE_INVALID_ADDRESS in case
+ * vfio_pin_pages() fails.
+ *
+ * Otherwise return the ap_queue_status returned by the ap_aqic(),
+ * all retry handling will be done by the guest.
+ */
+static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
+ int isc,
+ unsigned long nib)
+{
+ struct ap_qirq_ctrl aqic_gisa = {};
+ struct ap_queue_status status = {};
+ struct kvm_s390_gisa *gisa;
+ struct kvm *kvm;
+ unsigned long h_nib, g_pfn, h_pfn;
+ int ret;
+
+ g_pfn = nib >> PAGE_SHIFT;
+ ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &h_pfn);
+ switch (ret) {
+ case 1:
+ break;
+ default:
+ status.response_code = AP_RESPONSE_INVALID_ADDRESS;
+ return status;
+ }
+
+ kvm = q->matrix_mdev->kvm;
+ gisa = kvm->arch.gisa_int.origin;
+
+ h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
+ aqic_gisa.gisc = isc;
+ aqic_gisa.isc = kvm_s390_gisc_register(kvm, isc);
+ aqic_gisa.ir = 1;
+ aqic_gisa.gisa = (uint64_t)gisa >> 4;
+
+ status = ap_aqic(q->apqn, aqic_gisa, (void *)h_nib);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ /* See if we did clear older IRQ configuration */
+ vfio_ap_free_aqic_resources(q);
+ q->saved_pfn = g_pfn;
+ q->saved_isc = isc;
+ break;
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ /* We could not modify IRQ settings: clear new configuration */
+ vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1);
+ kvm_s390_gisc_unregister(kvm, isc);
+ break;
+ default:
+ pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
+ status.response_code);
+ vfio_ap_irq_disable(q);
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * handle_pqap: PQAP instruction callback
+ *
+ * @vcpu: The vcpu on which we received the PQAP instruction
+ *
+ * Get the general register contents to initialize internal variables.
+ * REG[0]: APQN
+ * REG[1]: IR and ISC
+ * REG[2]: NIB
+ *
+ * Response.status may be set to one of the following response codes:
+ * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
+ * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
+ * - AP_RESPONSE_NORMAL (0) : in case of success
+ * Check vfio_ap_irq_enable() and vfio_ap_irq_disable() for other possible RC.
+ * We take the matrix_dev lock to ensure serialization on queues and
+ * mediated device access.
+ *
+ * Returns 0 if we could handle the request inside KVM;
+ * otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
+ */
+static int handle_pqap(struct kvm_vcpu *vcpu)
+{
+ uint64_t status;
+ uint16_t apqn;
+ struct vfio_ap_queue *q;
+ struct ap_queue_status qstatus = {
+ .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
+ struct ap_matrix_mdev *matrix_mdev;
+
+ /* If we do not use the AIV facility, just go to userland */
+ if (!(vcpu->arch.sie_block->eca & ECA_AIV))
+ return -EOPNOTSUPP;
+
+ apqn = vcpu->run->s.regs.gprs[0] & 0xffff;
+ mutex_lock(&matrix_dev->lock);
+
+ if (!vcpu->kvm->arch.crypto.pqap_hook)
+ goto out_unlock;
+ matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
+ struct ap_matrix_mdev, pqap_hook);
+
+ q = vfio_ap_get_queue(matrix_mdev, apqn);
+ if (!q)
+ goto out_unlock;
+
+ status = vcpu->run->s.regs.gprs[1];
+
+ /* If the IR bit (16) is set, we enable the interrupt */
+ if ((status >> (63 - 16)) & 0x01)
+ qstatus = vfio_ap_irq_enable(q, status & 0x07,
+ vcpu->run->s.regs.gprs[2]);
+ else
+ qstatus = vfio_ap_irq_disable(q);
+
+out_unlock:
+ memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
+ vcpu->run->s.regs.gprs[1] >>= 32;
+ mutex_unlock(&matrix_dev->lock);
+ return 0;
+}
+
static void vfio_ap_matrix_init(struct ap_config_info *info,
struct ap_matrix *matrix)
{
@@ -45,8 +335,11 @@ static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
return -ENOMEM;
}
+ matrix_mdev->mdev = mdev;
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
mdev_set_drvdata(mdev, matrix_mdev);
+ matrix_mdev->pqap_hook.hook = handle_pqap;
+ matrix_mdev->pqap_hook.owner = THIS_MODULE;
mutex_lock(&matrix_dev->lock);
list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
mutex_unlock(&matrix_dev->lock);
@@ -62,6 +355,7 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev)
return -EBUSY;
mutex_lock(&matrix_dev->lock);
+ vfio_ap_mdev_reset_queues(mdev);
list_del(&matrix_mdev->node);
mutex_unlock(&matrix_dev->lock);
@@ -754,11 +1048,42 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
}
matrix_mdev->kvm = kvm;
+ kvm_get_kvm(kvm);
+ kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
mutex_unlock(&matrix_dev->lock);
return 0;
}
+/*
+ * vfio_ap_mdev_iommu_notifier: IOMMU notifier callback
+ *
+ * @nb: The notifier block
+ * @action: Action to be taken
+ * @data: data associated with the request
+ *
+ * For an UNMAP request, unpin the guest IOVA (the NIB guest address we
+ * pinned before). Other requests are ignored.
+ *
+ */
+static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier);
+
+ if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
+ struct vfio_iommu_type1_dma_unmap *unmap = data;
+ unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;
+
+ vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -790,15 +1115,36 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
- unsigned int retry)
+static void vfio_ap_irq_disable_apqn(int apqn)
+{
+ struct device *dev;
+ struct vfio_ap_queue *q;
+
+ dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
+ &apqn, match_apqn);
+ if (dev) {
+ q = dev_get_drvdata(dev);
+ vfio_ap_irq_disable(q);
+ put_device(dev);
+ }
+}
+
+int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+ unsigned int retry)
{
struct ap_queue_status status;
+ int retry2 = 2;
+ int apqn = AP_MKQID(apid, apqi);
do {
- status = ap_zapq(AP_MKQID(apid, apqi));
+ status = ap_zapq(apqn);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
+ while (!status.queue_empty && retry2--) {
+ msleep(20);
+ status = ap_tapq(apqn, NULL);
+ }
+ WARN_ON_ONCE(retry2 <= 0);
return 0;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
@@ -832,6 +1178,7 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
*/
if (ret)
rc = ret;
+ vfio_ap_irq_disable_apqn(AP_MKQID(apid, apqi));
}
}
@@ -858,20 +1205,37 @@ static int vfio_ap_mdev_open(struct mdev_device *mdev)
return ret;
}
- return 0;
+ matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
+ events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &events, &matrix_mdev->iommu_notifier);
+ if (!ret)
+ return ret;
+
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ &matrix_mdev->group_notifier);
+ module_put(THIS_MODULE);
+ return ret;
}
static void vfio_ap_mdev_release(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
- if (matrix_mdev->kvm)
+ mutex_lock(&matrix_dev->lock);
+ if (matrix_mdev->kvm) {
kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+ matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
+ vfio_ap_mdev_reset_queues(mdev);
+ kvm_put_kvm(matrix_mdev->kvm);
+ matrix_mdev->kvm = NULL;
+ }
+ mutex_unlock(&matrix_dev->lock);
- vfio_ap_mdev_reset_queues(mdev);
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &matrix_mdev->iommu_notifier);
vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
&matrix_mdev->group_notifier);
- matrix_mdev->kvm = NULL;
module_put(THIS_MODULE);
}
@@ -900,6 +1264,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
{
int ret;
+ mutex_lock(&matrix_dev->lock);
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
ret = vfio_ap_mdev_get_device_info(arg);
@@ -911,6 +1276,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
ret = -EOPNOTSUPP;
break;
}
+ mutex_unlock(&matrix_dev->lock);
return ret;
}
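handle_pqap() above pulls the AQIC request out of the guest's general registers; restated on its own, the bit layout is easier to follow. The struct and function names below are illustrative only; the shifts and masks are the same ones the patch uses.

#include <linux/types.h>

/* Restates the GPR decode in handle_pqap(); names are illustrative. */
struct aqic_req {
	u16  apqn;	/* GPR 0, bits 48-63: the queue to operate on */
	bool ir;	/* GPR 1, bit 16: interruptions requested? */
	u8   isc;	/* GPR 1, bits 61-63: guest ISC */
	u64  nib;	/* GPR 2: notification indicator byte address */
};

static struct aqic_req decode_aqic(u64 gpr0, u64 gpr1, u64 gpr2)
{
	struct aqic_req r = {
		.apqn = gpr0 & 0xffff,
		.ir   = (gpr1 >> (63 - 16)) & 0x01,
		.isc  = gpr1 & 0x07,
		.nib  = gpr2,
	};
	return r;
}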
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index 76b7f98e47e9..f46dde56b464 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -4,6 +4,7 @@
*
* Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
* Halil Pasic <pasic@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
*
* Copyright IBM Corp. 2018
*/
@@ -16,6 +17,7 @@
#include <linux/mdev.h>
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/kvm_host.h>
#include "ap_bus.h"
@@ -80,10 +82,23 @@ struct ap_matrix_mdev {
struct list_head node;
struct ap_matrix matrix;
struct notifier_block group_notifier;
+ struct notifier_block iommu_notifier;
struct kvm *kvm;
+ struct kvm_s390_module_hook pqap_hook;
+ struct mdev_device *mdev;
};
extern int vfio_ap_mdev_register(void);
extern void vfio_ap_mdev_unregister(void);
+int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+ unsigned int retry);
+struct vfio_ap_queue {
+ struct ap_matrix_mdev *matrix_mdev;
+ unsigned long saved_pfn;
+ int apqn;
+#define VFIO_AP_ISC_INVALID 0xff
+ unsigned char saved_isc;
+};
+struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q);
#endif /* _VFIO_AP_PRIVATE_H_ */
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 0cbcc238ef98..12fe9deb265e 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -567,6 +567,10 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
*fcode = payload_hdr->func_val & 0xFFFF;
+ /* enable special processing based on the CPRB's flags special bit */
+ if (msg->cprbx.flags & 0x20)
+ ap_msg->special = 1;
+
return 0;
}
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 7c5a25ddf832..ced896d1534a 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -7,10 +7,10 @@ config LCS
prompt "Lan Channel Station Interface"
depends on CCW && NETDEVICES && (ETHERNET || FDDI)
help
- Select this option if you want to use LCS networking on IBM System z.
- This device driver supports FDDI (IEEE 802.7) and Ethernet.
- To compile as a module, choose M. The module name is lcs.
- If you do not know what it is, it's safe to choose Y.
+ Select this option if you want to use LCS networking on IBM System z.
+ This device driver supports FDDI (IEEE 802.7) and Ethernet.
+ To compile as a module, choose M. The module name is lcs.
+ If you do not know what it is, it's safe to choose Y.
config CTCM
def_tristate m
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 6a3076881321..1a55e5942d36 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -46,9 +46,15 @@ struct vq_config_block {
#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */
+struct vcdev_dma_area {
+ unsigned long indicators;
+ unsigned long indicators2;
+ struct vq_config_block config_block;
+ __u8 status;
+};
+
struct virtio_ccw_device {
struct virtio_device vdev;
- __u8 *status;
__u8 config[VIRTIO_CCW_CONFIG_SIZE];
struct ccw_device *cdev;
__u32 curr_io;
@@ -58,17 +64,24 @@ struct virtio_ccw_device {
spinlock_t lock;
struct mutex io_lock; /* Serializes I/O requests */
struct list_head virtqueues;
- unsigned long indicators;
- unsigned long indicators2;
- struct vq_config_block *config_block;
bool is_thinint;
bool going_away;
bool device_lost;
unsigned int config_ready;
void *airq_info;
- u64 dma_mask;
+ struct vcdev_dma_area *dma_area;
};
+static inline unsigned long *indicators(struct virtio_ccw_device *vcdev)
+{
+ return &vcdev->dma_area->indicators;
+}
+
+static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev)
+{
+ return &vcdev->dma_area->indicators2;
+}
+
struct vq_info_block_legacy {
__u64 queue;
__u32 align;
@@ -127,11 +140,17 @@ static int virtio_ccw_use_airq = 1;
struct airq_info {
rwlock_t lock;
- u8 summary_indicator;
+ u8 summary_indicator_idx;
struct airq_struct airq;
struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
+static u8 *summary_indicators;
+
+static inline u8 *get_summary_indicator(struct airq_info *info)
+{
+ return summary_indicators + info->summary_indicator_idx;
+}
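Recording summary_indicator_idx instead of embedding the byte lets every airq_info share one array that can live in DMA-capable memory. The allocation site for summary_indicators falls outside this excerpt; the sketch below assumes a cio_dma_zalloc()-style helper, which is an assumption here.

/* Sketch: one DMA-capable byte per airq area; assumes a
 * cio_dma_zalloc()-style helper exists at this point in the series. */
static int __init setup_summary_indicators(void)
{
	summary_indicators = cio_dma_zalloc(MAX_AIRQ_AREAS);
	return summary_indicators ? 0 : -ENOMEM;
}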
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
@@ -196,7 +215,7 @@ static void virtio_airq_handler(struct airq_struct *airq, bool floating)
break;
vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
}
- info->summary_indicator = 0;
+ *(get_summary_indicator(info)) = 0;
smp_wmb();
/* Walk through indicators field, summary indicator not active. */
for (ai = 0;;) {
@@ -208,7 +227,7 @@ static void virtio_airq_handler(struct airq_struct *airq, bool floating)
read_unlock(&info->lock);
}
-static struct airq_info *new_airq_info(void)
+static struct airq_info *new_airq_info(int index)
{
struct airq_info *info;
int rc;
@@ -217,13 +236,15 @@ static struct airq_info *new_airq_info(void)
if (!info)
return NULL;
rwlock_init(&info->lock);
- info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
+ info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR
+ | AIRQ_IV_CACHELINE);
if (!info->aiv) {
kfree(info);
return NULL;
}
info->airq.handler = virtio_airq_handler;
- info->airq.lsi_ptr = &info->summary_indicator;
+ info->summary_indicator_idx = index;
+ info->airq.lsi_ptr = get_summary_indicator(info);
info->airq.lsi_mask = 0xff;
info->airq.isc = VIRTIO_AIRQ_ISC;
rc = register_adapter_interrupt(&info->airq);
@@ -245,7 +266,7 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
if (!airq_areas[i])
- airq_areas[i] = new_airq_info();
+ airq_areas[i] = new_airq_info(i);
info = airq_areas[i];
if (!info)
return 0;
@@ -326,29 +347,29 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
struct airq_info *airq_info = vcdev->airq_info;
if (vcdev->is_thinint) {
- thinint_area = kzalloc(sizeof(*thinint_area),
- GFP_DMA | GFP_KERNEL);
+ thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*thinint_area));
if (!thinint_area)
return;
thinint_area->summary_indicator =
- (unsigned long) &airq_info->summary_indicator;
+ (unsigned long) get_summary_indicator(airq_info);
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->count = sizeof(*thinint_area);
ccw->cda = (__u32)(unsigned long) thinint_area;
} else {
/* payload is the address of the indicators */
- indicatorp = kmalloc(sizeof(&vcdev->indicators),
- GFP_DMA | GFP_KERNEL);
+ indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(indicators(vcdev)));
if (!indicatorp)
return;
*indicatorp = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
- ccw->count = sizeof(&vcdev->indicators);
+ ccw->count = sizeof(indicators(vcdev));
ccw->cda = (__u32)(unsigned long) indicatorp;
}
/* Deregister indicators from host. */
- vcdev->indicators = 0;
+ *indicators(vcdev) = 0;
ccw->flags = 0;
ret = ccw_io_helper(vcdev, ccw,
vcdev->is_thinint ?
@@ -359,8 +380,8 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
"Failed to deregister indicators (%d)\n", ret);
else if (vcdev->is_thinint)
virtio_ccw_drop_indicators(vcdev);
- kfree(indicatorp);
- kfree(thinint_area);
+ ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev)));
+ ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
}
static inline long __do_kvm_notify(struct subchannel_id schid,
@@ -407,15 +428,15 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
{
int ret;
- vcdev->config_block->index = index;
+ vcdev->dma_area->config_block.index = index;
ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
ccw->flags = 0;
ccw->count = sizeof(struct vq_config_block);
- ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
+ ccw->cda = (__u32)(unsigned long)(&vcdev->dma_area->config_block);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
if (ret)
return ret;
- return vcdev->config_block->num ?: -ENOENT;
+ return vcdev->dma_area->config_block.num ?: -ENOENT;
}
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
@@ -460,7 +481,8 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
ret, index);
vring_del_virtqueue(vq);
- kfree(info->info_block);
+ ccw_device_dma_free(vcdev->cdev, info->info_block,
+ sizeof(*info->info_block));
kfree(info);
}
@@ -470,7 +492,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev)
struct ccw1 *ccw;
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
@@ -479,7 +501,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev)
list_for_each_entry_safe(vq, n, &vdev->vqs, list)
virtio_ccw_del_vq(vq, ccw);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
@@ -502,8 +524,8 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
err = -ENOMEM;
goto out_err;
}
- info->info_block = kzalloc(sizeof(*info->info_block),
- GFP_DMA | GFP_KERNEL);
+ info->info_block = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*info->info_block));
if (!info->info_block) {
dev_warn(&vcdev->cdev->dev, "no info block\n");
err = -ENOMEM;
@@ -567,7 +589,8 @@ out_err:
if (vq)
vring_del_virtqueue(vq);
if (info) {
- kfree(info->info_block);
+ ccw_device_dma_free(vcdev->cdev, info->info_block,
+ sizeof(*info->info_block));
}
kfree(info);
return ERR_PTR(err);
@@ -581,7 +604,8 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
struct virtio_thinint_area *thinint_area = NULL;
struct airq_info *info;
- thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
+ thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*thinint_area));
if (!thinint_area) {
ret = -ENOMEM;
goto out;
@@ -596,7 +620,7 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
}
info = vcdev->airq_info;
thinint_area->summary_indicator =
- (unsigned long) &info->summary_indicator;
+ (unsigned long) get_summary_indicator(info);
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->flags = CCW_FLAG_SLI;
@@ -617,7 +641,7 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
virtio_ccw_drop_indicators(vcdev);
}
out:
- kfree(thinint_area);
+ ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
return ret;
}
@@ -633,7 +657,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
int ret, i, queue_idx = 0;
struct ccw1 *ccw;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return -ENOMEM;
@@ -657,10 +681,11 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
* We need a data area under 2G to communicate. Our payload is
* the address of the indicators.
*/
- indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
+ indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(indicators(vcdev)));
if (!indicatorp)
goto out;
- *indicatorp = (unsigned long) &vcdev->indicators;
+ *indicatorp = (unsigned long) indicators(vcdev);
if (vcdev->is_thinint) {
ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
if (ret)
@@ -669,32 +694,36 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
}
if (!vcdev->is_thinint) {
/* Register queue indicators with host. */
- vcdev->indicators = 0;
+ *indicators(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
ccw->flags = 0;
- ccw->count = sizeof(&vcdev->indicators);
+ ccw->count = sizeof(indicators(vcdev));
ccw->cda = (__u32)(unsigned long) indicatorp;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
if (ret)
goto out;
}
/* Register indicators2 with host for config changes */
- *indicatorp = (unsigned long) &vcdev->indicators2;
- vcdev->indicators2 = 0;
+ *indicatorp = (unsigned long) indicators2(vcdev);
+ *indicators2(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_CONF_IND;
ccw->flags = 0;
- ccw->count = sizeof(&vcdev->indicators2);
+ ccw->count = sizeof(indicators2(vcdev));
ccw->cda = (__u32)(unsigned long) indicatorp;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
if (ret)
goto out;
- kfree(indicatorp);
- kfree(ccw);
+ if (indicatorp)
+ ccw_device_dma_free(vcdev->cdev, indicatorp,
+ sizeof(indicators(vcdev)));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return 0;
out:
- kfree(indicatorp);
- kfree(ccw);
+ if (indicatorp)
+ ccw_device_dma_free(vcdev->cdev, indicatorp,
+ sizeof(indicators(vcdev)));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
virtio_ccw_del_vqs(vdev);
return ret;
}
@@ -704,12 +733,12 @@ static void virtio_ccw_reset(struct virtio_device *vdev)
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct ccw1 *ccw;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
/* Zero status bits. */
- *vcdev->status = 0;
+ vcdev->dma_area->status = 0;
/* Send a reset ccw on device. */
ccw->cmd_code = CCW_CMD_VDEV_RESET;
@@ -717,7 +746,7 @@ static void virtio_ccw_reset(struct virtio_device *vdev)
ccw->count = 0;
ccw->cda = 0;
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static u64 virtio_ccw_get_features(struct virtio_device *vdev)
@@ -728,11 +757,11 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
u64 rc;
struct ccw1 *ccw;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return 0;
- features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
if (!features) {
rc = 0;
goto out_free;
@@ -765,8 +794,8 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
rc |= (u64)le32_to_cpu(features->features) << 32;
out_free:
- kfree(features);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return rc;
}
@@ -791,11 +820,11 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
return -EINVAL;
}
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return -ENOMEM;
- features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
if (!features) {
ret = -ENOMEM;
goto out_free;
@@ -830,8 +859,8 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
out_free:
- kfree(features);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return ret;
}
@@ -845,11 +874,12 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
void *config_area;
unsigned long flags;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
- config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+ config_area = ccw_device_dma_zalloc(vcdev->cdev,
+ VIRTIO_CCW_CONFIG_SIZE);
if (!config_area)
goto out_free;
@@ -871,8 +901,8 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
memcpy(buf, config_area + offset, len);
out_free:
- kfree(config_area);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static void virtio_ccw_set_config(struct virtio_device *vdev,
@@ -884,11 +914,12 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
void *config_area;
unsigned long flags;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
- config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+ config_area = ccw_device_dma_zalloc(vcdev->cdev,
+ VIRTIO_CCW_CONFIG_SIZE);
if (!config_area)
goto out_free;
@@ -907,61 +938,61 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
out_free:
- kfree(config_area);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- u8 old_status = *vcdev->status;
+ u8 old_status = vcdev->dma_area->status;
struct ccw1 *ccw;
if (vcdev->revision < 1)
- return *vcdev->status;
+ return vcdev->dma_area->status;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return old_status;
ccw->cmd_code = CCW_CMD_READ_STATUS;
ccw->flags = 0;
- ccw->count = sizeof(*vcdev->status);
- ccw->cda = (__u32)(unsigned long)vcdev->status;
+ ccw->count = sizeof(vcdev->dma_area->status);
+ ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
/*
* If the channel program failed (should only happen if the device
* was hotunplugged, and then we clean up via the machine check
- * handler anyway), vcdev->status was not overwritten and we just
+ * handler anyway), vcdev->dma_area->status was not overwritten and we just
* return the old status, which is fine.
*/
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
- return *vcdev->status;
+ return vcdev->dma_area->status;
}
static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- u8 old_status = *vcdev->status;
+ u8 old_status = vcdev->dma_area->status;
struct ccw1 *ccw;
int ret;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
/* Write the status to the host. */
- *vcdev->status = status;
+ vcdev->dma_area->status = status;
ccw->cmd_code = CCW_CMD_WRITE_STATUS;
ccw->flags = 0;
ccw->count = sizeof(status);
- ccw->cda = (__u32)(unsigned long)vcdev->status;
+ ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
/* Write failed? We assume status is unchanged. */
if (ret)
- *vcdev->status = old_status;
- kfree(ccw);
+ vcdev->dma_area->status = old_status;
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
@@ -994,8 +1025,8 @@ static void virtio_ccw_release_dev(struct device *_d)
struct virtio_device *dev = dev_to_virtio(_d);
struct virtio_ccw_device *vcdev = to_vc_device(dev);
- kfree(vcdev->status);
- kfree(vcdev->config_block);
+ ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
+ sizeof(*vcdev->dma_area));
kfree(vcdev);
}
@@ -1093,17 +1124,17 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
vcdev->err = -EIO;
}
virtio_ccw_check_activity(vcdev, activity);
- for_each_set_bit(i, &vcdev->indicators,
- sizeof(vcdev->indicators) * BITS_PER_BYTE) {
+ for_each_set_bit(i, indicators(vcdev),
+ sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
/* The bit clear must happen before the vring kick. */
- clear_bit(i, &vcdev->indicators);
+ clear_bit(i, indicators(vcdev));
barrier();
vq = virtio_ccw_vq_by_ind(vcdev, i);
vring_interrupt(0, vq);
}
- if (test_bit(0, &vcdev->indicators2)) {
+ if (test_bit(0, indicators2(vcdev))) {
virtio_config_changed(&vcdev->vdev);
- clear_bit(0, &vcdev->indicators2);
+ clear_bit(0, indicators2(vcdev));
}
}
@@ -1203,12 +1234,12 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
struct ccw1 *ccw;
int ret;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return -ENOMEM;
- rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
+ rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev));
if (!rev) {
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return -ENOMEM;
}
@@ -1238,8 +1269,8 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
}
} while (ret == -EOPNOTSUPP);
- kfree(ccw);
- kfree(rev);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+ ccw_device_dma_free(vcdev->cdev, rev, sizeof(*rev));
return ret;
}
@@ -1255,24 +1286,11 @@ static int virtio_ccw_online(struct ccw_device *cdev)
ret = -ENOMEM;
goto out_free;
}
-
vcdev->vdev.dev.parent = &cdev->dev;
- cdev->dev.dma_mask = &vcdev->dma_mask;
- /* we are fine with common virtio infrastructure using 64 bit DMA */
- ret = dma_set_mask_and_coherent(&cdev->dev, DMA_BIT_MASK(64));
- if (ret) {
- dev_warn(&cdev->dev, "Failed to enable 64-bit DMA.\n");
- goto out_free;
- }
-
- vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
- GFP_DMA | GFP_KERNEL);
- if (!vcdev->config_block) {
- ret = -ENOMEM;
- goto out_free;
- }
- vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
- if (!vcdev->status) {
+ vcdev->cdev = cdev;
+ vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*vcdev->dma_area));
+ if (!vcdev->dma_area) {
ret = -ENOMEM;
goto out_free;
}
@@ -1281,7 +1299,6 @@ static int virtio_ccw_online(struct ccw_device *cdev)
vcdev->vdev.dev.release = virtio_ccw_release_dev;
vcdev->vdev.config = &virtio_ccw_config_ops;
- vcdev->cdev = cdev;
init_waitqueue_head(&vcdev->wait_q);
INIT_LIST_HEAD(&vcdev->virtqueues);
spin_lock_init(&vcdev->lock);
@@ -1312,8 +1329,8 @@ out_put:
return ret;
out_free:
if (vcdev) {
- kfree(vcdev->status);
- kfree(vcdev->config_block);
+ ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
+ sizeof(*vcdev->dma_area));
}
kfree(vcdev);
return ret;
@@ -1483,8 +1500,17 @@ static void __init no_auto_parse(void)
static int __init virtio_ccw_init(void)
{
+ int rc;
+
/* parse no_auto string before we do anything further */
no_auto_parse();
- return ccw_driver_register(&virtio_ccw_driver);
+
+ summary_indicators = cio_dma_zalloc(MAX_AIRQ_AREAS);
+ if (!summary_indicators)
+ return -ENOMEM;
+ rc = ccw_driver_register(&virtio_ccw_driver);
+ if (rc)
+ cio_dma_free(summary_indicators, MAX_AIRQ_AREAS);
+ return rc;
}
device_initcall(virtio_ccw_init);
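
The virtio_ccw hunks above all follow one conversion: buffers the channel subsystem hands to the hypervisor move from kzalloc(..., GFP_DMA | GFP_KERNEL) to per-device DMA allocations, and the matching free must now carry the allocation size. The scattered *vcdev->status and vcdev->config_block buffers are folded into a single vcdev->dma_area, and the driver-wide summary_indicators array comes from cio_dma_zalloc() at init time. Below is a minimal sketch of the paired alloc/free pattern, assuming the ccw_device_dma_zalloc(cdev, size) / ccw_device_dma_free(cdev, ptr, size) signatures used throughout the hunks; it is an illustration, not a function added by the patch.

/*
 * Illustrative only: shows the converted allocation pattern, with the
 * helper signatures as used in the hunks above.
 */
static int example_ccw_op(struct virtio_ccw_device *vcdev)
{
	struct ccw1 *ccw;
	void *buf;
	int ret = -ENOMEM;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return -ENOMEM;
	buf = ccw_device_dma_zalloc(vcdev->cdev, VIRTIO_CCW_CONFIG_SIZE);
	if (!buf)
		goto out_free_ccw;

	ccw->cmd_code = CCW_CMD_READ_CONF;
	ccw->flags = 0;
	ccw->count = VIRTIO_CCW_CONFIG_SIZE;
	ccw->cda = (__u32)(unsigned long)buf;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);

	/* The free side must name the same size as the allocation. */
	ccw_device_dma_free(vcdev->cdev, buf, VIRTIO_CCW_CONFIG_SIZE);
out_free_ccw:
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	return ret;
}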
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index ecee4b3ff073..377b07b2feeb 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -763,6 +763,7 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
struct pvscsi_adapter *adapter = shost_priv(host);
struct pvscsi_ctx *ctx;
unsigned long flags;
+ unsigned char op;
spin_lock_irqsave(&adapter->hw_lock, flags);
@@ -775,13 +776,14 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
}
cmd->scsi_done = done;
+ op = cmd->cmnd[0];
dev_dbg(&cmd->device->sdev_gendev,
- "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
+ "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op);
spin_unlock_irqrestore(&adapter->hw_lock, flags);
- pvscsi_kick_io(adapter, cmd->cmnd[0]);
+ pvscsi_kick_io(adapter, op);
return 0;
}
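
The vmw_pvscsi change is a small race fix: cmd->cmnd[0] is copied into a local while adapter->hw_lock is still held, because once the lock is dropped the command may complete and be recycled before pvscsi_kick_io() runs. A hedged sketch of the snapshot-under-the-lock idiom follows; the struct and function names are illustrative, not part of the driver.

#include <linux/spinlock.h>

struct req_state {
	spinlock_t lock;
	unsigned char *cdb;	/* may be recycled once the lock drops */
};

static void kick_hw(unsigned char op);	/* stand-in for pvscsi_kick_io() */

static void queue_one(struct req_state *rs)
{
	unsigned long flags;
	unsigned char op;

	spin_lock_irqsave(&rs->lock, flags);
	op = rs->cdb[0];	/* copy while the buffer is still valid */
	spin_unlock_irqrestore(&rs->lock, flags);

	kick_hw(op);		/* uses the snapshot, not rs->cdb */
}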
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 524ecdc2a9bb..2ec355003524 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_SOC_SAMSUNG) += samsung/
obj-y += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
-obj-$(CONFIG_SOC_TI) += ti/
+obj-y += ti/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_PLAT_VERSATILE) += versatile/
obj-y += xilinx/
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index ea0859f7b185..d7d50d48d05d 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -75,10 +75,10 @@ config TI_SCI_PM_DOMAINS
called ti_sci_pm_domains. Note this is needed early in boot before
rootfs may be available.
+endif # SOC_TI
+
config TI_SCI_INTA_MSI_DOMAIN
bool
select GENERIC_MSI_IRQ_DOMAIN
help
Driver to enable Interrupt Aggregator specific MSI Domain.
-
-endif # SOC_TI
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index b6e4862cc242..51ddca2033e0 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -81,6 +81,12 @@ out:
return CHAP_DIGEST_UNKNOWN;
}
+static void chap_close(struct iscsi_conn *conn)
+{
+ kfree(conn->auth_protocol);
+ conn->auth_protocol = NULL;
+}
+
static struct iscsi_chap *chap_server_open(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
@@ -118,7 +124,7 @@ static struct iscsi_chap *chap_server_open(
case CHAP_DIGEST_UNKNOWN:
default:
pr_err("Unsupported CHAP_A value\n");
- kfree(conn->auth_protocol);
+ chap_close(conn);
return NULL;
}
@@ -133,19 +139,13 @@ static struct iscsi_chap *chap_server_open(
* Generate Challenge.
*/
if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) {
- kfree(conn->auth_protocol);
+ chap_close(conn);
return NULL;
}
return chap;
}
-static void chap_close(struct iscsi_conn *conn)
-{
- kfree(conn->auth_protocol);
- conn->auth_protocol = NULL;
-}
-
static int chap_server_compute_md5(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
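
The iscsi_target_auth change hoists chap_close() above its first caller and routes the CHAP_A error paths through it, so conn->auth_protocol is freed and NULLed in one place instead of being left dangling by an open-coded kfree(). A minimal sketch of that free-and-NULL idiom, with an illustrative struct standing in for the connection:

#include <linux/slab.h>

struct conn_state {		/* illustrative stand-in, not iscsi_conn */
	void *auth_protocol;
};

/*
 * Free-and-NULL in one helper: every error path calls this, and a
 * repeated call (or a later kfree(NULL)) is a harmless no-op.
 */
static void release_auth(struct conn_state *c)
{
	kfree(c->auth_protocol);
	c->auth_protocol = NULL;
}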
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index f4a075303e9a..6949ea8bc387 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -502,7 +502,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
/* Always in 512 byte units for Linux/Block */
block_lba += sg->length >> SECTOR_SHIFT;
- sectors -= 1;
+ sectors -= sg->length >> SECTOR_SHIFT;
}
iblock_submit_bios(&list);
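
The iblock fix corrects a unit mismatch in the WRITE SAME loop: block_lba advances by sg->length >> SECTOR_SHIFT (512-byte sectors per scatterlist element), but the remaining count was decremented by only one per iteration, so any element covering more than one sector made the loop run long. After the fix both counters move in the same units. A standalone sketch of the corrected accounting, with illustrative names:

#define SECTOR_SHIFT 9	/* Linux block layer: always 512-byte units */

/*
 * Illustrative loop: the LBA cursor and the remaining-sector counter
 * both step by the element's sector count. As in the driver, the
 * caller sizes the elements so they never exceed the remaining count.
 */
static void fill_range(unsigned long long lba, unsigned int sectors,
		       const unsigned int *elem_bytes, unsigned int nelems)
{
	unsigned int i;

	for (i = 0; i < nelems && sectors; i++) {
		unsigned int step = elem_bytes[i] >> SECTOR_SHIFT;

		/* ...submit elem_bytes[i] bytes starting at lba... */
		lba += step;
		sectors -= step;	/* was "sectors -= 1" */
	}
}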
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index 319b77126168..e85d54d1cdf3 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -43,7 +43,7 @@ MODULE_PARM_DESC(notify_delay_ms,
*/
#define MAX_NUMBER_OF_TRIPS 2
-struct pkg_device {
+struct zone_device {
int cpu;
bool work_scheduled;
u32 tj_max;
@@ -58,10 +58,10 @@ static struct thermal_zone_params pkg_temp_tz_params = {
.no_hwmon = true,
};
-/* Keep track of how many package pointers we allocated in init() */
-static int max_packages __read_mostly;
-/* Array of package pointers */
-static struct pkg_device **packages;
+/* Keep track of how many zone pointers we allocated in init() */
+static int max_id __read_mostly;
+/* Array of zone pointers */
+static struct zone_device **zones;
/* Serializes interrupt notification, work and hotplug */
static DEFINE_SPINLOCK(pkg_temp_lock);
/* Protects zone operation in the work function against hotplug removal */
@@ -108,12 +108,12 @@ err_out:
*
* - Other callsites: Must hold pkg_temp_lock
*/
-static struct pkg_device *pkg_temp_thermal_get_dev(unsigned int cpu)
+static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu)
{
- int pkgid = topology_logical_package_id(cpu);
+ int id = topology_logical_die_id(cpu);
- if (pkgid >= 0 && pkgid < max_packages)
- return packages[pkgid];
+ if (id >= 0 && id < max_id)
+ return zones[id];
return NULL;
}
@@ -138,12 +138,13 @@ static int get_tj_max(int cpu, u32 *tj_max)
static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
{
- struct pkg_device *pkgdev = tzd->devdata;
+ struct zone_device *zonedev = tzd->devdata;
u32 eax, edx;
- rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_STATUS, &eax, &edx);
+ rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_STATUS,
+ &eax, &edx);
if (eax & 0x80000000) {
- *temp = pkgdev->tj_max - ((eax >> 16) & 0x7f) * 1000;
+ *temp = zonedev->tj_max - ((eax >> 16) & 0x7f) * 1000;
pr_debug("sys_get_curr_temp %d\n", *temp);
return 0;
}
@@ -153,7 +154,7 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
static int sys_get_trip_temp(struct thermal_zone_device *tzd,
int trip, int *temp)
{
- struct pkg_device *pkgdev = tzd->devdata;
+ struct zone_device *zonedev = tzd->devdata;
unsigned long thres_reg_value;
u32 mask, shift, eax, edx;
int ret;
@@ -169,14 +170,14 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd,
shift = THERM_SHIFT_THRESHOLD0;
}
- ret = rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
&eax, &edx);
if (ret < 0)
return ret;
thres_reg_value = (eax & mask) >> shift;
if (thres_reg_value)
- *temp = pkgdev->tj_max - thres_reg_value * 1000;
+ *temp = zonedev->tj_max - thres_reg_value * 1000;
else
*temp = 0;
pr_debug("sys_get_trip_temp %d\n", *temp);
@@ -187,14 +188,14 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd,
static int
sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
{
- struct pkg_device *pkgdev = tzd->devdata;
+ struct zone_device *zonedev = tzd->devdata;
u32 l, h, mask, shift, intr;
int ret;
- if (trip >= MAX_NUMBER_OF_TRIPS || temp >= pkgdev->tj_max)
+ if (trip >= MAX_NUMBER_OF_TRIPS || temp >= zonedev->tj_max)
return -EINVAL;
- ret = rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
&l, &h);
if (ret < 0)
return ret;
@@ -216,11 +217,12 @@ sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
if (!temp) {
l &= ~intr;
} else {
- l |= (pkgdev->tj_max - temp)/1000 << shift;
+ l |= (zonedev->tj_max - temp)/1000 << shift;
l |= intr;
}
- return wrmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+ return wrmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ l, h);
}
static int sys_get_trip_type(struct thermal_zone_device *thermal, int trip,
@@ -275,26 +277,26 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
{
struct thermal_zone_device *tzone = NULL;
int cpu = smp_processor_id();
- struct pkg_device *pkgdev;
+ struct zone_device *zonedev;
u64 msr_val, wr_val;
mutex_lock(&thermal_zone_mutex);
spin_lock_irq(&pkg_temp_lock);
++pkg_work_cnt;
- pkgdev = pkg_temp_thermal_get_dev(cpu);
- if (!pkgdev) {
+ zonedev = pkg_temp_thermal_get_dev(cpu);
+ if (!zonedev) {
spin_unlock_irq(&pkg_temp_lock);
mutex_unlock(&thermal_zone_mutex);
return;
}
- pkgdev->work_scheduled = false;
+ zonedev->work_scheduled = false;
rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
wr_val = msr_val & ~(THERM_LOG_THRESHOLD0 | THERM_LOG_THRESHOLD1);
if (wr_val != msr_val) {
wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS, wr_val);
- tzone = pkgdev->tzone;
+ tzone = zonedev->tzone;
}
enable_pkg_thres_interrupt();
@@ -320,7 +322,7 @@ static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work)
static int pkg_thermal_notify(u64 msr_val)
{
int cpu = smp_processor_id();
- struct pkg_device *pkgdev;
+ struct zone_device *zonedev;
unsigned long flags;
spin_lock_irqsave(&pkg_temp_lock, flags);
@@ -329,10 +331,10 @@ static int pkg_thermal_notify(u64 msr_val)
disable_pkg_thres_interrupt();
/* Work is per package, so scheduling it once is enough. */
- pkgdev = pkg_temp_thermal_get_dev(cpu);
- if (pkgdev && !pkgdev->work_scheduled) {
- pkgdev->work_scheduled = true;
- pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work);
+ zonedev = pkg_temp_thermal_get_dev(cpu);
+ if (zonedev && !zonedev->work_scheduled) {
+ zonedev->work_scheduled = true;
+ pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work);
}
spin_unlock_irqrestore(&pkg_temp_lock, flags);
@@ -341,12 +343,12 @@ static int pkg_thermal_notify(u64 msr_val)
static int pkg_temp_thermal_device_add(unsigned int cpu)
{
- int pkgid = topology_logical_package_id(cpu);
+ int id = topology_logical_die_id(cpu);
u32 tj_max, eax, ebx, ecx, edx;
- struct pkg_device *pkgdev;
+ struct zone_device *zonedev;
int thres_count, err;
- if (pkgid >= max_packages)
+ if (id >= max_id)
return -ENOMEM;
cpuid(6, &eax, &ebx, &ecx, &edx);
@@ -360,51 +362,51 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
if (err)
return err;
- pkgdev = kzalloc(sizeof(*pkgdev), GFP_KERNEL);
- if (!pkgdev)
+ zonedev = kzalloc(sizeof(*zonedev), GFP_KERNEL);
+ if (!zonedev)
return -ENOMEM;
- INIT_DELAYED_WORK(&pkgdev->work, pkg_temp_thermal_threshold_work_fn);
- pkgdev->cpu = cpu;
- pkgdev->tj_max = tj_max;
- pkgdev->tzone = thermal_zone_device_register("x86_pkg_temp",
+ INIT_DELAYED_WORK(&zonedev->work, pkg_temp_thermal_threshold_work_fn);
+ zonedev->cpu = cpu;
+ zonedev->tj_max = tj_max;
+ zonedev->tzone = thermal_zone_device_register("x86_pkg_temp",
thres_count,
(thres_count == MAX_NUMBER_OF_TRIPS) ? 0x03 : 0x01,
- pkgdev, &tzone_ops, &pkg_temp_tz_params, 0, 0);
- if (IS_ERR(pkgdev->tzone)) {
- err = PTR_ERR(pkgdev->tzone);
- kfree(pkgdev);
+ zonedev, &tzone_ops, &pkg_temp_tz_params, 0, 0);
+ if (IS_ERR(zonedev->tzone)) {
+ err = PTR_ERR(zonedev->tzone);
+ kfree(zonedev);
return err;
}
/* Store MSR value for package thermal interrupt, to restore at exit */
- rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, pkgdev->msr_pkg_therm_low,
- pkgdev->msr_pkg_therm_high);
+ rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, zonedev->msr_pkg_therm_low,
+ zonedev->msr_pkg_therm_high);
- cpumask_set_cpu(cpu, &pkgdev->cpumask);
+ cpumask_set_cpu(cpu, &zonedev->cpumask);
spin_lock_irq(&pkg_temp_lock);
- packages[pkgid] = pkgdev;
+ zones[id] = zonedev;
spin_unlock_irq(&pkg_temp_lock);
return 0;
}
static int pkg_thermal_cpu_offline(unsigned int cpu)
{
- struct pkg_device *pkgdev = pkg_temp_thermal_get_dev(cpu);
+ struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu);
bool lastcpu, was_target;
int target;
- if (!pkgdev)
+ if (!zonedev)
return 0;
- target = cpumask_any_but(&pkgdev->cpumask, cpu);
- cpumask_clear_cpu(cpu, &pkgdev->cpumask);
+ target = cpumask_any_but(&zonedev->cpumask, cpu);
+ cpumask_clear_cpu(cpu, &zonedev->cpumask);
lastcpu = target >= nr_cpu_ids;
/*
* Remove the sysfs files, if this is the last cpu in the package
* before doing further cleanups.
*/
if (lastcpu) {
- struct thermal_zone_device *tzone = pkgdev->tzone;
+ struct thermal_zone_device *tzone = zonedev->tzone;
/*
* We must protect against a work function calling
@@ -413,7 +415,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
* won't try to call.
*/
mutex_lock(&thermal_zone_mutex);
- pkgdev->tzone = NULL;
+ zonedev->tzone = NULL;
mutex_unlock(&thermal_zone_mutex);
thermal_zone_device_unregister(tzone);
@@ -427,8 +429,8 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
* one. When we drop the lock, then the interrupt notify function
* will see the new target.
*/
- was_target = pkgdev->cpu == cpu;
- pkgdev->cpu = target;
+ was_target = zonedev->cpu == cpu;
+ zonedev->cpu = target;
/*
* If this is the last CPU in the package remove the package
@@ -437,23 +439,23 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
* worker will see the package anymore.
*/
if (lastcpu) {
- packages[topology_logical_package_id(cpu)] = NULL;
+ zones[topology_logical_die_id(cpu)] = NULL;
/* After this point nothing touches the MSR anymore. */
wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
- pkgdev->msr_pkg_therm_low, pkgdev->msr_pkg_therm_high);
+ zonedev->msr_pkg_therm_low, zonedev->msr_pkg_therm_high);
}
/*
* Check whether there is work scheduled and whether the work is
* targeted at the outgoing CPU.
*/
- if (pkgdev->work_scheduled && was_target) {
+ if (zonedev->work_scheduled && was_target) {
/*
* To cancel the work we need to drop the lock, otherwise
* we might deadlock if the work needs to be flushed.
*/
spin_unlock_irq(&pkg_temp_lock);
- cancel_delayed_work_sync(&pkgdev->work);
+ cancel_delayed_work_sync(&zonedev->work);
spin_lock_irq(&pkg_temp_lock);
/*
* If this is not the last cpu in the package and the work
@@ -461,21 +463,21 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
* need to reschedule the work, otherwise the interrupt
* stays disabled forever.
*/
- if (!lastcpu && pkgdev->work_scheduled)
- pkg_thermal_schedule_work(target, &pkgdev->work);
+ if (!lastcpu && zonedev->work_scheduled)
+ pkg_thermal_schedule_work(target, &zonedev->work);
}
spin_unlock_irq(&pkg_temp_lock);
/* Final cleanup if this is the last cpu */
if (lastcpu)
- kfree(pkgdev);
+ kfree(zonedev);
return 0;
}
static int pkg_thermal_cpu_online(unsigned int cpu)
{
- struct pkg_device *pkgdev = pkg_temp_thermal_get_dev(cpu);
+ struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu);
struct cpuinfo_x86 *c = &cpu_data(cpu);
/* Paranoia check */
@@ -483,8 +485,8 @@ static int pkg_thermal_cpu_online(unsigned int cpu)
return -ENODEV;
/* If the package exists, nothing to do */
- if (pkgdev) {
- cpumask_set_cpu(cpu, &pkgdev->cpumask);
+ if (zonedev) {
+ cpumask_set_cpu(cpu, &zonedev->cpumask);
return 0;
}
return pkg_temp_thermal_device_add(cpu);
@@ -503,10 +505,10 @@ static int __init pkg_temp_thermal_init(void)
if (!x86_match_cpu(pkg_temp_thermal_ids))
return -ENODEV;
- max_packages = topology_max_packages();
- packages = kcalloc(max_packages, sizeof(struct pkg_device *),
+ max_id = topology_max_packages() * topology_max_die_per_package();
+ zones = kcalloc(max_id, sizeof(struct zone_device *),
GFP_KERNEL);
- if (!packages)
+ if (!zones)
return -ENOMEM;
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "thermal/x86_pkg:online",
@@ -525,7 +527,7 @@ static int __init pkg_temp_thermal_init(void)
return 0;
err:
- kfree(packages);
+ kfree(zones);
return ret;
}
module_init(pkg_temp_thermal_init)
@@ -537,7 +539,7 @@ static void __exit pkg_temp_thermal_exit(void)
cpuhp_remove_state(pkg_thermal_hp_state);
debugfs_remove_recursive(debugfs);
- kfree(packages);
+ kfree(zones);
}
module_exit(pkg_temp_thermal_exit)
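
The x86_pkg_temp_thermal rework is a package-to-die conversion: the zone array is sized for every possible die (topology_max_packages() * topology_max_die_per_package()) and indexed by topology_logical_die_id(), so multi-die packages get one thermal zone per die while single-die systems behave as before. A condensed sketch of the sizing and bounds-checked lookup, mirroring the patched code above rather than adding any new API:

static struct zone_device **zones;
static int max_id __read_mostly;

static int init_zones(void)
{
	max_id = topology_max_packages() * topology_max_die_per_package();
	zones = kcalloc(max_id, sizeof(*zones), GFP_KERNEL);
	return zones ? 0 : -ENOMEM;
}

static struct zone_device *zone_for_cpu(unsigned int cpu)
{
	int id = topology_logical_die_id(cpu);

	/* Logical die ids are dense; still bounds-check before indexing. */
	if (id >= 0 && id < max_id)
		return zones[id];
	return NULL;
}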
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index e38f104db174..fde8d4073e74 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -487,7 +487,7 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
{
- lockdep_assert_held_exclusive(&tty->ldisc_sem);
+ lockdep_assert_held_write(&tty->ldisc_sem);
WARN_ON(!test_bit(TTY_LDISC_OPEN, &tty->flags));
clear_bit(TTY_LDISC_OPEN, &tty->flags);
if (ld->ops->close)
@@ -509,7 +509,7 @@ static int tty_ldisc_failto(struct tty_struct *tty, int ld)
struct tty_ldisc *disc = tty_ldisc_get(tty, ld);
int r;
- lockdep_assert_held_exclusive(&tty->ldisc_sem);
+ lockdep_assert_held_write(&tty->ldisc_sem);
if (IS_ERR(disc))
return PTR_ERR(disc);
tty->ldisc = disc;
@@ -633,7 +633,7 @@ EXPORT_SYMBOL_GPL(tty_set_ldisc);
*/
static void tty_ldisc_kill(struct tty_struct *tty)
{
- lockdep_assert_held_exclusive(&tty->ldisc_sem);
+ lockdep_assert_held_write(&tty->ldisc_sem);
if (!tty->ldisc)
return;
/*
@@ -681,7 +681,7 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
struct tty_ldisc *ld;
int retval;
- lockdep_assert_held_exclusive(&tty->ldisc_sem);
+ lockdep_assert_held_write(&tty->ldisc_sem);
ld = tty_ldisc_get(tty, disc);
if (IS_ERR(ld)) {
BUG_ON(disc == N_TTY);
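
The tty_ldisc hunks are a mechanical rename: lockdep_assert_held_exclusive() becomes lockdep_assert_held_write(), matching the read/write terminology lockdep uses for rw-semaphores; the semantics (warn unless the lock is held for writing) are unchanged. A small usage sketch under that assumption, with an illustrative lock rather than tty's ldisc_sem:

#include <linux/lockdep.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);	/* illustrative lock */

static void writer_only_path(void)
{
	/* Lockdep warns unless the caller holds example_sem for write. */
	lockdep_assert_held_write(&example_sem);
	/* ...mutate state protected by example_sem... */
}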