Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/pptt.c | 61
-rw-r--r--  drivers/auxdisplay/cfag12864bfb.c | 5
-rw-r--r--  drivers/auxdisplay/ht16k33.c | 4
-rw-r--r--  drivers/base/cacheinfo.c | 5
-rw-r--r--  drivers/clk/clk.c | 2
-rw-r--r--  drivers/clk/meson/g12a.c | 4
-rw-r--r--  drivers/clk/meson/g12a.h | 2
-rw-r--r--  drivers/clk/meson/meson8b.c | 10
-rw-r--r--  drivers/clk/socfpga/clk-s10.c | 4
-rw-r--r--  drivers/clk/tegra/clk-tegra210.c | 2
-rw-r--r--  drivers/clk/ti/clkctrl.c | 7
-rw-r--r--  drivers/dma/dma-jz4780.c | 5
-rw-r--r--  drivers/dma/imx-sdma.c | 52
-rw-r--r--  drivers/dma/qcom/bam_dma.c | 3
-rw-r--r--  drivers/firmware/efi/efi-bgrt.c | 5
-rw-r--r--  drivers/firmware/efi/efi.c | 12
-rw-r--r--  drivers/firmware/efi/efibc.c | 12
-rw-r--r--  drivers/gpio/gpiolib-of.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 19
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 6
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 6
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 2
-rw-r--r--  drivers/hid/hid-ids.h | 3
-rw-r--r--  drivers/hid/hid-logitech-dj.c | 4
-rw-r--r--  drivers/hid/hid-multitouch.c | 4
-rw-r--r--  drivers/hid/hid-quirks.c | 1
-rw-r--r--  drivers/hid/hid-uclogic-core.c | 2
-rw-r--r--  drivers/hid/hid-uclogic-params.c | 2
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp-fw-loader.c | 2
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp-hid-client.c | 4
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/bus.c | 15
-rw-r--r--  drivers/irqchip/irq-csky-mpintc.c | 15
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 35
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 7
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 4
-rw-r--r--  drivers/irqchip/irq-ti-sci-inta.c | 4
-rw-r--r--  drivers/md/dm-init.c | 10
-rw-r--r--  drivers/md/dm-log-writes.c | 23
-rw-r--r--  drivers/md/dm-table.c | 2
-rw-r--r--  drivers/md/dm-verity-target.c | 4
-rw-r--r--  drivers/mfd/stmfx.c | 12
-rw-r--r--  drivers/mtd/nand/raw/ingenic/Kconfig | 2
-rw-r--r--  drivers/mtd/nand/raw/ingenic/Makefile | 4
-rw-r--r--  drivers/mtd/nand/raw/ingenic/ingenic_ecc.c | 9
-rw-r--r--  drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c (renamed from drivers/mtd/nand/raw/ingenic/ingenic_nand.c) | 0
-rw-r--r--  drivers/mtd/nand/raw/nand_base.c | 3
-rw-r--r--  drivers/mtd/nand/raw/sunxi_nand.c | 40
-rw-r--r--  drivers/mtd/nand/spi/gigadevice.c | 2
-rw-r--r--  drivers/mtd/nand/spi/macronix.c | 4
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c | 119
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_filters.c | 10
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 19
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 28
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 16
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 22
-rw-r--r--  drivers/net/ppp/ppp_mppe.c | 1
-rw-r--r--  drivers/net/team/team.c | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/vrf.c | 2
-rw-r--r--  drivers/pci/pci-driver.c | 8
-rw-r--r--  drivers/perf/Kconfig | 8
-rw-r--r--  drivers/perf/Makefile | 1
-rw-r--r--  drivers/perf/arm_pmu_acpi.c | 72
-rw-r--r--  drivers/perf/arm_spe_pmu.c | 12
-rw-r--r--  drivers/perf/fsl_imx8_ddr_perf.c | 554
-rw-r--r--  drivers/pinctrl/mediatek/mtk-eint.c | 34
-rw-r--r--  drivers/pinctrl/pinctrl-mcp23s08.c | 8
-rw-r--r--  drivers/pinctrl/pinctrl-ocelot.c | 18
-rw-r--r--  drivers/s390/block/Kconfig | 2
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 2
-rw-r--r--  drivers/s390/char/Kconfig | 22
-rw-r--r--  drivers/s390/char/Makefile | 1
-rw-r--r--  drivers/s390/char/sclp_async.c | 189
-rw-r--r--  drivers/s390/char/zcore.c | 2
-rw-r--r--  drivers/s390/cio/airq.c | 37
-rw-r--r--  drivers/s390/cio/ccwreq.c | 9
-rw-r--r--  drivers/s390/cio/chsc.c | 30
-rw-r--r--  drivers/s390/cio/cio.h | 3
-rw-r--r--  drivers/s390/cio/css.c | 187
-rw-r--r--  drivers/s390/cio/device.c | 68
-rw-r--r--  drivers/s390/cio/device_fsm.c | 49
-rw-r--r--  drivers/s390/cio/device_id.c | 20
-rw-r--r--  drivers/s390/cio/device_ops.c | 21
-rw-r--r--  drivers/s390/cio/device_pgid.c | 22
-rw-r--r--  drivers/s390/cio/device_status.c | 24
-rw-r--r--  drivers/s390/cio/io_sch.h | 20
-rw-r--r--  drivers/s390/cio/qdio_main.c | 1
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 2
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 6
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c | 524
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.h | 7
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c | 13
-rw-r--r--  drivers/s390/crypto/pkey_api.c | 8
-rw-r--r--  drivers/s390/crypto/vfio_ap_drv.c | 34
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c | 380
-rw-r--r--  drivers/s390/crypto/vfio_ap_private.h | 15
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.c | 4
-rw-r--r--  drivers/s390/net/Kconfig | 8
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c | 246
-rw-r--r--  drivers/scsi/vmw_pvscsi.c | 6
-rw-r--r--  drivers/soc/Makefile | 2
-rw-r--r--  drivers/soc/ti/Kconfig | 4
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c | 16
-rw-r--r--  drivers/target/target_core_iblock.c | 2
116 files changed, 2379 insertions(+), 1031 deletions(-)
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index b72e6afaa8fb..1e7ac0bd0d3a 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -432,17 +432,40 @@ static void cache_setup_acpi_cpu(struct acpi_table_header *table,
}
}
+static bool flag_identical(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu)
+{
+ struct acpi_pptt_processor *next;
+
+ /* heterogeneous machines must use PPTT revision > 1 */
+ if (table_hdr->revision < 2)
+ return false;
+
+ /* Locate the last node in the tree with IDENTICAL set */
+ if (cpu->flags & ACPI_PPTT_ACPI_IDENTICAL) {
+ next = fetch_pptt_node(table_hdr, cpu->parent);
+ if (!(next && next->flags & ACPI_PPTT_ACPI_IDENTICAL))
+ return true;
+ }
+
+ return false;
+}
+
/* Passing level values greater than this will result in search termination */
#define PPTT_ABORT_PACKAGE 0xFF
-static struct acpi_pptt_processor *acpi_find_processor_package_id(struct acpi_table_header *table_hdr,
- struct acpi_pptt_processor *cpu,
- int level, int flag)
+static struct acpi_pptt_processor *acpi_find_processor_tag(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu,
+ int level, int flag)
{
struct acpi_pptt_processor *prev_node;
while (cpu && level) {
- if (cpu->flags & flag)
+ /* special case the identical flag to find last identical */
+ if (flag == ACPI_PPTT_ACPI_IDENTICAL) {
+ if (flag_identical(table_hdr, cpu))
+ break;
+ } else if (cpu->flags & flag)
break;
pr_debug("level %d\n", level);
prev_node = fetch_pptt_node(table_hdr, cpu->parent);
@@ -480,8 +503,8 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
if (cpu_node) {
- cpu_node = acpi_find_processor_package_id(table, cpu_node,
- level, flag);
+ cpu_node = acpi_find_processor_tag(table, cpu_node,
+ level, flag);
/*
* As per specification if the processor structure represents
* an actual processor, then ACPI processor ID must be valid.
@@ -660,3 +683,29 @@ int find_acpi_cpu_topology_package(unsigned int cpu)
return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
ACPI_PPTT_PHYSICAL_PACKAGE);
}
+
+/**
+ * find_acpi_cpu_topology_hetero_id() - Get a core architecture tag
+ * @cpu: Kernel logical CPU number
+ *
+ * Determine a unique heterogeneous tag for the given CPU. CPUs with the same
+ * implementation should have matching tags.
+ *
+ * The returned tag can be used to group peers with identical implementation.
+ *
+ * The search terminates when a level is found with the identical implementation
+ * flag set or we reach a root node.
+ *
+ * Due to limitations in the PPTT data structure, there may be rare situations
+ * where two cores in a heterogeneous machine may be identical, but won't have
+ * the same tag.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
+ * Otherwise returns a value which represents a group of identical cores
+ * similar to this CPU.
+ */
+int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
+{
+ return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
+ ACPI_PPTT_ACPI_IDENTICAL);
+}
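To make the walk above concrete, here is a minimal standalone sketch of the "last node with IDENTICAL set" test that flag_identical() performs; the node type is a hypothetical simplification, not the real acpi_pptt_processor:

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical simplified tree node standing in for acpi_pptt_processor. */
    struct node {
            struct node *parent;
            unsigned int flags;
    };

    #define FLAG_IDENTICAL 0x01 /* stand-in for ACPI_PPTT_ACPI_IDENTICAL */

    /*
     * True when @n has IDENTICAL set but its parent does not: @n is then
     * the topmost node of an IDENTICAL subtree, and the walk in
     * acpi_find_processor_tag() stops here to derive the tag.
     */
    static bool is_last_identical(const struct node *n)
    {
            return (n->flags & FLAG_IDENTICAL) &&
                   !(n->parent && (n->parent->flags & FLAG_IDENTICAL));
    }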
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 40c8a552a478..4074886b7bc8 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -52,8 +52,9 @@ static const struct fb_var_screeninfo cfag12864bfb_var = {
static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
- return vm_insert_page(vma, vma->vm_start,
- virt_to_page(cfag12864b_buffer));
+ struct page *pages = virt_to_page(cfag12864b_buffer);
+
+ return vm_map_pages_zero(vma, &pages, 1);
}
static struct fb_ops cfag12864bfb_ops = {
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 21393ec3b9a4..9c0bb771751d 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -223,9 +223,9 @@ static const struct backlight_ops ht16k33_bl_ops = {
static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct ht16k33_priv *priv = info->par;
+ struct page *pages = virt_to_page(priv->fbdev.buffer);
- return vm_insert_page(vma, vma->vm_start,
- virt_to_page(priv->fbdev.buffer));
+ return vm_map_pages_zero(vma, &pages, 1);
}
static struct fb_ops ht16k33_fb_ops = {
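Both auxdisplay conversions above follow the same pattern; below is a hedged sketch (the buffer field is assumed to be a page-sized kmalloc'd framebuffer, which both drivers use) of mapping a single kernel page to userspace with vm_map_pages_zero(), which, unlike a bare vm_insert_page(), validates the vma size against the page count and ignores any userspace-supplied offset:

    #include <linux/fb.h>
    #include <linux/mm.h>

    /* Sketch: map one kernel page backing a framebuffer to userspace. */
    static int demo_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
    {
            /* screen_buffer is assumed to point at a page-sized kmalloc'd buffer. */
            struct page *pages = virt_to_page(info->screen_buffer);

            return vm_map_pages_zero(vma, &pages, 1);
    }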
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index a7359535caf5..8827c60f51e2 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -213,6 +213,8 @@ int __weak cache_setup_acpi(unsigned int cpu)
return -ENOTSUPP;
}
+unsigned int coherency_max_size;
+
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -251,6 +253,9 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
}
}
+ /* record the maximum cache line size */
+ if (this_leaf->coherency_line_size > coherency_max_size)
+ coherency_max_size = this_leaf->coherency_line_size;
}
return 0;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index aa51756fd4d6..87b410d6e51d 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -368,7 +368,7 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
const char *dev_id = dev ? dev_name(dev) : NULL;
struct device_node *np = core->of_node;
- if (np && index >= 0)
+ if (np && (name || index >= 0))
hw = of_clk_get_hw(np, index, name);
/*
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index 739f64fdf1e3..206fafd299ea 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -2734,8 +2734,8 @@ static struct clk_hw_onecell_data g12a_hw_onecell_data = {
[CLKID_MALI_1_DIV] = &g12a_mali_1_div.hw,
[CLKID_MALI_1] = &g12a_mali_1.hw,
[CLKID_MALI] = &g12a_mali.hw,
- [CLKID_MPLL_5OM_DIV] = &g12a_mpll_50m_div.hw,
- [CLKID_MPLL_5OM] = &g12a_mpll_50m.hw,
+ [CLKID_MPLL_50M_DIV] = &g12a_mpll_50m_div.hw,
+ [CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
[CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
[CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
[CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
index 39c41af70804..bcc05cd9882f 100644
--- a/drivers/clk/meson/g12a.h
+++ b/drivers/clk/meson/g12a.h
@@ -166,7 +166,7 @@
#define CLKID_HDMI_DIV 167
#define CLKID_MALI_0_DIV 170
#define CLKID_MALI_1_DIV 173
-#define CLKID_MPLL_5OM_DIV 176
+#define CLKID_MPLL_50M_DIV 176
#define CLKID_SYS_PLL_DIV16_EN 178
#define CLKID_SYS_PLL_DIV16 179
#define CLKID_CPU_CLK_DYN0_SEL 180
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 37cf0f01bb5d..62cd3a7f1f65 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -1761,7 +1761,7 @@ static struct clk_regmap meson8m2_gp_pll = {
},
};
-static const char * const mmeson8b_vpu_0_1_parent_names[] = {
+static const char * const meson8b_vpu_0_1_parent_names[] = {
"fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
};
@@ -1778,8 +1778,8 @@ static struct clk_regmap meson8b_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = mmeson8b_vpu_0_1_parent_names,
- .num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names),
+ .parent_names = meson8b_vpu_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1837,8 +1837,8 @@ static struct clk_regmap meson8b_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = mmeson8b_vpu_0_1_parent_names,
- .num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names),
+ .parent_names = meson8b_vpu_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 8281dfbf38c2..5bed36e12951 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -103,9 +103,9 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
{ STRATIX10_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux),
0, 0, 0, 0x3C, 1},
{ STRATIX10_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux),
- 0, 0, 4, 0xB0, 0},
+ 0, 0, 2, 0xB0, 0},
{ STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
- 0, 0, 4, 0xB0, 1},
+ 0, 0, 2, 0xB0, 1},
{ STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux,
ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2},
{ STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index e1ba62d2b1a0..ac1d27a8c650 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3366,6 +3366,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA210_CLK_I2S3_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
{ TEGRA210_CLK_I2S4_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
{ TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA210_CLK_HDA, TEGRA210_CLK_PLL_P, 51000000, 0 },
+ { TEGRA210_CLK_HDA2CODEC_2X, TEGRA210_CLK_PLL_P, 48000000, 0 },
/* This MUST be the last entry. */
{ TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 8e834317c97d..975995eea15c 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -229,6 +229,7 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
{
struct omap_clkctrl_provider *provider = data;
struct omap_clkctrl_clk *entry;
+ bool found = false;
if (clkspec->args_count != 2)
return ERR_PTR(-EINVAL);
@@ -238,11 +239,13 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
list_for_each_entry(entry, &provider->clocks, node) {
if (entry->reg_offset == clkspec->args[0] &&
- entry->bit_offset == clkspec->args[1])
+ entry->bit_offset == clkspec->args[1]) {
+ found = true;
break;
+ }
}
- if (!entry)
+ if (!found)
return ERR_PTR(-EINVAL);
return entry->clk;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 263bee76ef0d..6b8c4c458e8a 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -718,12 +718,13 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
struct jz4780_dma_dev *jzdma = data;
unsigned int nb_channels = jzdma->soc_data->nb_channels;
- uint32_t pending, dmac;
+ unsigned long pending;
+ uint32_t dmac;
int i;
pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
- for_each_set_bit(i, (unsigned long *)&pending, nb_channels) {
+ for_each_set_bit(i, &pending, nb_channels) {
if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
pending &= ~BIT(i);
}
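The type change above matters because casting a 32-bit variable's address to unsigned long * makes for_each_set_bit() read 64 bits on 64-bit kernels (out of bounds, and the wrong word entirely on big-endian). A userspace analogue of the safe pattern, with hypothetical register values; the stmfx patch further down applies the same fix:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t dirqp = 0x00000411;   /* pretend JZ_DMA_REG_DIRQP read */
            unsigned long pending = dirqp; /* widen by value, never by cast */

            for (unsigned int i = 0; i < 32; i++)
                    if (pending & (1UL << i))
                            printf("channel %u raised an irq\n", i);
            return 0;
    }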
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 99d9f431ae2c..4ec84a633bd3 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -703,7 +703,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
spin_lock_irqsave(&sdma->channel_0_lock, flags);
bd0->mode.command = C0_SETPM;
- bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+ bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
bd0->mode.count = size / 2;
bd0->buffer_addr = buf_phys;
bd0->ext_buffer_addr = address;
@@ -1025,7 +1025,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
context->gReg[7] = sdmac->watermark_level;
bd0->mode.command = C0_SETDM;
- bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+ bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
bd0->mode.count = sizeof(*context) / 4;
bd0->buffer_addr = sdma->context_phys;
bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
@@ -2096,27 +2096,6 @@ static int sdma_probe(struct platform_device *pdev)
if (pdata && pdata->script_addrs)
sdma_add_scripts(sdma, pdata->script_addrs);
- if (pdata) {
- ret = sdma_get_firmware(sdma, pdata->fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
- } else {
- /*
- * Because that device tree does not encode ROM script address,
- * the RAM script in firmware is mandatory for device tree
- * probe, otherwise it fails.
- */
- ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
- &fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware name\n");
- else {
- ret = sdma_get_firmware(sdma, fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
- }
- }
-
sdma->dma_device.dev = &pdev->dev;
sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
@@ -2161,6 +2140,33 @@ static int sdma_probe(struct platform_device *pdev)
of_node_put(spba_bus);
}
+ /*
+ * Kick off firmware loading as the very last step:
+ * attempt to load firmware only if we're not on the error path, because
+ * the firmware callback requires a fully functional and allocated sdma
+ * instance.
+ */
+ if (pdata) {
+ ret = sdma_get_firmware(sdma, pdata->fw_name);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
+ } else {
+ /*
+ * Because the device tree does not encode the ROM script address,
+ * the RAM script in firmware is mandatory for device tree
+ * probe, otherwise it fails.
+ */
+ ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
+ &fw_name);
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to get firmware name\n");
+ } else {
+ ret = sdma_get_firmware(sdma, fw_name);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
+ }
+ }
+
return 0;
err_register:
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 4b43844f6af5..8e90a405939d 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -799,6 +799,9 @@ static u32 process_channel_irqs(struct bam_device *bdev)
/* Number of bytes available to read */
avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
+ if (offset < bchan->head)
+ avail--;
+
list_for_each_entry_safe(async_desc, tmp,
&bchan->desc_list, desc_node) {
/* Not enough data to read */
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index a2384184a7de..b07c17643210 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -47,11 +47,6 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
bgrt->version);
goto out;
}
- if (bgrt->status & 0xfe) {
- pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n",
- bgrt->status);
- goto out;
- }
if (bgrt->image_type != 0) {
pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
bgrt->image_type);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 16b2137d117c..4b7cf7bc0ded 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -1009,14 +1009,16 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
/* first try to find a slot in an existing linked list entry */
for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
- rsv = __va(prsv);
+ rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
if (index < rsv->size) {
rsv->entry[index].base = addr;
rsv->entry[index].size = size;
+ memunmap(rsv);
return 0;
}
+ memunmap(rsv);
}
/* no slot found - allocate a new linked list entry */
@@ -1024,7 +1026,13 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
if (!rsv)
return -ENOMEM;
- rsv->size = EFI_MEMRESERVE_COUNT(PAGE_SIZE);
+ /*
+ * The memremap() call above assumes that a linux_efi_memreserve entry
+ * never crosses a page boundary, so let's ensure that this remains true
+ * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
+ * using SZ_4K explicitly in the size calculation below.
+ */
+ rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
atomic_set(&rsv->count, 1);
rsv->entry[0].base = addr;
rsv->entry[0].size = size;
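A standalone sketch of the sizing logic referenced in the comment above (struct shapes simplified, not the exact linux_efi_memreserve layout): sizing each table for SZ_4K keeps it within a single 4 KiB page, so an entry memremap()'ed later never straddles a page boundary, whatever page size the running kernel uses:

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified shapes; the kernel's linux_efi_memreserve differs in detail. */
    struct rsv_entry { unsigned long long base, size; };
    struct memreserve {
            int size;                /* capacity, in entries */
            int count;               /* entries in use */
            unsigned long long next; /* phys addr of the next table */
            struct rsv_entry entry[];
    };

    #define SZ_4K 4096
    #define MEMRESERVE_COUNT(sz) \
            (((sz) - sizeof(struct memreserve)) / sizeof(struct rsv_entry))

    int main(void)
    {
            /* Each table fits one 4 KiB page even if it was allocated by a
             * kernel running with 16k or 64k pages and then kexec'ed. */
            printf("%zu entries per 4k table\n", MEMRESERVE_COUNT(SZ_4K));
            return 0;
    }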
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 61e099826cbb..35dccc88ac0a 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -43,11 +43,13 @@ static int efibc_set_variable(const char *name, const char *value)
efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data);
memcpy(&entry->var.VendorGuid, &guid, sizeof(guid));
- ret = efivar_entry_set(entry,
- EFI_VARIABLE_NON_VOLATILE
- | EFI_VARIABLE_BOOTSERVICE_ACCESS
- | EFI_VARIABLE_RUNTIME_ACCESS,
- size, entry->var.Data, NULL);
+ ret = efivar_entry_set_safe(entry->var.VariableName,
+ entry->var.VendorGuid,
+ EFI_VARIABLE_NON_VOLATILE
+ | EFI_VARIABLE_BOOTSERVICE_ACCESS
+ | EFI_VARIABLE_RUNTIME_ACCESS,
+ false, size, entry->var.Data);
+
if (ret)
pr_err("failed to set %s EFI variable: 0x%x\n",
name, ret);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index aec7bd86ae7e..9c9b965d7d6d 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -118,8 +118,15 @@ static void of_gpio_flags_quirks(struct device_node *np,
* Legacy handling of SPI active high chip select. If we have a
* property named "cs-gpios" we need to inspect the child node
* to determine if the flags should have inverted semantics.
+ *
+ * This does not apply to an SPI device named "spi-gpio", because
+ * these have traditionally obtained their own GPIOs by parsing
+ * the device tree directly and did not respect any "spi-cs-high"
+ * property on the SPI bus children.
*/
- if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") &&
+ if (IS_ENABLED(CONFIG_SPI_MASTER) &&
+ !strcmp(propname, "cs-gpios") &&
+ !of_device_is_compatible(np, "spi-gpio") &&
of_property_read_bool(np, "cs-gpios")) {
struct device_node *child;
u32 cs;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index b610e3b30d95..2f18c64d531f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1959,25 +1959,6 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
gfx_v9_0_init_compute_vmid(adev);
-
- mutex_lock(&adev->grbm_idx_mutex);
- /*
- * making sure that the following register writes will be broadcasted
- * to all the shaders
- */
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-
- WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
- (adev->gfx.config.sc_prim_fifo_size_frontend <<
- PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
- (adev->gfx.config.sc_prim_fifo_size_backend <<
- PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
- (adev->gfx.config.sc_hiz_tile_fifo_size <<
- PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
- (adev->gfx.config.sc_earlyz_tile_fifo_size <<
- PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
- mutex_unlock(&adev->grbm_idx_mutex);
-
}
static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index f1d326caf69e..a7e8340baf90 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -326,7 +326,7 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
if (ret)
return ret;
- ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index ae64ff7153d6..1cd5a8b5cdc1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -916,8 +916,10 @@ static int init_thermal_controller(
PHM_PlatformCaps_ThermalController
);
- if (0 == powerplay_table->usFanTableOffset)
+ if (0 == powerplay_table->usFanTableOffset) {
+ hwmgr->thermal_controller.use_hw_fan_control = 1;
return 0;
+ }
fan_table = (const PPTable_Generic_SubTable_Header *)
(((unsigned long)powerplay_table) +
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index c92999aac07c..eccb26fddbd0 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -694,6 +694,7 @@ struct pp_thermal_controller_info {
uint8_t ucType;
uint8_t ucI2cLine;
uint8_t ucI2cAddress;
+ uint8_t use_hw_fan_control;
struct pp_fan_info fanInfo;
struct pp_advance_fan_control_parameters advanceFanControlParameters;
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 2d4cfe14f72e..29e641c6a5db 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -2092,6 +2092,10 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
return 0;
}
+ /* use hardware fan control */
+ if (hwmgr->thermal_controller.use_hw_fan_control)
+ return 0;
+
tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
usPWMMin * duty100;
do_div(tmp64, 10000);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 72d01e873160..5418a1a87b2c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -760,7 +760,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
if (IS_ERR(gpu->cmdbuf_suballoc)) {
dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
ret = PTR_ERR(gpu->cmdbuf_suballoc);
- goto fail;
+ goto destroy_iommu;
}
/* Create buffer: */
@@ -768,7 +768,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
PAGE_SIZE);
if (ret) {
dev_err(gpu->dev, "could not create command buffer\n");
- goto destroy_iommu;
+ goto destroy_suballoc;
}
if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
@@ -800,6 +800,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
free_buffer:
etnaviv_cmdbuf_free(&gpu->buffer);
gpu->buffer.suballoc = NULL;
+destroy_suballoc:
+ etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
+ gpu->cmdbuf_suballoc = NULL;
destroy_iommu:
etnaviv_iommu_destroy(gpu->mmu);
gpu->mmu = NULL;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 029fd8ec1857..f0d45ccc1aac 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1888,12 +1888,12 @@ static int ring_request_alloc(struct i915_request *request)
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
- ret = switch_context(request);
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
if (ret)
return ret;
- /* Unconditionally invalidate GPU caches and TLBs. */
- ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ ret = switch_context(request);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 9cc1d678674f..c436a28d50e4 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -91,14 +91,14 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
ipu_dc_disable(ipu);
ipu_prg_disable(ipu);
+ drm_crtc_vblank_off(crtc);
+
spin_lock_irq(&crtc->dev->event_lock);
- if (crtc->state->event) {
+ if (crtc->state->event && !crtc->state->active) {
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
-
- drm_crtc_vblank_off(crtc);
}
static void imx_drm_crtc_reset(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index d11e2281dde6..7e43b25785f7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -63,7 +63,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
return 0;
err_free:
- drm_gem_object_put_unlocked(&shmem->base);
+ drm_gem_handle_delete(file, args->handle);
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index e62fe24b1a2e..5bb0f0a084e9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -619,11 +619,11 @@ static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
output = vgdev->outputs + scanout;
new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
+ drm_connector_update_edid_property(&output->conn, new_edid);
spin_lock(&vgdev->display_info_lock);
old_edid = output->edid;
output->edid = new_edid;
- drm_connector_update_edid_property(&output->conn, output->edid);
spin_unlock(&vgdev->display_info_lock);
kfree(old_edid);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index eac0c54c5970..b032d3899fa3 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -80,6 +80,7 @@
#define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220
#define HID_DEVICE_ID_ALPS_U1 0x1215
#define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C
+#define HID_DEVICE_ID_ALPS_1222 0x1222
#define USB_VENDOR_ID_AMI 0x046b
@@ -269,6 +270,7 @@
#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053
+#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2 0x0939
#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
#define USB_DEVICE_ID_ASUS_AK1D 0x1125
#define USB_DEVICE_ID_CHICONY_TOSHIBA_WT10A 0x1408
@@ -569,6 +571,7 @@
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e
+#define USB_DEVICE_ID_HUION_HS64 0x006d
#define USB_VENDOR_ID_IBM 0x04b3
#define USB_DEVICE_ID_IBM_SCROLLPOINT_III 0x3100
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index e564bff86515..bfcf2ee58d14 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -30,6 +30,7 @@
#define REPORT_ID_HIDPP_SHORT 0x10
#define REPORT_ID_HIDPP_LONG 0x11
+#define REPORT_ID_HIDPP_VERY_LONG 0x12
#define HIDPP_REPORT_SHORT_LENGTH 7
#define HIDPP_REPORT_LONG_LENGTH 20
@@ -1242,7 +1243,8 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
int ret;
if ((buf[0] == REPORT_ID_HIDPP_SHORT) ||
- (buf[0] == REPORT_ID_HIDPP_LONG)) {
+ (buf[0] == REPORT_ID_HIDPP_LONG) ||
+ (buf[0] == REPORT_ID_HIDPP_VERY_LONG)) {
if (count < 2)
return -EINVAL;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 5df5dd56ecc8..b603c14d043b 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1776,6 +1776,10 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_ALPS_JP,
HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) },
+ { .driver_data = MT_CLS_WIN_8_DUAL,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_ALPS_JP,
+ HID_DEVICE_ID_ALPS_1222) },
/* Lenovo X1 TAB Gen 2 */
{ .driver_data = MT_CLS_WIN_8_DUAL,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index e5ca6fe2ca57..671a285724f9 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -42,6 +42,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK), HID_QUIRK_NOGET },
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index 8fe02d81265d..914fb527ae7a 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -369,6 +369,8 @@ static const struct hid_device_id uclogic_devices[] = {
USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HUION,
USB_DEVICE_ID_HUION_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HUION,
+ USB_DEVICE_ID_HUION_HS64) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
USB_DEVICE_ID_HUION_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 0187c9f8fc22..273d784fff66 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -977,6 +977,8 @@ int uclogic_params_init(struct uclogic_params *params,
/* FALL THROUGH */
case VID_PID(USB_VENDOR_ID_HUION,
USB_DEVICE_ID_HUION_TABLET):
+ case VID_PID(USB_VENDOR_ID_HUION,
+ USB_DEVICE_ID_HUION_HS64):
case VID_PID(USB_VENDOR_ID_UCLOGIC,
USB_DEVICE_ID_HUION_TABLET):
case VID_PID(USB_VENDOR_ID_UCLOGIC,
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
index 22ba21457035..aa2dbed30fc3 100644
--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -816,9 +816,9 @@ static int load_fw_from_host(struct ishtp_cl_data *client_data)
goto end_err_fw_release;
release_firmware(fw);
- kfree(filename);
dev_info(cl_data_to_dev(client_data), "ISH firmware %s loaded\n",
filename);
+ kfree(filename);
return 0;
end_err_fw_release:
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index c0487b34d2cf..6ba944b40fdb 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -891,7 +891,7 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
*/
static int hid_ishtp_cl_suspend(struct device *device)
{
- struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
+ struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
@@ -912,7 +912,7 @@ static int hid_ishtp_cl_suspend(struct device *device)
*/
static int hid_ishtp_cl_resume(struct device *device)
{
- struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
+ struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 794e700d65f7..c47c3328a0f4 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -471,7 +471,6 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
}
ishtp_device_ready = true;
- dev_set_drvdata(&device->dev, device);
return device;
}
@@ -640,6 +639,20 @@ void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device)
EXPORT_SYMBOL(ishtp_get_drvdata);
/**
+ * ishtp_dev_to_cl_device() - get ishtp_cl_device instance from device instance
+ * @device: device instance
+ *
 * Get the ishtp_cl_device instance which embeds the device instance in it.
+ *
+ * Return: pointer to ishtp_cl_device instance
+ */
+struct ishtp_cl_device *ishtp_dev_to_cl_device(struct device *device)
+{
+ return to_ishtp_cl_device(device);
+}
+EXPORT_SYMBOL(ishtp_dev_to_cl_device);
+
+/**
* ishtp_bus_new_client() - Create a new client
* @dev: ISHTP device instance
*
diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c
index c67c961ab6cc..a4c1aacba1ff 100644
--- a/drivers/irqchip/irq-csky-mpintc.c
+++ b/drivers/irqchip/irq-csky-mpintc.c
@@ -89,8 +89,19 @@ static int csky_irq_set_affinity(struct irq_data *d,
if (cpu >= nr_cpu_ids)
return -EINVAL;
- /* Enable interrupt destination */
- cpu |= BIT(31);
+ /*
+ * The csky,mpintc can support automatic irq delivery, but it can
+ * only deliver an external irq to one cpu or to all cpus; it
+ * cannot deliver an external irq to an arbitrary group of cpus
+ * described by a cpu_mask.
+ * So we only use auto delivery mode when the affinity mask_val
+ * is equal to cpu_present_mask.
+ */
+ if (cpumask_equal(mask_val, cpu_present_mask))
+ cpu = 0;
+ else
+ cpu |= BIT(31);
writel_relaxed(cpu, INTCG_base + INTCG_CIDSTR + offset);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d29b44b677e4..35500801dc2b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -733,32 +733,43 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
}
static int its_wait_for_range_completion(struct its_node *its,
- struct its_cmd_block *from,
+ u64 prev_idx,
struct its_cmd_block *to)
{
- u64 rd_idx, from_idx, to_idx;
+ u64 rd_idx, to_idx, linear_idx;
u32 count = 1000000; /* 1s! */
- from_idx = its_cmd_ptr_to_offset(its, from);
+ /* Linearize to_idx if the command set has wrapped around */
to_idx = its_cmd_ptr_to_offset(its, to);
+ if (to_idx < prev_idx)
+ to_idx += ITS_CMD_QUEUE_SZ;
+
+ linear_idx = prev_idx;
while (1) {
+ s64 delta;
+
rd_idx = readl_relaxed(its->base + GITS_CREADR);
- /* Direct case */
- if (from_idx < to_idx && rd_idx >= to_idx)
- break;
+ /*
+ * Compute the read pointer progress, taking the
+ * potential wrap-around into account.
+ */
+ delta = rd_idx - prev_idx;
+ if (rd_idx < prev_idx)
+ delta += ITS_CMD_QUEUE_SZ;
- /* Wrapped case */
- if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
+ linear_idx += delta;
+ if (linear_idx >= to_idx)
break;
count--;
if (!count) {
- pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
- from_idx, to_idx, rd_idx);
+ pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
+ to_idx, linear_idx);
return -1;
}
+ prev_idx = rd_idx;
cpu_relax();
udelay(1);
}
@@ -775,6 +786,7 @@ void name(struct its_node *its, \
struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
synctype *sync_obj; \
unsigned long flags; \
+ u64 rd_idx; \
\
raw_spin_lock_irqsave(&its->lock, flags); \
\
@@ -796,10 +808,11 @@ void name(struct its_node *its, \
} \
\
post: \
+ rd_idx = readl_relaxed(its->base + GITS_CREADR); \
next_cmd = its_post_commands(its); \
raw_spin_unlock_irqrestore(&its->lock, flags); \
\
- if (its_wait_for_range_completion(its, cmd, next_cmd)) \
+ if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
}
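The rework above replaces raw offset comparisons with a monotonically increasing linear index; this small userspace model (queue size and offsets hypothetical) shows how accumulating read-pointer deltas survives a wrap of the command queue:

    #include <stdio.h>

    #define QUEUE_SZ 65536ULL /* stand-in for ITS_CMD_QUEUE_SZ */

    int main(void)
    {
            /* Hypothetical byte offsets into the command queue. */
            unsigned long long prev = 65000, to = 200, linear;
            unsigned long long samples[] = { 65400, 100, 300 }; /* fake CREADR reads */

            if (to < prev)          /* linearize the target past the wrap */
                    to += QUEUE_SZ; /* 200 -> 65736 */
            linear = prev;

            for (int i = 0; i < 3; i++) {
                    unsigned long long rd = samples[i];
                    long long delta = (long long)(rd - prev);

                    if (rd < prev)  /* the read pointer wrapped around */
                            delta += QUEUE_SZ;
                    linear += delta;
                    prev = rd;
                    printf("rd=%llu linear=%llu%s\n", rd, linear,
                           linear >= to ? " (range complete)" : "");
            }
            return 0;
    }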
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 6377cb864f4c..f3e44d6d9255 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -461,8 +461,12 @@ static void gic_deactivate_unhandled(u32 irqnr)
static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
+ bool irqs_enabled = interrupts_enabled(regs);
int err;
+ if (irqs_enabled)
+ nmi_enter();
+
if (static_branch_likely(&supports_deactivate_key))
gic_write_eoir(irqnr);
/*
@@ -474,6 +478,9 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
err = handle_domain_nmi(gic_data.domain, irqnr, regs);
if (err)
gic_deactivate_unhandled(irqnr);
+
+ if (irqs_enabled)
+ nmi_exit();
}
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index d32268cc1174..f3985469c221 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -388,7 +388,7 @@ static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
cd = irq_data_get_irq_chip_data(d);
- write_gic_vl_map(intr, cd->map);
+ write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
if (cd->mask)
write_gic_vl_smask(BIT(intr));
}
@@ -517,7 +517,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
- write_gic_vo_map(intr, map);
+ write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
}
spin_unlock_irqrestore(&gic_lock, flags);
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 011b60a49e3f..ef4d625d2d80 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -159,9 +159,9 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom
parent_fwspec.param[1] = vint_desc->vint_id;
parent_virq = irq_create_fwspec_mapping(&parent_fwspec);
- if (parent_virq <= 0) {
+ if (parent_virq == 0) {
kfree(vint_desc);
- return ERR_PTR(parent_virq);
+ return ERR_PTR(-EINVAL);
}
vint_desc->parent_virq = parent_virq;
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index 352e803f566e..728733a514c7 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -140,8 +140,8 @@ static char __init *dm_parse_table_entry(struct dm_device *dev, char *str)
return ERR_PTR(-EINVAL);
}
/* target_args */
- dev->target_args_array[n] = kstrndup(field[3], GFP_KERNEL,
- DM_MAX_STR_SIZE);
+ dev->target_args_array[n] = kstrndup(field[3], DM_MAX_STR_SIZE,
+ GFP_KERNEL);
if (!dev->target_args_array[n])
return ERR_PTR(-ENOMEM);
@@ -272,10 +272,10 @@ static int __init dm_init_init(void)
return 0;
if (strlen(create) >= DM_MAX_STR_SIZE) {
- DMERR("Argument is too big. Limit is %d\n", DM_MAX_STR_SIZE);
+ DMERR("Argument is too big. Limit is %d", DM_MAX_STR_SIZE);
return -EINVAL;
}
- str = kstrndup(create, GFP_KERNEL, DM_MAX_STR_SIZE);
+ str = kstrndup(create, DM_MAX_STR_SIZE, GFP_KERNEL);
if (!str)
return -ENOMEM;
@@ -283,7 +283,7 @@ static int __init dm_init_init(void)
if (r)
goto out;
- DMINFO("waiting for all devices to be available before creating mapped devices\n");
+ DMINFO("waiting for all devices to be available before creating mapped devices");
wait_for_device_probe();
list_for_each_entry(dev, &devices, list) {
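The fix above simply reorders arguments: kstrndup(), like userspace strndup(), takes the length limit as its second argument, with the GFP flags last. A trivial userspace analogue (the table string is an illustrative example, not taken from the patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            const char *create = "name,,,ro,0 4096 linear /dev/sda 0"; /* example */
            char *str = strndup(create, 4096); /* length limit second, as in kstrndup */

            if (!str)
                    return 1;
            printf("%s\n", str);
            free(str);
            return 0;
    }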
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 9ea2b0291f20..e549392e0ea5 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -60,6 +60,7 @@
#define WRITE_LOG_VERSION 1ULL
#define WRITE_LOG_MAGIC 0x6a736677736872ULL
+#define WRITE_LOG_SUPER_SECTOR 0
/*
* The disk format for this is braindead simple.
@@ -115,6 +116,7 @@ struct log_writes_c {
struct list_head logging_blocks;
wait_queue_head_t wait;
struct task_struct *log_kthread;
+ struct completion super_done;
};
struct pending_block {
@@ -180,6 +182,14 @@ static void log_end_io(struct bio *bio)
bio_put(bio);
}
+static void log_end_super(struct bio *bio)
+{
+ struct log_writes_c *lc = bio->bi_private;
+
+ complete(&lc->super_done);
+ log_end_io(bio);
+}
+
/*
* Meant to be called if there is an error, it will free all the pages
* associated with the block.
@@ -215,7 +225,8 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, lc->logdev->bdev);
- bio->bi_end_io = log_end_io;
+ bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
+ log_end_super : log_end_io;
bio->bi_private = lc;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -418,11 +429,18 @@ static int log_super(struct log_writes_c *lc)
super.nr_entries = cpu_to_le64(lc->logged_entries);
super.sectorsize = cpu_to_le32(lc->sectorsize);
- if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) {
+ if (write_metadata(lc, &super, sizeof(super), NULL, 0,
+ WRITE_LOG_SUPER_SECTOR)) {
DMERR("Couldn't write super");
return -1;
}
+ /*
+ * The super sector should be written in order; otherwise the
+ * nr_entries could be rewritten incorrectly by an old bio.
+ */
+ wait_for_completion_io(&lc->super_done);
+
return 0;
}
@@ -531,6 +549,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
INIT_LIST_HEAD(&lc->unflushed_blocks);
INIT_LIST_HEAD(&lc->logging_blocks);
init_waitqueue_head(&lc->wait);
+ init_completion(&lc->super_done);
atomic_set(&lc->io_blocks, 0);
atomic_set(&lc->pending_blocks, 0);
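A condensed sketch of the ordering mechanism introduced above, with the context reduced to the relevant fields (the real driver initializes the completion once in the constructor; the demo_ names are illustrative):

    #include <linux/bio.h>
    #include <linux/completion.h>

    struct log_ctx {
            struct completion super_done;
    };

    /* bi_end_io used for the super-sector bio only. */
    static void demo_log_end_super(struct bio *bio)
    {
            struct log_ctx *lc = bio->bi_private;

            complete(&lc->super_done);
            bio_put(bio);
    }

    /* Submit the super bio, then block until it has hit the log device,
     * so no later log write can land ahead of the updated nr_entries. */
    static void demo_write_super(struct log_ctx *lc, struct bio *super_bio)
    {
            reinit_completion(&lc->super_done);
            super_bio->bi_end_io = demo_log_end_super;
            super_bio->bi_private = lc;
            submit_bio(super_bio);
            wait_for_completion_io(&lc->super_done);
    }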
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 350cf0451456..ec8b27e20de3 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -561,7 +561,7 @@ static char **realloc_argv(unsigned *size, char **old_argv)
gfp = GFP_NOIO;
}
argv = kmalloc_array(new_size, sizeof(*argv), gfp);
- if (argv) {
+ if (argv && old_argv) {
memcpy(argv, old_argv, *size * sizeof(*argv));
*size = new_size;
}
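The guard added above avoids calling memcpy() with a NULL source on the very first allocation (old_argv starts out NULL), which is undefined behaviour even for a zero-byte copy. A self-contained userspace rendering of the corrected pattern, as a sketch rather than the exact kernel helper:

    #include <stdlib.h>
    #include <string.h>

    static char **grow_argv(unsigned *size, char **old_argv)
    {
            unsigned new_size = *size ? *size * 2 : 8;
            char **argv = malloc(new_size * sizeof(*argv));

            if (!argv)
                    return NULL;
            if (old_argv)   /* first call passes NULL: nothing to copy */
                    memcpy(argv, old_argv, *size * sizeof(*argv));
            *size = new_size;
            free(old_argv);
            return argv;
    }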
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 720d06531aa3..ea24ff0612e3 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -235,8 +235,8 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
BUG();
}
- DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
- block);
+ DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
+ type_str, block);
if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
DMERR("%s: reached maximum errors", v->data_dev->name);
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
index fe8efba2d45f..857991cb3cbb 100644
--- a/drivers/mfd/stmfx.c
+++ b/drivers/mfd/stmfx.c
@@ -204,12 +204,11 @@ static struct irq_chip stmfx_irq_chip = {
static irqreturn_t stmfx_irq_handler(int irq, void *data)
{
struct stmfx *stmfx = data;
- unsigned long n, pending;
- u32 ack;
- int ret;
+ unsigned long bits;
+ u32 pending, ack;
+ int n, ret;
- ret = regmap_read(stmfx->map, STMFX_REG_IRQ_PENDING,
- (u32 *)&pending);
+ ret = regmap_read(stmfx->map, STMFX_REG_IRQ_PENDING, &pending);
if (ret)
return IRQ_NONE;
@@ -224,7 +223,8 @@ static irqreturn_t stmfx_irq_handler(int irq, void *data)
return IRQ_NONE;
}
- for_each_set_bit(n, &pending, STMFX_REG_IRQ_SRC_MAX)
+ bits = pending;
+ for_each_set_bit(n, &bits, STMFX_REG_IRQ_SRC_MAX)
handle_nested_irq(irq_find_mapping(stmfx->irq_domain, n));
return IRQ_HANDLED;
diff --git a/drivers/mtd/nand/raw/ingenic/Kconfig b/drivers/mtd/nand/raw/ingenic/Kconfig
index 19a96ce515c1..66b7cffdb0c2 100644
--- a/drivers/mtd/nand/raw/ingenic/Kconfig
+++ b/drivers/mtd/nand/raw/ingenic/Kconfig
@@ -16,7 +16,7 @@ config MTD_NAND_JZ4780
if MTD_NAND_JZ4780
config MTD_NAND_INGENIC_ECC
- tristate
+ bool
config MTD_NAND_JZ4740_ECC
tristate "Hardware BCH support for JZ4740 SoC"
diff --git a/drivers/mtd/nand/raw/ingenic/Makefile b/drivers/mtd/nand/raw/ingenic/Makefile
index 1ac4f455baea..b63d36889263 100644
--- a/drivers/mtd/nand/raw/ingenic/Makefile
+++ b/drivers/mtd/nand/raw/ingenic/Makefile
@@ -2,7 +2,9 @@
obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
obj-$(CONFIG_MTD_NAND_JZ4780) += ingenic_nand.o
-obj-$(CONFIG_MTD_NAND_INGENIC_ECC) += ingenic_ecc.o
+ingenic_nand-y += ingenic_nand_drv.o
+ingenic_nand-$(CONFIG_MTD_NAND_INGENIC_ECC) += ingenic_ecc.o
+
obj-$(CONFIG_MTD_NAND_JZ4740_ECC) += jz4740_ecc.o
obj-$(CONFIG_MTD_NAND_JZ4725B_BCH) += jz4725b_bch.o
obj-$(CONFIG_MTD_NAND_JZ4780_BCH) += jz4780_bch.o
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index d3e085c5685a..c954189606f6 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -30,7 +30,6 @@ int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
{
return ecc->ops->calculate(ecc, params, buf, ecc_code);
}
-EXPORT_SYMBOL(ingenic_ecc_calculate);
/**
* ingenic_ecc_correct() - detect and correct bit errors
@@ -51,7 +50,6 @@ int ingenic_ecc_correct(struct ingenic_ecc *ecc,
{
return ecc->ops->correct(ecc, params, buf, ecc_code);
}
-EXPORT_SYMBOL(ingenic_ecc_correct);
/**
* ingenic_ecc_get() - get the ECC controller device
@@ -111,7 +109,6 @@ struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *of_node)
}
return ecc;
}
-EXPORT_SYMBOL(of_ingenic_ecc_get);
/**
* ingenic_ecc_release() - release the ECC controller device
@@ -122,7 +119,6 @@ void ingenic_ecc_release(struct ingenic_ecc *ecc)
clk_disable_unprepare(ecc->clk);
put_device(ecc->dev);
}
-EXPORT_SYMBOL(ingenic_ecc_release);
int ingenic_ecc_probe(struct platform_device *pdev)
{
@@ -159,8 +155,3 @@ int ingenic_ecc_probe(struct platform_device *pdev)
return 0;
}
EXPORT_SYMBOL(ingenic_ecc_probe);
-
-MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
-MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
-MODULE_DESCRIPTION("Ingenic ECC common driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index d7b7c0f13909..d7b7c0f13909 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index b5b68aa16eb3..6eb131292eb2 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -4662,7 +4662,6 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
memorg = nanddev_get_memorg(&chip->base);
memorg->planes_per_lun = 1;
memorg->luns_per_target = 1;
- memorg->ntargets = 1;
/*
* Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
@@ -5027,6 +5026,8 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
if (ret)
return ret;
+ memorg->ntargets = maxchips;
+
/* Read the flash type */
ret = nand_detect(chip, table);
if (ret) {
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index b021a5720b42..89773293c64d 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -51,6 +51,7 @@
#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
#define NFC_REG_SPARE_AREA 0x00A0
#define NFC_REG_PAT_ID 0x00A4
+#define NFC_REG_MDMA_CNT 0x00C4
#define NFC_RAM0_BASE 0x0400
#define NFC_RAM1_BASE 0x0800
@@ -69,6 +70,7 @@
#define NFC_PAGE_SHIFT(x) (((x) < 10 ? 0 : (x) - 10) << 8)
#define NFC_SAM BIT(12)
#define NFC_RAM_METHOD BIT(14)
+#define NFC_DMA_TYPE_NORMAL BIT(15)
#define NFC_DEBUG_CTL BIT(31)
/* define bit use in NFC_ST */
@@ -205,14 +207,13 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
* NAND Controller capabilities structure: stores NAND controller capabilities
* for distinction between compatible strings.
*
- * @sram_through_ahb: On A23, we choose to access the internal RAM through AHB
- * instead of MBUS (less configuration). A10, A10s, A13 and
- * A20 use the MBUS but no extra configuration is needed.
+ * @extra_mbus_conf: Contrary to A10, A10s and A13, accessing internal RAM
+ * through MBUS on A23/A33 needs extra configuration.
* @reg_io_data: I/O data register
* @dma_maxburst: DMA maxburst
*/
struct sunxi_nfc_caps {
- bool sram_through_ahb;
+ bool extra_mbus_conf;
unsigned int reg_io_data;
unsigned int dma_maxburst;
};
@@ -368,28 +369,12 @@ static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf,
goto err_unmap_buf;
}
- /*
- * On A23, we suppose the "internal RAM" (p.12 of the NFC user manual)
- * refers to the NAND controller's internal SRAM. This memory is mapped
- * and so is accessible from the AHB. It seems that it can also be
- * accessed by the MBUS. MBUS accesses are mandatory when using the
- * internal DMA instead of the external DMA engine.
- *
- * During DMA I/O operation, either we access this memory from the AHB
- * by clearing the NFC_RAM_METHOD bit, or we set the bit and use the
- * MBUS. In this case, we should also configure the MBUS DMA length
- * NFC_REG_MDMA_CNT(0xC4) to be chunksize * nchunks. NAND I/O over MBUS
- * are also limited to 32kiB pages.
- */
- if (nfc->caps->sram_through_ahb)
- writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
- nfc->regs + NFC_REG_CTL);
- else
- writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
- nfc->regs + NFC_REG_CTL);
-
+ writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
writel(chunksize, nfc->regs + NFC_REG_CNT);
+ if (nfc->caps->extra_mbus_conf)
+ writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT);
dmat = dmaengine_submit(dmad);
@@ -2151,6 +2136,11 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
dmac_cfg.src_maxburst = nfc->caps->dma_maxburst;
dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst;
dmaengine_slave_config(nfc->dmac, &dmac_cfg);
+
+ if (nfc->caps->extra_mbus_conf)
+ writel(readl(nfc->regs + NFC_REG_CTL) |
+ NFC_DMA_TYPE_NORMAL, nfc->regs + NFC_REG_CTL);
+
} else {
dev_warn(dev, "failed to request rxtx DMA channel\n");
}
@@ -2200,7 +2190,7 @@ static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = {
};
static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = {
- .sram_through_ahb = true,
+ .extra_mbus_conf = true,
.reg_io_data = NFC_REG_A23_IO_DATA,
.dma_maxburst = 8,
};
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index e5586390026a..e6c646007cda 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -180,7 +180,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
SPINAND_INFO("GD5F4GQ4xA", 0xF4,
- NAND_MEMORG(1, 2048, 64, 64, 4096, 40, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 6502727049a8..21def3f8fb36 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -100,7 +100,7 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO("MX35LF1GE4AB", 0x12,
- NAND_MEMORG(1, 2048, 64, 64, 1024, 40, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -109,7 +109,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35LF2GE4AB", 0x22,
- NAND_MEMORG(1, 2048, 64, 64, 2048, 20, 2, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 73172d7f512b..0c2ec1c21434 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -1636,6 +1636,95 @@ static int sr2_bit7_quad_enable(struct spi_nor *nor)
return 0;
}
+/**
+ * spi_nor_clear_sr_bp() - clear the Status Register Block Protection bits.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Read-modify-write function that clears the Block Protection bits from the
+ * Status Register without affecting other bits.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_clear_sr_bp(struct spi_nor *nor)
+{
+ int ret;
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+
+ ret = read_sr(nor);
+ if (ret < 0) {
+ dev_err(nor->dev, "error while reading status register\n");
+ return ret;
+ }
+
+ write_enable(nor);
+
+ ret = write_sr(nor, ret & ~mask);
+ if (ret) {
+ dev_err(nor->dev, "write to status register failed\n");
+ return ret;
+ }
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ dev_err(nor->dev, "timeout while writing status register\n");
+ return ret;
+}
+
+/**
+ * spi_nor_spansion_clear_sr_bp() - clear the Status Register Block Protection
+ * bits on spansion flashes.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Read-modify-write function that clears the Block Protection bits from the
+ * Status Register without affecting other bits. The function is tightly
+ * coupled with the spansion_quad_enable() function. Both assume that the Write
+ * Register with 16 bits, together with the Read Configuration Register (35h)
+ * instructions are supported.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_spansion_clear_sr_bp(struct spi_nor *nor)
+{
+ int ret;
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ u8 sr_cr[2] = {0};
+
+ /* Check current Quad Enable bit value. */
+ ret = read_cr(nor);
+ if (ret < 0) {
+ dev_err(nor->dev,
+ "error while reading configuration register\n");
+ return ret;
+ }
+
+ /*
+ * When the configuration register Quad Enable bit is one, only the
+ * Write Status (01h) command with two data bytes may be used.
+ */
+ if (ret & CR_QUAD_EN_SPAN) {
+ sr_cr[1] = ret;
+
+ ret = read_sr(nor);
+ if (ret < 0) {
+ dev_err(nor->dev,
+ "error while reading status register\n");
+ return ret;
+ }
+ sr_cr[0] = ret & ~mask;
+
+ ret = write_sr_cr(nor, sr_cr);
+ if (ret)
+ dev_err(nor->dev, "16-bit write register failed\n");
+ return ret;
+ }
+
+ /*
+ * If the Quad Enable bit is zero, use the Write Status (01h) command
+ * with one data byte.
+ */
+ return spi_nor_clear_sr_bp(nor);
+}
+
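The helpers above reduce to a small piece of mask arithmetic. A minimal standalone sketch, assuming the conventional JEDEC placement of the BP bits in bits 2..4 of the Status Register (the bit positions here are an assumption for illustration, not taken from this patch):

    #include <stdint.h>

    #define SR_BP0 (1u << 2)        /* assumed bit positions */
    #define SR_BP1 (1u << 3)
    #define SR_BP2 (1u << 4)

    /* Clear only the block-protection bits, leaving all others intact. */
    static uint8_t sr_clear_bp(uint8_t sr)
    {
            return sr & (uint8_t)~(SR_BP2 | SR_BP1 | SR_BP0);
    }

This is the same read-modify-write that spi_nor_clear_sr_bp() performs: read the register, clear the three bits, write the result back, then wait until the flash is ready again.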
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
.id = { \
@@ -3660,6 +3749,8 @@ static int spi_nor_init_params(struct spi_nor *nor,
default:
/* Kept only for backward compatibility purpose. */
params->quad_enable = spansion_quad_enable;
+ if (nor->clear_sr_bp)
+ nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
break;
}
@@ -3912,17 +4003,13 @@ static int spi_nor_init(struct spi_nor *nor)
{
int err;
- /*
- * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
- * with the software protection bits set
- */
- if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
- JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
- JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
- nor->info->flags & SPI_NOR_HAS_LOCK) {
- write_enable(nor);
- write_sr(nor, 0);
- spi_nor_wait_till_ready(nor);
+ if (nor->clear_sr_bp) {
+ err = nor->clear_sr_bp(nor);
+ if (err) {
+ dev_err(nor->dev,
+ "fail to clear block protection bits\n");
+ return err;
+ }
}
if (nor->quad_enable) {
@@ -4047,6 +4134,16 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (info->flags & SPI_S3AN)
nor->flags |= SNOR_F_READY_XSR_RDY;
+ /*
+ * Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to power
+ * up with the software protection bits set.
+ */
+ if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
+ nor->info->flags & SPI_NOR_HAS_LOCK)
+ nor->clear_sr_bp = spi_nor_clear_sr_bp;
+
/* Parse the Serial Flash Discoverable Parameters table. */
ret = spi_nor_init_params(nor, &params);
if (ret)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 407f4095a37a..799fc38c5c34 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4320,12 +4320,12 @@ void bond_setup(struct net_device *bond_dev)
bond_dev->features |= NETIF_F_NETNS_LOCAL;
bond_dev->hw_features = BOND_VLAN_FEATURES |
- NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
bond_dev->features |= bond_dev->hw_features;
+ bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
}
/* Destroy a bonding device.
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index f46086fa9064..db91b213eae1 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -436,9 +436,9 @@ int ksz_switch_register(struct ksz_device *dev,
return PTR_ERR(dev->reset_gpio);
if (dev->reset_gpio) {
- gpiod_set_value(dev->reset_gpio, 1);
+ gpiod_set_value_cansleep(dev->reset_gpio, 1);
mdelay(10);
- gpiod_set_value(dev->reset_gpio, 0);
+ gpiod_set_value_cansleep(dev->reset_gpio, 0);
}
mutex_init(&dev->dev_mutex);
@@ -487,7 +487,7 @@ void ksz_switch_remove(struct ksz_device *dev)
dsa_unregister_switch(dev->ds);
if (dev->reset_gpio)
- gpiod_set_value(dev->reset_gpio, 1);
+ gpiod_set_value_cansleep(dev->reset_gpio, 1);
}
EXPORT_SYMBOL(ksz_switch_remove);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 18bc035da850..1fff462a4175 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -843,9 +843,14 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
return err;
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
- if (hweight < AQ_VLAN_MAX_FILTERS)
- err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, true);
+ if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) {
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
+ !(aq_nic->packet_filter & IFF_PROMISC));
+ aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
+ } else {
/* otherwise left in promiscuous mode */
+ aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
+ }
}
return err;
@@ -866,6 +871,7 @@ int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
return -EOPNOTSUPP;
+ aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
if (err)
return err;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 0da5e161ec5d..41172fbebddd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -126,6 +126,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
cfg->features = cfg->aq_hw_caps->hw_features;
+ cfg->is_vlan_force_promisc = true;
}
static int aq_nic_update_link_status(struct aq_nic_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index eb2e3c7c36f9..0f22f5d5691b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -35,6 +35,7 @@ struct aq_nic_cfg_s {
u32 flow_control;
u32 link_speed_msk;
u32 wol;
+ bool is_vlan_force_promisc;
u16 is_mc_list_enabled;
u16 mc_list_count;
bool is_autoneg;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 1c7593d54035..13ac2661a473 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -778,8 +778,15 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
unsigned int packet_filter)
{
unsigned int i = 0U;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+
+ hw_atl_rpfl2promiscuous_mode_en_set(self,
+ IS_FILTER_ENABLED(IFF_PROMISC));
+
+ hw_atl_rpf_vlan_prom_mode_en_set(self,
+ IS_FILTER_ENABLED(IFF_PROMISC) ||
+ cfg->is_vlan_force_promisc);
- hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
hw_atl_rpfl2multicast_flr_en_set(self,
IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
@@ -788,13 +795,13 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
- self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
+ cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
hw_atl_rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled &&
- (i <= self->aq_nic_cfg->mc_list_count)) ?
- 1U : 0U, i);
+ (cfg->is_mc_list_enabled &&
+ (i <= cfg->mc_list_count)) ?
+ 1U : 0U, i);
return aq_hw_err_from_flags(self);
}
@@ -1086,7 +1093,7 @@ static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
{
/* set promisc in case of disabling the vlan filter */
- hw_atl_rpf_vlan_prom_mode_en_set(self, !!!enable);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
return aq_hw_err_from_flags(self);
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 2375a13bb446..262a28ff81fc 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4180,7 +4180,7 @@ static int macb_probe(struct platform_device *pdev)
if (PTR_ERR(mac) == -EPROBE_DEFER) {
err = -EPROBE_DEFER;
goto err_out_free_netdev;
- } else if (!IS_ERR(mac)) {
+ } else if (!IS_ERR_OR_NULL(mac)) {
ether_addr_copy(bp->dev->dev_addr, mac);
} else {
macb_get_hwaddr(bp);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 8a6785173228..492f8769ac12 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -891,7 +891,7 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
u64 *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
- int status;
+ int status, cnt;
u8 link_status = 0;
if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
@@ -902,6 +902,9 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
+ /* check link status before offline tests */
+ link_status = netif_carrier_ok(netdev);
+
if (test->flags & ETH_TEST_FL_OFFLINE) {
if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
@@ -922,13 +925,26 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
test->flags |= ETH_TEST_FL_FAILED;
}
- status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
- if (status) {
- test->flags |= ETH_TEST_FL_FAILED;
- data[4] = -1;
- } else if (!link_status) {
+ /* link status was down prior to test */
+ if (!link_status) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = 1;
+ return;
+ }
+
+ for (cnt = 10; cnt; cnt--) {
+ status = be_cmd_link_status_query(adapter, NULL, &link_status,
+ 0);
+ if (status) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ data[4] = -1;
+ break;
+ }
+
+ if (link_status)
+ break;
+
+ msleep_interruptible(500);
}
}
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 67f9bb6e941b..9b036c857b1d 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1057,7 +1057,7 @@ sis900_open(struct net_device *net_dev)
sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
/* Enable all known interrupts by setting the interrupt mask. */
- sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
sw32(cr, RxENA | sr32(cr));
sw32(ier, IE);
@@ -1578,7 +1578,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
sw32(txdp, sis_priv->tx_ring_dma);
/* Enable all known interrupts by setting the interrupt mask. */
- sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
}
/**
@@ -1618,7 +1618,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
spin_unlock_irqrestore(&sis_priv->lock, flags);
return NETDEV_TX_OK;
}
- sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
+ sis_priv->tx_ring[entry].cmdsts = (OWN | INTR | skb->len);
sw32(cr, TxENA | sr32(cr));
sis_priv->cur_tx ++;
@@ -1674,7 +1674,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
do {
status = sr32(isr);
- if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
+ if ((status & (HIBERR|TxURN|TxERR|TxIDLE|TxDESC|RxORN|RxERR|RxOK)) == 0)
/* nothing interesting happened */
break;
handled = 1;
@@ -1684,7 +1684,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
/* Rx interrupt */
sis900_rx(net_dev);
- if (status & (TxURN | TxERR | TxIDLE))
+ if (status & (TxURN | TxERR | TxIDLE | TxDESC))
/* Tx interrupt */
sis900_finish_xmit(net_dev);
@@ -1896,8 +1896,8 @@ static void sis900_finish_xmit (struct net_device *net_dev)
if (tx_status & OWN) {
/* The packet is not transmitted yet (owned by hardware) !
- * Note: the interrupt is generated only when Tx Machine
- * is idle, so this is an almost impossible case */
+ * Note: with TxDESC ('descriptor interrupt') this is an
+ * almost impossible condition */
break;
}
@@ -2473,7 +2473,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
/* Enable all known interrupts by setting the interrupt mask. */
- sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
sw32(cr, RxENA | sr32(cr));
sw32(ier, IE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 2dcdf761d525..020159622559 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -112,7 +112,7 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
* programmed with (2^32 – <new_sec_value>)
*/
if (gmac4)
- sec = (100000000ULL - sec);
+ sec = -sec;
value = readl(ioaddr + PTP_TCR);
if (value & PTP_TCR_TSCTRLSSR)
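Replacing the magic constant with a plain negation is sound because the comment above asks for 2^32 - <new_sec_value>, and unsigned arithmetic is already reduction modulo 2^32: for a 32-bit unsigned sec, -sec is exactly 2^32 - sec. A quick illustrative check of that identity:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t sec = 5;

            /* unsigned negation is reduction modulo 2^32 */
            assert((uint32_t)-sec == (uint32_t)(4294967296ULL - sec));
            return 0;
    }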
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 06dd51f47cfd..06358fe5b245 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2947,12 +2947,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* Manage tx mitigation */
tx_q->tx_count_frames += nfrags + 1;
- if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
+ if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
+ !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ stmmac_tx_timer_arm(priv, queue);
+ } else {
+ tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
- tx_q->tx_count_frames = 0;
- } else {
- stmmac_tx_timer_arm(priv, queue);
}
skb_tx_timestamp(skb);
@@ -3166,12 +3169,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* element in case of no SG.
*/
tx_q->tx_count_frames += nfrags + 1;
- if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
+ if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
+ !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ stmmac_tx_timer_arm(priv, queue);
+ } else {
+ tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
- tx_q->tx_count_frames = 0;
- } else {
- stmmac_tx_timer_arm(priv, queue);
}
skb_tx_timestamp(skb);
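Both hunks encode the same decision, which reads more easily as a single predicate. A condensed sketch (the standalone helper and its name are assumptions for illustration; the driver open-codes this test):

    #include <stdbool.h>

    /*
     * Defer the TX completion interrupt via the coalescing timer only
     * while the frame budget has not been reached and no hardware TX
     * timestamp was requested on a GMAC4+ core; timestamped frames
     * need a prompt interrupt so the timestamp can be collected.
     */
    static bool stmmac_defer_tx_irq(unsigned int tx_coal_frames,
                                    unsigned int tx_count_frames,
                                    bool gmac4_hw_tstamp_requested)
    {
            return tx_coal_frames > tx_count_frames &&
                   !gmac4_hw_tstamp_requested;
    }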
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index ff61dd8748de..66c8e65f6872 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -63,6 +63,7 @@ MODULE_AUTHOR("Frank Cusack <fcusack@fcusack.com>");
MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
+MODULE_SOFTDEP("pre: arc4");
MODULE_VERSION("1.0.2");
static unsigned int
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index b48006e7fa2f..36916bf51ee6 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2128,12 +2128,12 @@ static void team_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
dev->hw_features = TEAM_VLAN_FEATURES |
- NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
dev->features |= dev->hw_features;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
}
static int team_newlink(struct net *src_net, struct net_device *dev,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d080f8048e52..8b4ad10cf940 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1482,7 +1482,7 @@ static int qmi_wwan_probe(struct usb_interface *intf,
* different. Ignore the current interface if the number of endpoints
* equals the number for the diag interface (two).
*/
- info = (void *)&id->driver_info;
+ info = (void *)id->driver_info;
if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
if (desc->bNumEndpoints == 2)
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 11b9525dff27..311b0cc6eb98 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -350,8 +350,8 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
{
struct dst_entry *dst = skb_dst(skb);
struct net_device *dev = dst->dev;
+ const struct in6_addr *nexthop;
struct neighbour *neigh;
- struct in6_addr *nexthop;
int ret;
nf_reset(skb);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 98af9ecd4a90..ca3793002e2f 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -859,7 +859,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
pci_dev->bus->self->skip_bus_pm = true;
}
- if (pci_dev->skip_bus_pm && !pm_suspend_via_firmware()) {
+ if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) {
dev_dbg(dev, "PCI PM: Skipped\n");
goto Fixup;
}
@@ -914,10 +914,10 @@ static int pci_pm_resume_noirq(struct device *dev)
/*
* In the suspend-to-idle case, devices left in D0 during suspend will
* stay in D0, so it is not necessary to restore or update their
- * configuration here and attempting to put them into D0 again may
- * confuse some firmware, so avoid doing that.
+ * configuration here and attempting to put them into D0 again is
+ * pointless, so avoid doing that.
*/
- if (!pci_dev->skip_bus_pm || pm_suspend_via_firmware())
+ if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform()))
pci_pm_default_resume_early(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index e4221a107dca..09ae8a970880 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -71,6 +71,14 @@ config ARM_DSU_PMU
system, control logic. The PMU allows counting various events related
to DSU.
+config FSL_IMX8_DDR_PMU
+ tristate "Freescale i.MX8 DDR perf monitor"
+ depends on ARCH_MXC
+ help
+ Provides support for the DDR performance monitor in i.MX8, which
+ can give information about memory throughput and other related
+ events.
+
config HISI_PMU
bool "HiSilicon SoC PMU"
depends on ARM64 && ACPI
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 30489941f3d6..2ebb4de17815 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
+obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index d2c2978409d2..acce8781c456 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -71,6 +71,76 @@ static void arm_pmu_acpi_unregister_irq(int cpu)
acpi_unregister_gsi(gsi);
}
+#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
+static struct resource spe_resources[] = {
+ {
+ /* irq */
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device spe_dev = {
+ .name = ARMV8_SPE_PDEV_NAME,
+ .id = -1,
+ .resource = spe_resources,
+ .num_resources = ARRAY_SIZE(spe_resources)
+};
+
+/*
+ * For lack of a better place, hook the normal PMU MADT walk
+ * and create an SPE device if we detect a recent MADT with
+ * a homogeneous PPI mapping.
+ */
+static void arm_spe_acpi_register_device(void)
+{
+ int cpu, hetid, irq, ret;
+ bool first = true;
+ u16 gsi = 0;
+
+ /*
+ * Sanity check all the GICC tables for the same interrupt number.
+ * For now, we only support homogeneous ACPI/SPE machines.
+ */
+ for_each_possible_cpu(cpu) {
+ struct acpi_madt_generic_interrupt *gicc;
+
+ gicc = acpi_cpu_get_madt_gicc(cpu);
+ if (gicc->header.length < ACPI_MADT_GICC_SPE)
+ return;
+
+ if (first) {
+ gsi = gicc->spe_interrupt;
+ if (!gsi)
+ return;
+ hetid = find_acpi_cpu_topology_hetero_id(cpu);
+ first = false;
+ } else if ((gsi != gicc->spe_interrupt) ||
+ (hetid != find_acpi_cpu_topology_hetero_id(cpu))) {
+ pr_warn("ACPI: SPE must be homogeneous\n");
+ return;
+ }
+ }
+
+ irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
+ ACPI_ACTIVE_HIGH);
+ if (irq < 0) {
+ pr_warn("ACPI: SPE Unable to register interrupt: %d\n", gsi);
+ return;
+ }
+
+ spe_resources[0].start = irq;
+ ret = platform_device_register(&spe_dev);
+ if (ret < 0) {
+ pr_warn("ACPI: SPE: Unable to register device\n");
+ acpi_unregister_gsi(gsi);
+ }
+}
+#else
+static inline void arm_spe_acpi_register_device(void)
+{
+}
+#endif /* CONFIG_ARM_SPE_PMU */
+
static int arm_pmu_acpi_parse_irqs(void)
{
int irq, cpu, irq_cpu, err;
@@ -276,6 +346,8 @@ static int arm_pmu_acpi_init(void)
if (acpi_disabled)
return 0;
+ arm_spe_acpi_register_device();
+
ret = arm_pmu_acpi_parse_irqs();
if (ret)
return ret;
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 49b490925255..4e4984a55cd1 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -27,6 +27,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
@@ -1157,7 +1158,13 @@ static const struct of_device_id arm_spe_pmu_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_spe_pmu_of_match);
-static int arm_spe_pmu_device_dt_probe(struct platform_device *pdev)
+static const struct platform_device_id arm_spe_match[] = {
+ { ARMV8_SPE_PDEV_NAME, 0},
+ { }
+};
+MODULE_DEVICE_TABLE(platform, arm_spe_match);
+
+static int arm_spe_pmu_device_probe(struct platform_device *pdev)
{
int ret;
struct arm_spe_pmu *spe_pmu;
@@ -1217,11 +1224,12 @@ static int arm_spe_pmu_device_remove(struct platform_device *pdev)
}
static struct platform_driver arm_spe_pmu_driver = {
+ .id_table = arm_spe_match,
.driver = {
.name = DRVNAME,
.of_match_table = of_match_ptr(arm_spe_pmu_of_match),
},
- .probe = arm_spe_pmu_device_dt_probe,
+ .probe = arm_spe_pmu_device_probe,
.remove = arm_spe_pmu_device_remove,
};
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
new file mode 100644
index 000000000000..63fe21600072
--- /dev/null
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 NXP
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+
+#define COUNTER_CNTL 0x0
+#define COUNTER_READ 0x20
+
+#define COUNTER_DPCR1 0x30
+
+#define CNTL_OVER 0x1
+#define CNTL_CLEAR 0x2
+#define CNTL_EN 0x4
+#define CNTL_EN_MASK 0xFFFFFFFB
+#define CNTL_CLEAR_MASK 0xFFFFFFFD
+#define CNTL_OVER_MASK 0xFFFFFFFE
+
+#define CNTL_CSV_SHIFT 24
+#define CNTL_CSV_MASK (0xFF << CNTL_CSV_SHIFT)
+
+#define EVENT_CYCLES_ID 0
+#define EVENT_CYCLES_COUNTER 0
+#define NUM_COUNTERS 4
+
+#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
+
+#define DDR_PERF_DEV_NAME "imx8_ddr"
+#define DDR_CPUHP_CB_NAME DDR_PERF_DEV_NAME "_perf_pmu"
+
+static DEFINE_IDA(ddr_ida);
+
+static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
+ { .compatible = "fsl,imx8-ddr-pmu",},
+ { .compatible = "fsl,imx8m-ddr-pmu",},
+ { /* sentinel */ }
+};
+
+struct ddr_pmu {
+ struct pmu pmu;
+ void __iomem *base;
+ unsigned int cpu;
+ struct hlist_node node;
+ struct device *dev;
+ struct perf_event *events[NUM_COUNTERS];
+ int active_events;
+ enum cpuhp_state cpuhp_state;
+ int irq;
+ int id;
+};
+
+static ssize_t ddr_perf_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ddr_pmu *pmu = dev_get_drvdata(dev);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
+}
+
+static struct device_attribute ddr_perf_cpumask_attr =
+ __ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);
+
+static struct attribute *ddr_perf_cpumask_attrs[] = {
+ &ddr_perf_cpumask_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ddr_perf_cpumask_attr_group = {
+ .attrs = ddr_perf_cpumask_attrs,
+};
+
+static ssize_t
+ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
+ .id = _id, } \
+ })[0].attr.attr)
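The macro relies on a C99 compound literal: it creates an anonymous one-element array of struct perf_pmu_events_attr and takes the address of the attribute embedded in element 0. At file scope the literal has static storage duration, so the resulting pointer is a valid address constant. A stripped-down sketch of the same trick with a hypothetical struct:

    struct item {
            int id;
    };

    /*
     * The compound literal lives for the whole program, so taking the
     * address of a member of element 0 yields an address constant --
     * exactly what IMX8_DDR_PMU_EVENT_ATTR relies on.
     */
    static int *first_id = &((struct item[]){ { .id = 42 } })[0].id;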
+
+static struct attribute *ddr_perf_events_attrs[] = {
+ IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
+ IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
+ IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
+ IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
+ IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
+ IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
+ IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
+ IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
+ IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
+ IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
+ IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
+ IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
+ IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
+ IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
+ IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
+ IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
+ IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
+ IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
+ IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
+ IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
+ IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
+ IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
+ IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
+ IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
+ IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
+ IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
+ IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
+ IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
+ IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
+ IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
+ NULL,
+};
+
+static struct attribute_group ddr_perf_events_attr_group = {
+ .name = "events",
+ .attrs = ddr_perf_events_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-7");
+
+static struct attribute *ddr_perf_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group ddr_perf_format_attr_group = {
+ .name = "format",
+ .attrs = ddr_perf_format_attrs,
+};
+
+static const struct attribute_group *attr_groups[] = {
+ &ddr_perf_events_attr_group,
+ &ddr_perf_format_attr_group,
+ &ddr_perf_cpumask_attr_group,
+ NULL,
+};
+
+static int ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
+{
+ int i;
+
+ /*
+ * Always map the cycle event to counter 0: the cycles counter is
+ * dedicated to the cycle event and can't be used for the other
+ * events.
+ */
+ if (event == EVENT_CYCLES_ID) {
+ if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
+ return EVENT_CYCLES_COUNTER;
+ else
+ return -ENOENT;
+ }
+
+ for (i = 1; i < NUM_COUNTERS; i++) {
+ if (pmu->events[i] == NULL)
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
+{
+ pmu->events[counter] = NULL;
+}
+
+static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
+{
+ return readl_relaxed(pmu->base + COUNTER_READ + counter * 4);
+}
+
+static int ddr_perf_event_init(struct perf_event *event)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct perf_event *sibling;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+
+ if (event->cpu < 0) {
+ dev_warn(pmu->dev, "Can't provide per-task data!\n");
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * We must NOT create groups containing mixed PMUs, although software
+ * events are acceptable (for example, a group that is read
+ * periodically when an hrtimer, aka cpu-clock, leader triggers).
+ */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader))
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (sibling->pmu != event->pmu &&
+ !is_software_event(sibling))
+ return -EINVAL;
+ }
+
+ event->cpu = pmu->cpu;
+ hwc->idx = -1;
+
+ return 0;
+}
+
+
+static void ddr_perf_event_update(struct perf_event *event)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 delta, prev_raw_count, new_raw_count;
+ int counter = hwc->idx;
+
+ do {
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = ddr_perf_read_counter(pmu, counter);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count);
+
+ delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;
+
+ local64_add(delta, &event->count);
+}
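Masking the difference to 32 bits keeps the update correct across a counter wrap, since the hardware counters are 32 bits wide. A small illustrative check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t prev = 0xFFFFFFF0, now = 0x10;  /* counter wrapped */
            uint64_t delta = (now - prev) & 0xFFFFFFFF;

            assert(delta == 0x20);  /* 0x10 to the wrap + 0x10 after it */
            return 0;
    }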
+
+static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
+ int counter, bool enable)
+{
+ u8 reg = counter * 4 + COUNTER_CNTL;
+ int val;
+
+ if (enable) {
+ /*
+ * must disable the counter first, then enable it again;
+ * otherwise, the cycle counter will not work if its
+ * previous state was enabled.
+ */
+ writel(0, pmu->base + reg);
+ val = CNTL_EN | CNTL_CLEAR;
+ val |= FIELD_PREP(CNTL_CSV_MASK, config);
+ writel(val, pmu->base + reg);
+ } else {
+ /* Disable counter */
+ writel(0, pmu->base + reg);
+ }
+}
+
+static void ddr_perf_event_start(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ local64_set(&hwc->prev_count, 0);
+
+ ddr_perf_counter_enable(pmu, event->attr.config, counter, true);
+
+ hwc->state = 0;
+}
+
+static int ddr_perf_event_add(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter;
+ int cfg = event->attr.config;
+
+ counter = ddr_perf_alloc_counter(pmu, cfg);
+ if (counter < 0) {
+ dev_dbg(pmu->dev, "There are not enough counters\n");
+ return -EOPNOTSUPP;
+ }
+
+ pmu->events[counter] = event;
+ pmu->active_events++;
+ hwc->idx = counter;
+
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ ddr_perf_event_start(event, flags);
+
+ return 0;
+}
+
+static void ddr_perf_event_stop(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
+ ddr_perf_event_update(event);
+
+ hwc->state |= PERF_HES_STOPPED;
+}
+
+static void ddr_perf_event_del(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ ddr_perf_event_stop(event, PERF_EF_UPDATE);
+
+ ddr_perf_free_counter(pmu, counter);
+ pmu->active_events--;
+ hwc->idx = -1;
+}
+
+static void ddr_perf_pmu_enable(struct pmu *pmu)
+{
+ struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
+
+ /* enable cycle counter if cycle event is not in the active list */
+ if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
+ ddr_perf_counter_enable(ddr_pmu,
+ EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER,
+ true);
+}
+
+static void ddr_perf_pmu_disable(struct pmu *pmu)
+{
+ struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
+
+ if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
+ ddr_perf_counter_enable(ddr_pmu,
+ EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER,
+ false);
+}
+
+static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
+ struct device *dev)
+{
+ *pmu = (struct ddr_pmu) {
+ .pmu = (struct pmu) {
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = attr_groups,
+ .event_init = ddr_perf_event_init,
+ .add = ddr_perf_event_add,
+ .del = ddr_perf_event_del,
+ .start = ddr_perf_event_start,
+ .stop = ddr_perf_event_stop,
+ .read = ddr_perf_event_update,
+ .pmu_enable = ddr_perf_pmu_enable,
+ .pmu_disable = ddr_perf_pmu_disable,
+ },
+ .base = base,
+ .dev = dev,
+ };
+
+ pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
+ return pmu->id;
+}
+
+static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
+{
+ int i;
+ struct ddr_pmu *pmu = (struct ddr_pmu *) p;
+ struct perf_event *event, *cycle_event = NULL;
+
+ /* all counters stop if the cycle counter is disabled */
+ ddr_perf_counter_enable(pmu,
+ EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER,
+ false);
+ /*
+ * When the cycle counter overflows, all counters are stopped,
+ * and an IRQ is raised. If any other counter overflows, it
+ * continues counting, and no IRQ is raised.
+ *
+ * Cycles occur at least 4 times as often as other events, so we
+ * can update all events on a cycle counter overflow and not
+ * lose events.
+ */
+ for (i = 0; i < NUM_COUNTERS; i++) {
+
+ if (!pmu->events[i])
+ continue;
+
+ event = pmu->events[i];
+
+ ddr_perf_event_update(event);
+
+ if (event->hw.idx == EVENT_CYCLES_COUNTER)
+ cycle_event = event;
+ }
+
+ ddr_perf_counter_enable(pmu,
+ EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER,
+ true);
+ if (cycle_event)
+ ddr_perf_event_update(cycle_event);
+
+ return IRQ_HANDLED;
+}
+
+static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
+ int target;
+
+ if (cpu != pmu->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&pmu->pmu, cpu, target);
+ pmu->cpu = target;
+
+ WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));
+
+ return 0;
+}
+
+static int ddr_perf_probe(struct platform_device *pdev)
+{
+ struct ddr_pmu *pmu;
+ struct device_node *np;
+ void __iomem *base;
+ char *name;
+ int num;
+ int ret;
+ int irq;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ np = pdev->dev.of_node;
+
+ pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ num = ddr_perf_init(pmu, base, &pdev->dev);
+
+ platform_set_drvdata(pdev, pmu);
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
+ num);
+ if (!name)
+ return -ENOMEM;
+
+ pmu->cpu = raw_smp_processor_id();
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ DDR_CPUHP_CB_NAME,
+ NULL,
+ ddr_perf_offline_cpu);
+
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
+ goto ddr_perf_err;
+ }
+
+ pmu->cpuhp_state = ret;
+
+ /* Register the pmu instance for cpu hotplug */
+ cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+
+ /* Request irq */
+ irq = of_irq_get(np, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get irq: %d", irq);
+ ret = irq;
+ goto ddr_perf_err;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq,
+ ddr_perf_irq_handler,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ DDR_CPUHP_CB_NAME,
+ pmu);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Request irq failed: %d", ret);
+ goto ddr_perf_err;
+ }
+
+ pmu->irq = irq;
+ ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
+ if (ret) {
+ dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
+ goto ddr_perf_err;
+ }
+
+ ret = perf_pmu_register(&pmu->pmu, name, -1);
+ if (ret)
+ goto ddr_perf_err;
+
+ return 0;
+
+ddr_perf_err:
+ if (pmu->cpuhp_state)
+ cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+
+ ida_simple_remove(&ddr_ida, pmu->id);
+ dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
+ return ret;
+}
+
+static int ddr_perf_remove(struct platform_device *pdev)
+{
+ struct ddr_pmu *pmu = platform_get_drvdata(pdev);
+
+ cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ irq_set_affinity_hint(pmu->irq, NULL);
+
+ perf_pmu_unregister(&pmu->pmu);
+
+ ida_simple_remove(&ddr_ida, pmu->id);
+ return 0;
+}
+
+static struct platform_driver imx_ddr_pmu_driver = {
+ .driver = {
+ .name = "imx-ddr-pmu",
+ .of_match_table = imx_ddr_pmu_dt_ids,
+ },
+ .probe = ddr_perf_probe,
+ .remove = ddr_perf_remove,
+};
+
+module_platform_driver(imx_ddr_pmu_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index f464f8cd274b..7e526bcf5e0b 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -113,6 +113,8 @@ static void mtk_eint_mask(struct irq_data *d)
void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
eint->regs->mask_set);
+ eint->cur_mask[d->hwirq >> 5] &= ~mask;
+
writel(mask, reg);
}
@@ -123,6 +125,8 @@ static void mtk_eint_unmask(struct irq_data *d)
void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
eint->regs->mask_clr);
+ eint->cur_mask[d->hwirq >> 5] |= mask;
+
writel(mask, reg);
if (eint->dual_edge[d->hwirq])
@@ -217,19 +221,6 @@ static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
}
}
-static void mtk_eint_chip_read_mask(const struct mtk_eint *eint,
- void __iomem *base, u32 *buf)
-{
- int port;
- void __iomem *reg;
-
- for (port = 0; port < eint->hw->ports; port++) {
- reg = base + eint->regs->mask + (port << 2);
- buf[port] = ~readl_relaxed(reg);
- /* Mask is 0 when irq is enabled, and 1 when disabled. */
- }
-}
-
static int mtk_eint_irq_request_resources(struct irq_data *d)
{
struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
@@ -318,7 +309,7 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
struct mtk_eint *eint = irq_desc_get_handler_data(desc);
unsigned int status, eint_num;
- int offset, index, virq;
+ int offset, mask_offset, index, virq;
void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat);
int dual_edge, start_level, curr_level;
@@ -328,10 +319,24 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
status = readl(reg);
while (status) {
offset = __ffs(status);
+ mask_offset = eint_num >> 5;
index = eint_num + offset;
virq = irq_find_mapping(eint->domain, index);
status &= ~BIT(offset);
+ /*
+ * If we get an interrupt on a pin that was only required
+ * for wake (but no real interrupt requested), mask the
+ * interrupt (as mtk_eint_resume would do anyway later in
+ * the resume sequence).
+ */
+ if (eint->wake_mask[mask_offset] & BIT(offset) &&
+ !(eint->cur_mask[mask_offset] & BIT(offset))) {
+ writel_relaxed(BIT(offset), reg -
+ eint->regs->stat +
+ eint->regs->mask_set);
+ }
+
dual_edge = eint->dual_edge[index];
if (dual_edge) {
/*
@@ -370,7 +375,6 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
int mtk_eint_do_suspend(struct mtk_eint *eint)
{
- mtk_eint_chip_read_mask(eint, eint->base, eint->cur_mask);
mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);
return 0;
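The cur_mask/wake_mask bookkeeping introduced above uses one 32-bit word per bank of 32 EINT lines: hwirq >> 5 selects the word and the low five bits select the bit. A minimal sketch of that lookup, assuming the same layout:

    #include <stdbool.h>
    #include <stdint.h>

    /* true if the line is currently unmasked in the shadow bitmap */
    static bool eint_line_enabled(const uint32_t *cur_mask,
                                  unsigned int hwirq)
    {
            return cur_mask[hwirq >> 5] & (1u << (hwirq & 31));
    }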
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 568ca96cdb6d..3a235487e38d 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -771,6 +771,10 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
if (ret < 0)
goto fail;
+ ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
+ if (ret < 0)
+ goto fail;
+
mcp->irq_controller =
device_property_read_bool(dev, "interrupt-controller");
if (mcp->irq && mcp->irq_controller) {
@@ -812,10 +816,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
goto fail;
}
- ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
- if (ret < 0)
- goto fail;
-
if (one_regmap_config) {
mcp->pinctrl_desc.name = devm_kasprintf(dev, GFP_KERNEL,
"mcp23xxx-pinctrl.%d", raw_chip_address);
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 3b4ca52d2456..fb76fb2e9ea5 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -396,7 +396,7 @@ static int ocelot_pin_function_idx(struct ocelot_pinctrl *info,
return -1;
}
-#define REG(r, info, p) ((r) * (info)->stride + (4 * ((p) / 32)))
+#define REG_ALT(msb, info, p) (OCELOT_GPIO_ALT0 * (info)->stride + 4 * ((msb) + ((info)->stride * ((p) / 32))))
static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int selector, unsigned int group)
@@ -412,19 +412,21 @@ static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
/*
* f is encoded on two bits.
- * bit 0 of f goes in BIT(pin) of ALT0, bit 1 of f goes in BIT(pin) of
- * ALT1
+ * bit 0 of f goes in BIT(pin) of ALT[0], bit 1 of f goes in BIT(pin) of
+ * ALT[1]
* This is racy because both registers can't be updated at the same time
* but it doesn't matter much for now.
*/
- regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT0, info, pin->pin),
+ regmap_update_bits(info->map, REG_ALT(0, info, pin->pin),
BIT(p), f << p);
- regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT1, info, pin->pin),
+ regmap_update_bits(info->map, REG_ALT(1, info, pin->pin),
BIT(p), f << (p - 1));
return 0;
}
+#define REG(r, info, p) ((r) * (info)->stride + (4 * ((p) / 32)))
+
static int ocelot_gpio_set_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned int pin, bool input)
@@ -432,7 +434,7 @@ static int ocelot_gpio_set_direction(struct pinctrl_dev *pctldev,
struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
unsigned int p = pin % 32;
- regmap_update_bits(info->map, REG(OCELOT_GPIO_OE, info, p), BIT(p),
+ regmap_update_bits(info->map, REG(OCELOT_GPIO_OE, info, pin), BIT(p),
input ? 0 : BIT(p));
return 0;
@@ -445,9 +447,9 @@ static int ocelot_gpio_request_enable(struct pinctrl_dev *pctldev,
struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
unsigned int p = offset % 32;
- regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT0, info, offset),
+ regmap_update_bits(info->map, REG_ALT(0, info, offset),
BIT(p), 0);
- regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT1, info, offset),
+ regmap_update_bits(info->map, REG_ALT(1, info, offset),
BIT(p), 0);
return 0;
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 9ac7574e3cfb..a8682f69effc 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -38,7 +38,7 @@ config DASD_PROFILE
depends on DASD
help
Enable this option if you want to see profiling information
- in /proc/dasd/statistics.
+ in /proc/dasd/statistics.
config DASD_ECKD
def_tristate y
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index fab35c6170cc..245f33c2f71e 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -203,7 +203,7 @@ static int __init dasd_feature_list(char *str)
else if (len == 8 && !strncmp(str, "failfast", 8))
features |= DASD_FEATURE_FAILFAST;
else {
- pr_warn("%*s is not a supported device option\n",
+ pr_warn("%.*s is not a supported device option\n",
len, str);
rc = -EINVAL;
}
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index ab0b243a947d..6cc4b19acf85 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -79,27 +79,6 @@ config SCLP_VT220_CONSOLE
Include support for using an IBM SCLP VT220-compatible terminal as a
Linux system console.
-config SCLP_ASYNC
- def_tristate m
- prompt "Support for Call Home via Asynchronous SCLP Records"
- depends on S390
- help
- This option enables the call home function, which is able to inform
- the service element and connected organisations about a kernel panic.
- You should only select this option if you know what you are doing,
- want for inform other people about your kernel panics,
- need this feature and intend to run your kernel in LPAR.
-
-config SCLP_ASYNC_ID
- string "Component ID for Call Home"
- depends on SCLP_ASYNC
- default "000000000"
- help
- The Component ID for Call Home is used to identify the correct
- problem reporting queue the call home records should be sent to.
-
- If your are unsure, please use the default value "000000000".
-
config HMC_DRV
def_tristate m
prompt "Support for file transfers from HMC drive CD/DVD-ROM"
@@ -205,4 +184,3 @@ config S390_VMUR
depends on S390
help
Character device driver for z/VM reader, puncher and printer.
-
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 3072b89785dd..b8a8816d94e7 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_TN3215) += con3215.o
obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
-obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
obj-$(CONFIG_PCI) += sclp_pci.o
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
deleted file mode 100644
index e69b12a40636..000000000000
--- a/drivers/s390/char/sclp_async.c
+++ /dev/null
@@ -1,189 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Enable Asynchronous Notification via SCLP.
- *
- * Copyright IBM Corp. 2009
- * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/stat.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/kmod.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/proc_fs.h>
-#include <linux/sysctl.h>
-#include <linux/utsname.h>
-#include "sclp.h"
-
-static int callhome_enabled;
-static struct sclp_req *request;
-static struct sclp_async_sccb *sccb;
-static int sclp_async_send_wait(char *message);
-static struct ctl_table_header *callhome_sysctl_header;
-static DEFINE_SPINLOCK(sclp_async_lock);
-#define SCLP_NORMAL_WRITE 0x00
-
-struct async_evbuf {
- struct evbuf_header header;
- u64 reserved;
- u8 rflags;
- u8 empty;
- u8 rtype;
- u8 otype;
- char comp_id[12];
- char data[3000]; /* there is still some space left */
-} __attribute__((packed));
-
-struct sclp_async_sccb {
- struct sccb_header header;
- struct async_evbuf evbuf;
-} __attribute__((packed));
-
-static struct sclp_register sclp_async_register = {
- .send_mask = EVTYP_ASYNC_MASK,
-};
-
-static int call_home_on_panic(struct notifier_block *self,
- unsigned long event, void *data)
-{
- strncat(data, init_utsname()->nodename,
- sizeof(init_utsname()->nodename));
- sclp_async_send_wait(data);
- return NOTIFY_DONE;
-}
-
-static struct notifier_block call_home_panic_nb = {
- .notifier_call = call_home_on_panic,
- .priority = INT_MAX,
-};
-
-static int zero;
-static int one = 1;
-
-static struct ctl_table callhome_table[] = {
- {
- .procname = "callhome",
- .data = &callhome_enabled,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
- },
- {}
-};
-
-static struct ctl_table kern_dir_table[] = {
- {
- .procname = "kernel",
- .maxlen = 0,
- .mode = 0555,
- .child = callhome_table,
- },
- {}
-};
-
-/*
- * Function used to transfer asynchronous notification
- * records which waits for send completion
- */
-static int sclp_async_send_wait(char *message)
-{
- struct async_evbuf *evb;
- int rc;
- unsigned long flags;
-
- if (!callhome_enabled)
- return 0;
- sccb->evbuf.header.type = EVTYP_ASYNC;
- sccb->evbuf.rtype = 0xA5;
- sccb->evbuf.otype = 0x00;
- evb = &sccb->evbuf;
- request->command = SCLP_CMDW_WRITE_EVENT_DATA;
- request->sccb = sccb;
- request->status = SCLP_REQ_FILLED;
- strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
- /*
- * Retain Queue
- * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
- */
- strncpy(sccb->evbuf.comp_id, CONFIG_SCLP_ASYNC_ID,
- sizeof(sccb->evbuf.comp_id));
- sccb->evbuf.header.length = sizeof(sccb->evbuf);
- sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
- sccb->header.function_code = SCLP_NORMAL_WRITE;
- rc = sclp_add_request(request);
- if (rc)
- return rc;
- spin_lock_irqsave(&sclp_async_lock, flags);
- while (request->status != SCLP_REQ_DONE &&
- request->status != SCLP_REQ_FAILED) {
- sclp_sync_wait();
- }
- spin_unlock_irqrestore(&sclp_async_lock, flags);
- if (request->status != SCLP_REQ_DONE)
- return -EIO;
- rc = ((struct sclp_async_sccb *)
- request->sccb)->header.response_code;
- if (rc != 0x0020)
- return -EIO;
- if (evb->header.flags != 0x80)
- return -EIO;
- return rc;
-}
-
-static int __init sclp_async_init(void)
-{
- int rc;
-
- rc = sclp_register(&sclp_async_register);
- if (rc)
- return rc;
- rc = -EOPNOTSUPP;
- if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
- goto out_sclp;
- rc = -ENOMEM;
- callhome_sysctl_header = register_sysctl_table(kern_dir_table);
- if (!callhome_sysctl_header)
- goto out_sclp;
- request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
- sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!request || !sccb)
- goto out_mem;
- rc = atomic_notifier_chain_register(&panic_notifier_list,
- &call_home_panic_nb);
- if (!rc)
- goto out;
-out_mem:
- kfree(request);
- free_page((unsigned long) sccb);
- unregister_sysctl_table(callhome_sysctl_header);
-out_sclp:
- sclp_unregister(&sclp_async_register);
-out:
- return rc;
-}
-module_init(sclp_async_init);
-
-static void __exit sclp_async_exit(void)
-{
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &call_home_panic_nb);
- unregister_sysctl_table(callhome_sysctl_header);
- sclp_unregister(&sclp_async_register);
- free_page((unsigned long) sccb);
- kfree(request);
-}
-module_exit(sclp_async_exit);
-
-MODULE_AUTHOR("Copyright IBM Corp. 2009");
-MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 405a60538630..08f812475f5e 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -4,7 +4,7 @@
* dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
* dump format as s390 standalone dumps.
*
- * For more information please refer to Documentation/s390/zfcpdump.txt
+ * For more information please refer to Documentation/s390/zfcpdump.rst
*
* Copyright IBM Corp. 2003, 2008
* Author(s): Michael Holzheu
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 4534afc63591..427b2e24a8ce 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -16,9 +16,11 @@
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/slab.h>
+#include <linux/dmapool.h>
#include <asm/airq.h>
#include <asm/isc.h>
+#include <asm/cio.h>
#include "cio.h"
#include "cio_debug.h"
@@ -27,7 +29,7 @@
static DEFINE_SPINLOCK(airq_lists_lock);
static struct hlist_head airq_lists[MAX_ISC+1];
-static struct kmem_cache *airq_iv_cache;
+static struct dma_pool *airq_iv_cache;
/**
* register_adapter_interrupt() - register adapter interrupt handler
@@ -115,6 +117,11 @@ void __init init_airq_interrupts(void)
setup_irq(THIN_INTERRUPT, &airq_interrupt);
}
+static inline unsigned long iv_size(unsigned long bits)
+{
+ return BITS_TO_LONGS(bits) * sizeof(unsigned long);
+}
+
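iv_size() just rounds the bit count up to whole unsigned longs. An illustrative expansion, with BITS_TO_LONGS spelled out as its usual definition (an assumption for the sketch):

    #include <stdio.h>

    #define BITS_PER_BYTE    8
    #define BITS_PER_LONG    (BITS_PER_BYTE * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    int main(void)
    {
            /* 65 bits need two 64-bit longs, i.e. 16 bytes */
            printf("%zu\n", BITS_TO_LONGS(65) * sizeof(unsigned long));
            return 0;
    }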
/**
* airq_iv_create - create an interrupt vector
* @bits: number of bits in the interrupt vector
@@ -132,17 +139,19 @@ struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
goto out;
iv->bits = bits;
iv->flags = flags;
- size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
+ size = iv_size(bits);
if (flags & AIRQ_IV_CACHELINE) {
- if ((cache_line_size() * BITS_PER_BYTE) < bits)
+ if ((cache_line_size() * BITS_PER_BYTE) < bits
+ || !airq_iv_cache)
goto out_free;
- iv->vector = kmem_cache_zalloc(airq_iv_cache, GFP_KERNEL);
+ iv->vector = dma_pool_zalloc(airq_iv_cache, GFP_KERNEL,
+ &iv->vector_dma);
if (!iv->vector)
goto out_free;
} else {
- iv->vector = kzalloc(size, GFP_KERNEL);
+ iv->vector = cio_dma_zalloc(size);
if (!iv->vector)
goto out_free;
}
@@ -178,10 +187,10 @@ out_free:
kfree(iv->ptr);
kfree(iv->bitlock);
kfree(iv->avail);
- if (iv->flags & AIRQ_IV_CACHELINE)
- kmem_cache_free(airq_iv_cache, iv->vector);
+ if (iv->flags & AIRQ_IV_CACHELINE && iv->vector)
+ dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
else
- kfree(iv->vector);
+ cio_dma_free(iv->vector, size);
kfree(iv);
out:
return NULL;
@@ -198,9 +207,9 @@ void airq_iv_release(struct airq_iv *iv)
kfree(iv->ptr);
kfree(iv->bitlock);
if (iv->flags & AIRQ_IV_CACHELINE)
- kmem_cache_free(airq_iv_cache, iv->vector);
+ dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
else
- kfree(iv->vector);
+ cio_dma_free(iv->vector, iv_size(iv->bits));
kfree(iv->avail);
kfree(iv);
}
@@ -295,12 +304,12 @@ unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
}
EXPORT_SYMBOL(airq_iv_scan);
-static int __init airq_init(void)
+int __init airq_init(void)
{
- airq_iv_cache = kmem_cache_create("airq_iv_cache", cache_line_size(),
- cache_line_size(), 0, NULL);
+ airq_iv_cache = dma_pool_create("airq_iv_cache", cio_get_dma_css_dev(),
+ cache_line_size(),
+ cache_line_size(), PAGE_SIZE);
if (!airq_iv_cache)
return -ENOMEM;
return 0;
}
-subsys_initcall(airq_init);
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 603268a33ea1..73582a0a2622 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -63,7 +63,7 @@ static void ccwreq_stop(struct ccw_device *cdev, int rc)
return;
req->done = 1;
ccw_device_set_timeout(cdev, 0);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
if (rc && rc != -ENODEV && req->drc)
rc = req->drc;
req->callback(cdev, req->data, rc);
@@ -86,7 +86,7 @@ static void ccwreq_do(struct ccw_device *cdev)
continue;
}
/* Perform start function. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
rc = cio_start(sch, cp, (u8) req->mask);
if (rc == 0) {
/* I/O started successfully. */
@@ -169,7 +169,7 @@ int ccw_request_cancel(struct ccw_device *cdev)
*/
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
- struct irb *irb = &cdev->private->irb;
+ struct irb *irb = &cdev->private->dma_area->irb;
struct cmd_scsw *scsw = &irb->scsw.cmd;
enum uc_todo todo;
@@ -187,7 +187,8 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
CIO_TRACE_EVENT(2, "sensedata");
CIO_HEX_EVENT(2, &cdev->private->dev_id,
sizeof(struct ccw_dev_id));
- CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
+ CIO_HEX_EVENT(2, &cdev->private->dma_area->irb.ecw,
+ SENSE_MAX_COUNT);
/* Check for command reject. */
if (irb->ecw[0] & SNS0_CMD_REJECT)
return IO_REJECTED;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index a835b31aad99..6392a1b95b02 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -323,36 +323,6 @@ struct chsc_sei {
} __packed __aligned(PAGE_SIZE);
/*
- * Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
- */
-
-#define ND_VALIDITY_VALID 0
-#define ND_VALIDITY_OUTDATED 1
-#define ND_VALIDITY_INVALID 2
-
-struct node_descriptor {
- /* Flags. */
- union {
- struct {
- u32 validity:3;
- u32 reserved:5;
- } __packed;
- u8 byte0;
- } __packed;
-
- /* Node parameters. */
- u32 params:24;
-
- /* Node ID. */
- char type[6];
- char model[3];
- char manufacturer[3];
- char plant[2];
- char seq[12];
- u16 tag;
-} __packed;
-
-/*
* Link Incident Record as defined in SA22-7202, "ESCON I/O Interface"
*/
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 06a91743335a..ba7d2480613b 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -113,6 +113,7 @@ struct subchannel {
enum sch_todo todo;
struct work_struct todo_work;
struct schib_config config;
+ char *driver_override; /* Driver name to force a match */
} __attribute__ ((aligned(8)));
DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
@@ -135,6 +136,8 @@ extern int cio_commit_config(struct subchannel *sch);
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch);
+extern int __init airq_init(void);
+
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index aea502922646..e1f2d0eed544 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -20,6 +20,8 @@
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
+#include <linux/genalloc.h>
+#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>
@@ -165,6 +167,7 @@ static void css_subchannel_release(struct device *dev)
sch->config.intparm = 0;
cio_commit_config(sch);
+ kfree(sch->driver_override);
kfree(sch->lock);
kfree(sch);
}
@@ -224,6 +227,12 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
INIT_WORK(&sch->todo_work, css_sch_todo);
sch->dev.release = &css_subchannel_release;
device_initialize(&sch->dev);
+ /*
+	 * The physical addresses of some of the DMA structures that can
+	 * belong to a subchannel need to fit within 31 bits (e.g. ccw).
+ */
+ sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
+ sch->dev.dma_mask = &sch->dev.coherent_dma_mask;
return sch;
err:
@@ -315,9 +324,57 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(modalias);
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ char *driver_override, *old, *cp;
+
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ device_lock(dev);
+ old = sch->driver_override;
+ if (strlen(driver_override)) {
+ sch->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ sch->driver_override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
+ device_unlock(dev);
+ return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
static struct attribute *subch_attrs[] = {
&dev_attr_type.attr,
&dev_attr_modalias.attr,
+ &dev_attr_driver_override.attr,
NULL,
};
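Writing a driver name to the new attribute restricts matching to that driver; writing an empty string (or a lone newline) clears the override again. For example, "echo vfio_ccw > /sys/bus/css/devices/0.0.0013/driver_override" (the bus ID here is hypothetical) would reserve that subchannel for the vfio_ccw driver, following the same semantics as the PCI bus's driver_override attribute.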
@@ -899,6 +956,13 @@ static int __init setup_css(int nr)
dev_set_name(&css->device, "css%x", nr);
css->device.groups = cssdev_attr_groups;
css->device.release = channel_subsystem_release;
+ /*
+ * We currently allocate notifier bits with this (using
+	 * css->device as the device argument with the DMA API),
+	 * and we are fine with 64-bit addresses.
+ */
+ css->device.coherent_dma_mask = DMA_BIT_MASK(64);
+ css->device.dma_mask = &css->device.coherent_dma_mask;
mutex_init(&css->mutex);
css->cssid = chsc_get_cssid(nr);
@@ -1018,6 +1082,111 @@ static struct notifier_block css_power_notifier = {
.notifier_call = css_power_event,
};
+#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
+static struct gen_pool *cio_dma_pool;
+
+/* Currently cio supports only a single css */
+struct device *cio_get_dma_css_dev(void)
+{
+ return &channel_subsystems[0]->device;
+}
+
+struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
+{
+ struct gen_pool *gp_dma;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ int i;
+
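+	/* min_alloc_order of 3: 8-byte granularity; nid -1: no NUMA preference. */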
+ gp_dma = gen_pool_create(3, -1);
+ if (!gp_dma)
+ return NULL;
+ for (i = 0; i < nr_pages; ++i) {
+ cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
+ CIO_DMA_GFP);
+ if (!cpu_addr)
+ return gp_dma;
+ gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
+ dma_addr, PAGE_SIZE, -1);
+ }
+ return gp_dma;
+}
+
+static void __gp_dma_free_dma(struct gen_pool *pool,
+ struct gen_pool_chunk *chunk, void *data)
+{
+ size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
+
+ dma_free_coherent((struct device *) data, chunk_size,
+ (void *) chunk->start_addr,
+ (dma_addr_t) chunk->phys_addr);
+}
+
+void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
+{
+ if (!gp_dma)
+ return;
+	/* this is quite ugly, but we have no better idea */
+ gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
+ gen_pool_destroy(gp_dma);
+}
+
+static int cio_dma_pool_init(void)
+{
+ /* No need to free up the resources: compiled in */
+ cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
+ if (!cio_dma_pool)
+ return -ENOMEM;
+ return 0;
+}
+
+void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size)
+{
+ dma_addr_t dma_addr;
+ unsigned long addr;
+ size_t chunk_size;
+
+ if (!gp_dma)
+ return NULL;
+ addr = gen_pool_alloc(gp_dma, size);
+ while (!addr) {
+ chunk_size = round_up(size, PAGE_SIZE);
+ addr = (unsigned long) dma_alloc_coherent(dma_dev,
+ chunk_size, &dma_addr, CIO_DMA_GFP);
+ if (!addr)
+ return NULL;
+ gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
+ addr = gen_pool_alloc(gp_dma, size);
+ }
+ return (void *) addr;
+}
+
+void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
+{
+ if (!cpu_addr)
+ return;
+ memset(cpu_addr, 0, size);
+ gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
+}
+
+/*
+ * Allocate dma memory from the css global pool. Intended for memory not
+ * specific to any single device within the css. The allocated memory
+ * is not guaranteed to be 31-bit addressable.
+ *
+ * Caution: Not suitable for early stuff like console.
+ */
+void *cio_dma_zalloc(size_t size)
+{
+ return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
+}
+
+void cio_dma_free(void *cpu_addr, size_t size)
+{
+ cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
+}
+
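Taken together, these helpers form a small allocate/use/free API around a growing gen_pool. A minimal sketch of a hypothetical caller follows; the structure and function names are illustrative and not part of this patch:

	/* Hypothetical user of the css-global DMA pool. */
	struct my_dma_buf {
		unsigned long word[8];
	};

	static struct my_dma_buf *my_dma_buf_alloc(void)
	{
		/* Zeroed, coherent memory; the pool grows on demand. */
		return cio_dma_zalloc(sizeof(struct my_dma_buf));
	}

	static void my_dma_buf_free(struct my_dma_buf *buf)
	{
		/* The memory is cleared before going back to the pool. */
		cio_dma_free(buf, sizeof(*buf));
	}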
/*
 * Now that the driver core is running, we can set up our channel subsystem.
 * The struct subchannel instances are created during probing.
@@ -1059,16 +1228,22 @@ static int __init css_bus_init(void)
if (ret)
goto out_unregister;
ret = register_pm_notifier(&css_power_notifier);
- if (ret) {
- unregister_reboot_notifier(&css_reboot_notifier);
- goto out_unregister;
- }
+ if (ret)
+ goto out_unregister_rn;
+ ret = cio_dma_pool_init();
+ if (ret)
+ goto out_unregister_pmn;
+ airq_init();
css_init_done = 1;
/* Enable default isc for I/O subchannels. */
isc_register(IO_SCH_ISC);
return 0;
+out_unregister_pmn:
+ unregister_pm_notifier(&css_power_notifier);
+out_unregister_rn:
+ unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
while (i-- > 0) {
struct channel_subsystem *css = channel_subsystems[i];
@@ -1222,6 +1397,10 @@ static int css_bus_match(struct device *dev, struct device_driver *drv)
struct css_driver *driver = to_cssdriver(drv);
struct css_device_id *id;
+ /* When driver_override is set, only bind to the matching driver */
+ if (sch->driver_override && strcmp(sch->driver_override, drv->name))
+ return 0;
+
for (id = driver->subchannel_type; id->match_flags; id++) {
if (sch->st == id->type)
return 1;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 1540229a37bb..9985b7484a6b 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -24,6 +24,7 @@
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>
+#include <linux/dma-mapping.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
@@ -687,6 +688,9 @@ ccw_device_release(struct device *dev)
struct ccw_device *cdev;
cdev = to_ccwdev(dev);
+ cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
+ sizeof(*cdev->private->dma_area));
+ cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
/* Release reference of parent subchannel. */
put_device(cdev->dev.parent);
kfree(cdev->private);
@@ -696,15 +700,33 @@ ccw_device_release(struct device *dev)
static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
struct ccw_device *cdev;
+ struct gen_pool *dma_pool;
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
- if (cdev) {
- cdev->private = kzalloc(sizeof(struct ccw_device_private),
- GFP_KERNEL | GFP_DMA);
- if (cdev->private)
- return cdev;
- }
+ if (!cdev)
+ goto err_cdev;
+ cdev->private = kzalloc(sizeof(struct ccw_device_private),
+ GFP_KERNEL | GFP_DMA);
+ if (!cdev->private)
+ goto err_priv;
+ cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
+ cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask;
+ dma_pool = cio_gp_dma_create(&cdev->dev, 1);
+ if (!dma_pool)
+ goto err_dma_pool;
+ cdev->private->dma_pool = dma_pool;
+ cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
+ sizeof(*cdev->private->dma_area));
+ if (!cdev->private->dma_area)
+ goto err_dma_area;
+ return cdev;
+err_dma_area:
+ cio_gp_dma_destroy(dma_pool, &cdev->dev);
+err_dma_pool:
+ kfree(cdev->private);
+err_priv:
kfree(cdev);
+err_cdev:
return ERR_PTR(-ENOMEM);
}
@@ -884,7 +906,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
wake_up(&ccw_device_init_wq);
break;
case DEV_STATE_OFFLINE:
- /*
+ /*
* We can't register the device in interrupt context so
* we schedule a work item.
*/
@@ -1062,6 +1084,14 @@ static int io_subchannel_probe(struct subchannel *sch)
if (!io_priv)
goto out_schedule;
+ io_priv->dma_area = dma_alloc_coherent(&sch->dev,
+ sizeof(*io_priv->dma_area),
+ &io_priv->dma_area_dma, GFP_KERNEL);
+ if (!io_priv->dma_area) {
+ kfree(io_priv);
+ goto out_schedule;
+ }
+
set_io_private(sch, io_priv);
css_schedule_eval(sch->schid);
return 0;
@@ -1088,6 +1118,8 @@ static int io_subchannel_remove(struct subchannel *sch)
set_io_private(sch, NULL);
spin_unlock_irq(sch->lock);
out_free:
+ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+ io_priv->dma_area, io_priv->dma_area_dma);
kfree(io_priv);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return 0;
@@ -1593,13 +1625,19 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
return ERR_CAST(sch);
io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
- if (!io_priv) {
- put_device(&sch->dev);
- return ERR_PTR(-ENOMEM);
- }
+ if (!io_priv)
+ goto err_priv;
+ io_priv->dma_area = dma_alloc_coherent(&sch->dev,
+ sizeof(*io_priv->dma_area),
+ &io_priv->dma_area_dma, GFP_KERNEL);
+ if (!io_priv->dma_area)
+ goto err_dma_area;
set_io_private(sch, io_priv);
cdev = io_subchannel_create_ccwdev(sch);
if (IS_ERR(cdev)) {
+ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+ io_priv->dma_area, io_priv->dma_area_dma);
+ set_io_private(sch, NULL);
put_device(&sch->dev);
kfree(io_priv);
return cdev;
@@ -1607,6 +1645,12 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
cdev->drv = drv;
ccw_device_set_int_class(cdev);
return cdev;
+
+err_dma_area:
+ kfree(io_priv);
+err_priv:
+ put_device(&sch->dev);
+ return ERR_PTR(-ENOMEM);
}
void __init ccw_device_destroy_console(struct ccw_device *cdev)
@@ -1617,6 +1661,8 @@ void __init ccw_device_destroy_console(struct ccw_device *cdev)
set_io_private(sch, NULL);
put_device(&sch->dev);
put_device(&cdev->dev);
+ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+ io_priv->dma_area, io_priv->dma_area_dma);
kfree(io_priv);
}
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 9169af7dbb43..8fc267324ebb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -67,8 +67,10 @@ static void ccw_timeout_log(struct ccw_device *cdev)
sizeof(struct tcw), 0);
} else {
printk(KERN_WARNING "cio: orb indicates command mode\n");
- if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
- (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
+ if ((void *)(addr_t)orb->cmd.cpa ==
+ &private->dma_area->sense_ccw ||
+ (void *)(addr_t)orb->cmd.cpa ==
+ cdev->private->dma_area->iccws)
printk(KERN_WARNING "cio: last channel program "
"(intern):\n");
else
@@ -143,18 +145,22 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
void ccw_device_update_sense_data(struct ccw_device *cdev)
{
memset(&cdev->id, 0, sizeof(cdev->id));
- cdev->id.cu_type = cdev->private->senseid.cu_type;
- cdev->id.cu_model = cdev->private->senseid.cu_model;
- cdev->id.dev_type = cdev->private->senseid.dev_type;
- cdev->id.dev_model = cdev->private->senseid.dev_model;
+ cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type;
+ cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model;
+ cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type;
+ cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model;
}
int ccw_device_test_sense_data(struct ccw_device *cdev)
{
- return cdev->id.cu_type == cdev->private->senseid.cu_type &&
- cdev->id.cu_model == cdev->private->senseid.cu_model &&
- cdev->id.dev_type == cdev->private->senseid.dev_type &&
- cdev->id.dev_model == cdev->private->senseid.dev_model;
+ return cdev->id.cu_type ==
+ cdev->private->dma_area->senseid.cu_type &&
+ cdev->id.cu_model ==
+ cdev->private->dma_area->senseid.cu_model &&
+ cdev->id.dev_type ==
+ cdev->private->dma_area->senseid.dev_type &&
+ cdev->id.dev_model ==
+ cdev->private->dma_area->senseid.dev_model;
}
/*
@@ -342,7 +348,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
cio_disable_subchannel(sch);
/* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
cdev->private->state = state;
@@ -509,13 +515,14 @@ callback:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
- create_fake_irb(&cdev->private->irb,
+ create_fake_irb(&cdev->private->dma_area->irb,
cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
- &cdev->private->irb);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ &cdev->private->dma_area->irb);
+ memset(&cdev->private->dma_area->irb, 0,
+ sizeof(struct irb));
}
ccw_device_report_path_events(cdev);
ccw_device_handle_broken_paths(cdev);
@@ -672,7 +679,8 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
if (scsw_actl(&sch->schib.scsw) != 0 ||
(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
- (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
+ (scsw_stctl(&cdev->private->dma_area->irb.scsw) &
+ SCSW_STCTL_STATUS_PEND)) {
/*
* No final status yet or final status not yet delivered
* to the device driver. Can't do path verification now,
@@ -719,7 +727,7 @@ static int ccw_device_call_handler(struct ccw_device *cdev)
* - fast notification was requested (primary status)
* - unsolicited interrupts
*/
- stctl = scsw_stctl(&cdev->private->irb.scsw);
+ stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw);
ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
(stctl == SCSW_STCTL_STATUS_PEND);
@@ -735,9 +743,9 @@ static int ccw_device_call_handler(struct ccw_device *cdev)
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
- &cdev->private->irb);
+ &cdev->private->dma_area->irb);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
return 1;
}
@@ -759,7 +767,8 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
/* Unit check but no sense data. Need basic sense. */
if (ccw_device_do_sense(cdev, irb) != 0)
goto call_handler_unsol;
- memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+ memcpy(&cdev->private->dma_area->irb, irb,
+ sizeof(struct irb));
cdev->private->state = DEV_STATE_W4SENSE;
cdev->private->intparm = 0;
return;
@@ -842,7 +851,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
if (scsw_fctl(&irb->scsw) &
(SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
cdev->private->flags.dosense = 0;
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
ccw_device_accumulate_irb(cdev, irb);
goto call_handler;
}
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index f6df83a9dfbb..740996d0dc8c 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -99,7 +99,7 @@ static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
static int diag210_get_dev_info(struct ccw_device *cdev)
{
struct ccw_dev_id *dev_id = &cdev->private->dev_id;
- struct senseid *senseid = &cdev->private->senseid;
+ struct senseid *senseid = &cdev->private->dma_area->senseid;
struct diag210 diag_data;
int rc;
@@ -134,8 +134,10 @@ err_failed:
static void snsid_init(struct ccw_device *cdev)
{
cdev->private->flags.esid = 0;
- memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
- cdev->private->senseid.cu_type = 0xffff;
+
+ memset(&cdev->private->dma_area->senseid, 0,
+ sizeof(cdev->private->dma_area->senseid));
+ cdev->private->dma_area->senseid.cu_type = 0xffff;
}
/*
@@ -143,16 +145,16 @@ static void snsid_init(struct ccw_device *cdev)
*/
static int snsid_check(struct ccw_device *cdev, void *data)
{
- struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
+ struct cmd_scsw *scsw = &cdev->private->dma_area->irb.scsw.cmd;
int len = sizeof(struct senseid) - scsw->count;
/* Check for incomplete SENSE ID data. */
if (len < SENSE_ID_MIN_LEN)
goto out_restart;
- if (cdev->private->senseid.cu_type == 0xffff)
+ if (cdev->private->dma_area->senseid.cu_type == 0xffff)
goto out_restart;
/* Check for incompatible SENSE ID data. */
- if (cdev->private->senseid.reserved != 0xff)
+ if (cdev->private->dma_area->senseid.reserved != 0xff)
return -EOPNOTSUPP;
/* Check for extended-identification information. */
if (len > SENSE_ID_BASIC_LEN)
@@ -170,7 +172,7 @@ out_restart:
static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
{
struct ccw_dev_id *id = &cdev->private->dev_id;
- struct senseid *senseid = &cdev->private->senseid;
+ struct senseid *senseid = &cdev->private->dma_area->senseid;
int vm = 0;
if (rc && MACHINE_IS_VM) {
@@ -200,7 +202,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
CIO_TRACE_EVENT(4, "snsid");
CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
@@ -208,7 +210,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev)
snsid_init(cdev);
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_ID;
- cp->cda = (u32) (addr_t) &cdev->private->senseid;
+ cp->cda = (u32) (addr_t) &cdev->private->dma_area->senseid;
cp->count = sizeof(struct senseid);
cp->flags = CCW_FLAG_SLI;
/* Request setup. */
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 4435ae0b3027..d722458c5928 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -429,8 +429,8 @@ struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
if (cdev->private->flags.esid == 0)
return NULL;
for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
- if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
- return cdev->private->senseid.ciw + ciw_cnt;
+ if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct)
+ return cdev->private->dma_area->senseid.ciw + ciw_cnt;
return NULL;
}
@@ -699,6 +699,23 @@ void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
}
EXPORT_SYMBOL_GPL(ccw_device_get_schid);
+/*
+ * Allocate zeroed, DMA-coherent, 31-bit addressable memory from
+ * the subchannel's DMA pool. The maximum supported allocation
+ * size is PAGE_SIZE.
+ */
+void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size)
+{
+ return cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
+}
+EXPORT_SYMBOL(ccw_device_dma_zalloc);
+
+void ccw_device_dma_free(struct ccw_device *cdev, void *cpu_addr, size_t size)
+{
+ cio_gp_dma_free(cdev->private->dma_pool, cpu_addr, size);
+}
+EXPORT_SYMBOL(ccw_device_dma_free);
+
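The exported wrappers let a CCW device driver allocate channel-program memory that is guaranteed to be 31-bit addressable and coherent. A hedged sketch of how a driver might use them (the surrounding driver context and error handling are assumed; CCW_CMD_NOOP and CCW_FLAG_SLI are existing constants):

	/* Hypothetical driver snippet: a one-CCW program from the device pool. */
	struct ccw1 *cp;

	cp = ccw_device_dma_zalloc(cdev, sizeof(*cp));
	if (!cp)
		return -ENOMEM;
	cp->cmd_code = CCW_CMD_NOOP;
	cp->flags = CCW_FLAG_SLI;
	/* ... start I/O with ccw_device_start() and await completion ... */
	ccw_device_dma_free(cdev, cp, sizeof(*cp));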
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index d30a3babf176..767a85635a0f 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -57,7 +57,7 @@ out:
static void nop_build_cp(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
cp->cmd_code = CCW_CMD_NOOP;
cp->cda = 0;
@@ -134,9 +134,9 @@ err:
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
int i = pathmask_to_pos(req->lpm);
- struct pgid *pgid = &cdev->private->pgid[i];
+ struct pgid *pgid = &cdev->private->dma_area->pgid[i];
pgid->inf.fc = fn;
cp->cmd_code = CCW_CMD_SET_PGID;
@@ -300,7 +300,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
int *mismatch, u8 *reserved, u8 *reset)
{
- struct pgid *pgid = &cdev->private->pgid[0];
+ struct pgid *pgid = &cdev->private->dma_area->pgid[0];
struct pgid *first = NULL;
int lpm;
int i;
@@ -342,7 +342,7 @@ static u8 pgid_to_donepm(struct ccw_device *cdev)
lpm = 0x80 >> i;
if ((cdev->private->pgid_valid_mask & lpm) == 0)
continue;
- pgid = &cdev->private->pgid[i];
+ pgid = &cdev->private->dma_area->pgid[i];
if (sch->opm & lpm) {
if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
continue;
@@ -368,7 +368,8 @@ static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
int i;
for (i = 0; i < 8; i++)
- memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
+ memcpy(&cdev->private->dma_area->pgid[i], pgid,
+ sizeof(struct pgid));
}
/*
@@ -435,12 +436,12 @@ out:
static void snid_build_cp(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
int i = pathmask_to_pos(req->lpm);
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_PGID;
- cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
+ cp->cda = (u32) (addr_t) &cdev->private->dma_area->pgid[i];
cp->count = sizeof(struct pgid);
cp->flags = CCW_FLAG_SLI;
req->cp = cp;
@@ -516,7 +517,8 @@ static void verify_start(struct ccw_device *cdev)
sch->lpm = sch->schib.pmcw.pam;
/* Initialize PGID data. */
- memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+ memset(cdev->private->dma_area->pgid, 0,
+ sizeof(cdev->private->dma_area->pgid));
cdev->private->pgid_valid_mask = 0;
cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
cdev->private->path_notoper_mask = 0;
@@ -626,7 +628,7 @@ struct stlck_data {
static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
cp[0].cmd_code = CCW_CMD_STLCK;
cp[0].cda = (u32) (addr_t) buf1;
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 7d5c7892b2c4..0bd8f2642732 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -79,15 +79,15 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
	 * are conditions that have to be met for the extended control
* bit to have meaning. Sick.
*/
- cdev->private->irb.scsw.cmd.ectl = 0;
+ cdev->private->dma_area->irb.scsw.cmd.ectl = 0;
if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
!(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
- cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
+ cdev->private->dma_area->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
/* Check if extended control word is valid. */
- if (!cdev->private->irb.scsw.cmd.ectl)
+ if (!cdev->private->dma_area->irb.scsw.cmd.ectl)
return;
/* Copy concurrent sense / model dependent information. */
- memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
+ memcpy(&cdev->private->dma_area->irb.ecw, irb->ecw, sizeof(irb->ecw));
}
/*
@@ -118,7 +118,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
if (!ccw_device_accumulate_esw_valid(irb))
return;
- cdev_irb = &cdev->private->irb;
+ cdev_irb = &cdev->private->dma_area->irb;
/* Copy last path used mask. */
cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
@@ -210,7 +210,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
ccw_device_path_notoper(cdev);
/* No irb accumulation for transport mode irbs. */
if (scsw_is_tm(&irb->scsw)) {
- memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+ memcpy(&cdev->private->dma_area->irb, irb, sizeof(struct irb));
return;
}
/*
@@ -219,7 +219,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
if (!scsw_is_solicited(&irb->scsw))
return;
- cdev_irb = &cdev->private->irb;
+ cdev_irb = &cdev->private->dma_area->irb;
/*
* If the clear function had been performed, all formerly pending
@@ -227,7 +227,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
* intermediate accumulated status to the device driver.
*/
if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
/* Copy bits which are valid only for the start function. */
if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
@@ -329,9 +329,9 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
/*
* We have ending status but no sense information. Do a basic sense.
*/
- sense_ccw = &to_io_private(sch)->sense_ccw;
+ sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
- sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
+ sense_ccw->cda = (__u32) __pa(cdev->private->dma_area->irb.ecw);
sense_ccw->count = SENSE_MAX_COUNT;
sense_ccw->flags = CCW_FLAG_SLI;
@@ -364,7 +364,7 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
(irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
- cdev->private->irb.esw.esw0.erw.cons = 1;
+ cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
}
/* Check if path verification is required. */
@@ -386,7 +386,7 @@ ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
/* Check for basic sense. */
if (cdev->private->flags.dosense &&
!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
- cdev->private->irb.esw.esw0.erw.cons = 1;
+ cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
return 0;
}
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 90e4e3a7841b..c03b4a19974e 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -9,15 +9,20 @@
#include "css.h"
#include "orb.h"
+struct io_subchannel_dma_area {
+ struct ccw1 sense_ccw; /* static ccw for sense command */
+};
+
struct io_subchannel_private {
union orb orb; /* operation request block */
- struct ccw1 sense_ccw; /* static ccw for sense command */
struct ccw_device *cdev;/* pointer to the child ccw device */
struct {
unsigned int suspend:1; /* allow suspend */
unsigned int prefetch:1;/* deny prefetch */
unsigned int inter:1; /* suppress intermediate interrupts */
} __packed options;
+ struct io_subchannel_dma_area *dma_area;
+ dma_addr_t dma_area_dma;
} __aligned(8);
#define to_io_private(n) ((struct io_subchannel_private *) \
@@ -115,6 +120,13 @@ enum cdev_todo {
#define FAKE_CMD_IRB 1
#define FAKE_TM_IRB 2
+struct ccw_device_dma_area {
+ struct senseid senseid; /* SenseID info */
+ struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
+ struct irb irb; /* device status */
+	struct pgid pgid[8];	/* path group IDs per chpid */
+};
+
struct ccw_device_private {
struct ccw_device *cdev;
struct subchannel *sch;
@@ -156,11 +168,7 @@ struct ccw_device_private {
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
struct qdio_irq *qdio_data;
- struct irb irb; /* device status */
int async_kill_io_rc;
- struct senseid senseid; /* SenseID info */
- struct pgid pgid[8]; /* path group IDs per chpid*/
- struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
struct work_struct todo_work;
enum cdev_todo todo;
wait_queue_head_t wait_q;
@@ -169,6 +177,8 @@ struct ccw_device_private {
struct list_head cmb_list; /* list of measured devices */
u64 cmb_start_time; /* clock value of cmb reset */
void *cmb_wait; /* deferred cmb enable/disable */
+ struct gen_pool *dma_pool;
+ struct ccw_device_dma_area *dma_area;
enum interruption_class int_class;
};
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 7b7620de2acd..730c4e68094b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -736,6 +736,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
switch (state) {
case SLSB_P_OUTPUT_EMPTY:
+ case SLSB_P_OUTPUT_PENDING:
/* the adapter got it */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
"out empty:%1d %02x", q->nr, count);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 99d7d2566a3a..d4101cecdc8d 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -150,6 +150,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
return -ENOMEM;
}
irq_ptr_qs[i] = q;
+ INIT_LIST_HEAD(&q->entry);
}
return 0;
}
@@ -178,6 +179,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
q->mask = 1 << (31 - i);
q->nr = i;
q->handler = handler;
+ INIT_LIST_HEAD(&q->entry);
}
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 28d59ac2204c..93ee067c10ca 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -79,7 +79,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
mutex_lock(&tiq_list_lock);
list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
- xchg(irq_ptr->dsci, 1 << 7);
}
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
@@ -87,14 +86,14 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
struct qdio_q *q;
q = irq_ptr->input_qs[0];
- /* if establish triggered an error */
- if (!q || !q->entry.prev || !q->entry.next)
+ if (!q)
return;
mutex_lock(&tiq_list_lock);
list_del_rcu(&q->entry);
mutex_unlock(&tiq_list_lock);
synchronize_rcu();
+ INIT_LIST_HEAD(&q->entry);
}
static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
@@ -178,6 +177,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @airq: pointer to adapter interrupt descriptor
+ * @floating: flag to recognize floating vs. directed interrupts (unused)
*/
static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
{
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 0e79799e9a71..1d4c893ead23 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -16,12 +16,6 @@
#include "vfio_ccw_cp.h"
-/*
- * Max length for ccw chain.
- * XXX: Limit to 256, need to check more?
- */
-#define CCWCHAIN_LEN_MAX 256
-
struct pfn_array {
/* Starting guest physical I/O address. */
unsigned long pa_iova;
@@ -33,11 +27,6 @@ struct pfn_array {
int pa_nr;
};
-struct pfn_array_table {
- struct pfn_array *pat_pa;
- int pat_nr;
-};
-
struct ccwchain {
struct list_head next;
struct ccw1 *ch_ccw;
@@ -46,35 +35,29 @@ struct ccwchain {
/* Count of the valid ccws in chain. */
int ch_len;
/* Pinned PAGEs for the original data. */
- struct pfn_array_table *ch_pat;
+ struct pfn_array *ch_pa;
};
/*
- * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
+ * pfn_array_alloc() - alloc memory for PFNs
* @pa: pfn_array on which to perform the operation
- * @mdev: the mediated device to perform pin/unpin operations
* @iova: target guest physical address
* @len: number of bytes that should be pinned from @iova
*
- * Attempt to allocate memory for PFNs, and pin user pages in memory.
+ * Attempt to allocate memory for PFNs.
*
* Usage of pfn_array:
 * We expect (pa_nr == 0) and (pa_iova_pfn == NULL); any field in
 * this structure will be filled in by this function.
*
* Returns:
- * Number of pages pinned on success.
- * If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
- * returns -EINVAL.
- * If no pages were pinned, returns -errno.
+ * 0 if PFNs are allocated
+ * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova_pfn is not NULL
+ * -ENOMEM if alloc failed
*/
-static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
- u64 iova, unsigned int len)
+static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
{
- int i, ret = 0;
-
- if (!len)
- return 0;
+ int i;
if (pa->pa_nr || pa->pa_iova_pfn)
return -EINVAL;
@@ -94,8 +77,27 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
- for (i = 1; i < pa->pa_nr; i++)
+ pa->pa_pfn[0] = -1ULL;
+ for (i = 1; i < pa->pa_nr; i++) {
pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
+ pa->pa_pfn[i] = -1ULL;
+ }
+
+ return 0;
+}
+
+/*
+ * pfn_array_pin() - Pin user pages in memory
+ * @pa: pfn_array on which to perform the operation
+ * @mdev: the mediated device to perform pin operations
+ *
+ * Returns number of pages pinned upon success.
+ * If the pin request partially succeeds, or fails completely,
+ * all pages are left unpinned and a negative error value is returned.
+ */
+static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
+{
+ int ret = 0;
ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
@@ -112,8 +114,6 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
err_out:
pa->pa_nr = 0;
- kfree(pa->pa_iova_pfn);
- pa->pa_iova_pfn = NULL;
return ret;
}
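Splitting allocation from pinning lets a caller build the translation table first and pin only when data will actually move. A compressed sketch of the intended lifecycle, mirroring copy_from_iova() below (error handling abbreviated):

	struct pfn_array pa = {0};
	int ret;

	ret = pfn_array_alloc(&pa, iova, len);	/* fill pa_iova_pfn[] */
	if (ret < 0)
		return ret;
	ret = pfn_array_pin(&pa, mdev);		/* translate and pin */
	if (ret < 0) {
		/* pa_nr is 0 here, so this only frees the array. */
		pfn_array_unpin_free(&pa, mdev);
		return ret;
	}
	/* ... use pa.pa_pfn[] ... */
	pfn_array_unpin_free(&pa, mdev);	/* unpin and free */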
@@ -121,60 +121,30 @@ err_out:
/* Unpin the pages before releasing the memory. */
static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
{
- vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+ /* Only unpin if any pages were pinned to begin with */
+ if (pa->pa_nr)
+ vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
pa->pa_nr = 0;
kfree(pa->pa_iova_pfn);
}
-static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
-{
- pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
- if (unlikely(ZERO_OR_NULL_PTR(pat->pat_pa))) {
- pat->pat_nr = 0;
- return -ENOMEM;
- }
-
- pat->pat_nr = nr;
-
- return 0;
-}
-
-static void pfn_array_table_unpin_free(struct pfn_array_table *pat,
- struct device *mdev)
+static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
{
- int i;
-
- for (i = 0; i < pat->pat_nr; i++)
- pfn_array_unpin_free(pat->pat_pa + i, mdev);
-
- if (pat->pat_nr) {
- kfree(pat->pat_pa);
- pat->pat_pa = NULL;
- pat->pat_nr = 0;
- }
-}
-
-static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
- unsigned long iova)
-{
- struct pfn_array *pa = pat->pat_pa;
unsigned long iova_pfn = iova >> PAGE_SHIFT;
- int i, j;
+ int i;
- for (i = 0; i < pat->pat_nr; i++, pa++)
- for (j = 0; j < pa->pa_nr; j++)
- if (pa->pa_iova_pfn[j] == iova_pfn)
- return true;
+ for (i = 0; i < pa->pa_nr; i++)
+ if (pa->pa_iova_pfn[i] == iova_pfn)
+ return true;
return false;
}
-/* Create the list idal words for a pfn_array_table. */
-static inline void pfn_array_table_idal_create_words(
- struct pfn_array_table *pat,
+/* Create the list of IDAL words for a pfn_array. */
+static inline void pfn_array_idal_create_words(
+ struct pfn_array *pa,
unsigned long *idaws)
{
- struct pfn_array *pa;
- int i, j, k;
+ int i;
/*
 * Idal words (except the first one) rely on the memory being 4k
@@ -183,19 +153,36 @@ static inline void pfn_array_table_idal_create_words(
* there will be no problem here to simply use the phys to create an
* idaw.
*/
- k = 0;
- for (i = 0; i < pat->pat_nr; i++) {
- pa = pat->pat_pa + i;
- for (j = 0; j < pa->pa_nr; j++) {
- idaws[k] = pa->pa_pfn[j] << PAGE_SHIFT;
- if (k == 0)
- idaws[k] += pa->pa_iova & (PAGE_SIZE - 1);
- k++;
+
+ for (i = 0; i < pa->pa_nr; i++)
+ idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
+
+ /* Adjust the first IDAW, since it may not start on a page boundary */
+ idaws[0] += pa->pa_iova & (PAGE_SIZE - 1);
+}
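For example, with pa_iova = 0x10234 and the page pinned at host PFN 0x5, the loop stores 0x5000 in idaws[0] and the adjustment adds the in-page offset 0x234, giving 0x5234; all subsequent IDAWs stay page-aligned.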
+
+static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
+{
+ struct ccw0 ccw0;
+ struct ccw1 *pccw1 = source;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ ccw0 = *(struct ccw0 *)pccw1;
+ if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
+ pccw1->cmd_code = CCW_CMD_TIC;
+ pccw1->flags = 0;
+ pccw1->count = 0;
+ } else {
+ pccw1->cmd_code = ccw0.cmd_code;
+ pccw1->flags = ccw0.flags;
+ pccw1->count = ccw0.count;
}
+ pccw1->cda = ccw0.cda;
+ pccw1++;
}
}
-
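The in-place rewrite is possible because both CCW formats occupy eight bytes; only the field layout differs. Roughly, following the definitions in arch/s390/include/asm/cio.h (shown here as an illustrative sketch, not an authoritative copy):

	/* Format-0: 24-bit data address right after the command code. */
	struct ccw0 {
		__u8	cmd_code;
		__u32	cda : 24;
		__u8	flags;
		__u8	reserved;
		__u16	count;
	} __attribute__((packed, aligned(8)));

	/* Format-1: flags and count first, then a full-word data address. */
	struct ccw1 {
		__u8	cmd_code;
		__u8	flags;
		__u16	count;
		__u32	cda;
	} __attribute__((packed, aligned(8)));

Copying each entry into a struct ccw0 first, then writing the struct ccw1 fields back over the same memory, is what keeps the conversion safe.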
/*
* Within the domain (@mdev), copy @n bytes from a guest physical
* address (@iova) to a host physical address (@to).
@@ -209,9 +196,15 @@ static long copy_from_iova(struct device *mdev,
int i, ret;
unsigned long l, m;
- ret = pfn_array_alloc_pin(&pa, mdev, iova, n);
- if (ret <= 0)
+ ret = pfn_array_alloc(&pa, iova, n);
+ if (ret < 0)
+ return ret;
+
+ ret = pfn_array_pin(&pa, mdev);
+ if (ret < 0) {
+ pfn_array_unpin_free(&pa, mdev);
return ret;
+ }
l = n;
for (i = 0; i < pa.pa_nr; i++) {
@@ -235,55 +228,60 @@ static long copy_from_iova(struct device *mdev,
return l;
}
-static long copy_ccw_from_iova(struct channel_program *cp,
- struct ccw1 *to, u64 iova,
- unsigned long len)
-{
- struct ccw0 ccw0;
- struct ccw1 *pccw1;
- int ret;
- int i;
-
- ret = copy_from_iova(cp->mdev, to, iova, len * sizeof(struct ccw1));
- if (ret)
- return ret;
-
- if (!cp->orb.cmd.fmt) {
- pccw1 = to;
- for (i = 0; i < len; i++) {
- ccw0 = *(struct ccw0 *)pccw1;
- if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
- pccw1->cmd_code = CCW_CMD_TIC;
- pccw1->flags = 0;
- pccw1->count = 0;
- } else {
- pccw1->cmd_code = ccw0.cmd_code;
- pccw1->flags = ccw0.flags;
- pccw1->count = ccw0.count;
- }
- pccw1->cda = ccw0.cda;
- pccw1++;
- }
- }
-
- return ret;
-}
-
/*
 * Helpers to operate on a ccwchain.
*/
-#define ccw_is_test(_ccw) (((_ccw)->cmd_code & 0x0F) == 0)
+#define ccw_is_read(_ccw) (((_ccw)->cmd_code & 0x03) == 0x02)
+#define ccw_is_read_backward(_ccw) (((_ccw)->cmd_code & 0x0F) == 0x0C)
+#define ccw_is_sense(_ccw) (((_ccw)->cmd_code & 0x0F) == CCW_CMD_BASIC_SENSE)
#define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP)
#define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC)
#define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA)
-
+#define ccw_is_skip(_ccw) ((_ccw)->flags & CCW_FLAG_SKIP)
#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))
/*
+ * ccw_does_data_transfer()
+ *
+ * Determine whether a CCW will move any data, such that the guest pages
+ * would need to be pinned before performing the I/O.
+ *
+ * Returns 1 if yes, 0 if no.
+ */
+static inline int ccw_does_data_transfer(struct ccw1 *ccw)
+{
+ /* If the count field is zero, then no data will be transferred */
+ if (ccw->count == 0)
+ return 0;
+
+ /* If the command is a NOP, then no data will be transferred */
+ if (ccw_is_noop(ccw))
+ return 0;
+
+ /* If the skip flag is off, then data will be transferred */
+ if (!ccw_is_skip(ccw))
+ return 1;
+
+ /*
+ * If the skip flag is on, it is only meaningful if the command
+ * code is a read, read backward, sense, or sense ID. In those
+ * cases, no data will be transferred.
+ */
+ if (ccw_is_read(ccw) || ccw_is_read_backward(ccw))
+ return 0;
+
+ if (ccw_is_sense(ccw))
+ return 0;
+
+ /* The skip flag is on, but it is ignored for this command code. */
+ return 1;
+}
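As a concrete reading of these rules: a read CCW with the skip flag set transfers no data and returns 0, so its pages need not be pinned, while a write CCW with skip set still returns 1, because skip is ignored for write command codes.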
+
+/*
* is_cpa_within_range()
*
* @cpa: channel program address being questioned
@@ -319,7 +317,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
/* Make ccw address aligned to 8. */
size = ((sizeof(*chain) + 7L) & -8L) +
sizeof(*chain->ch_ccw) * len +
- sizeof(*chain->ch_pat) * len;
+ sizeof(*chain->ch_pa) * len;
chain = kzalloc(size, GFP_DMA | GFP_KERNEL);
if (!chain)
return NULL;
@@ -328,7 +326,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
chain->ch_ccw = (struct ccw1 *)data;
data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
- chain->ch_pat = (struct pfn_array_table *)data;
+ chain->ch_pa = (struct pfn_array *)data;
chain->ch_len = len;
@@ -348,31 +346,12 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{
struct ccw1 *ccw = chain->ch_ccw + idx;
- if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw))
- return;
- if (!ccw->count)
+ if (ccw_is_tic(ccw))
return;
kfree((void *)(u64)ccw->cda);
}
-/* Unpin the pages then free the memory resources. */
-static void cp_unpin_free(struct channel_program *cp)
-{
- struct ccwchain *chain, *temp;
- int i;
-
- cp->initialized = false;
- list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
- for (i = 0; i < chain->ch_len; i++) {
- pfn_array_table_unpin_free(chain->ch_pat + i,
- cp->mdev);
- ccwchain_cda_free(chain, i);
- }
- ccwchain_free(chain);
- }
-}
-
/**
* ccwchain_calc_length - calculate the length of the ccw chain.
* @iova: guest physical address of the target ccw chain
@@ -388,25 +367,9 @@ static void cp_unpin_free(struct channel_program *cp)
*/
static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
{
- struct ccw1 *ccw, *p;
- int cnt;
-
- /*
- * Copy current chain from guest to host kernel.
- * Currently the chain length is limited to CCWCHAIN_LEN_MAX (256).
- * So copying 2K is enough (safe).
- */
- p = ccw = kcalloc(CCWCHAIN_LEN_MAX, sizeof(*ccw), GFP_KERNEL);
- if (!ccw)
- return -ENOMEM;
-
- cnt = copy_ccw_from_iova(cp, ccw, iova, CCWCHAIN_LEN_MAX);
- if (cnt) {
- kfree(ccw);
- return cnt;
- }
+ struct ccw1 *ccw = cp->guest_cp;
+ int cnt = 0;
- cnt = 0;
do {
cnt++;
@@ -415,10 +378,8 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
* orb specified one of the unsupported formats, we defer
* checking for IDAWs in unsupported formats to here.
*/
- if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) {
- kfree(p);
+ if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
return -EOPNOTSUPP;
- }
/*
* We want to keep counting if the current CCW has the
@@ -437,7 +398,6 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
if (cnt == CCWCHAIN_LEN_MAX + 1)
cnt = -EINVAL;
- kfree(p);
return cnt;
}
@@ -458,17 +418,23 @@ static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
static int ccwchain_loop_tic(struct ccwchain *chain,
struct channel_program *cp);
-static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp)
+static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
{
struct ccwchain *chain;
- int len, ret;
+ int len;
- /* May transfer to an existing chain. */
- if (tic_target_chain_exists(tic, cp))
- return 0;
+ /* Copy 2K (the most we support today) of possible CCWs */
+ len = copy_from_iova(cp->mdev, cp->guest_cp, cda,
+ CCWCHAIN_LEN_MAX * sizeof(struct ccw1));
+ if (len)
+ return len;
- /* Get chain length. */
- len = ccwchain_calc_length(tic->cda, cp);
+ /* Convert any Format-0 CCWs to Format-1 */
+ if (!cp->orb.cmd.fmt)
+ convert_ccw0_to_ccw1(cp->guest_cp, CCWCHAIN_LEN_MAX);
+
+ /* Count the CCWs in the current chain */
+ len = ccwchain_calc_length(cda, cp);
if (len < 0)
return len;
@@ -476,14 +442,10 @@ static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp)
chain = ccwchain_alloc(cp, len);
if (!chain)
return -ENOMEM;
- chain->ch_iova = tic->cda;
+ chain->ch_iova = cda;
- /* Copy the new chain from user. */
- ret = copy_ccw_from_iova(cp, chain->ch_ccw, tic->cda, len);
- if (ret) {
- ccwchain_free(chain);
- return ret;
- }
+ /* Copy the actual CCWs into the new chain */
+ memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1));
/* Loop for tics on this new chain. */
return ccwchain_loop_tic(chain, cp);
@@ -501,7 +463,12 @@ static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
if (!ccw_is_tic(tic))
continue;
- ret = ccwchain_handle_tic(tic, cp);
+ /* May transfer to an existing chain. */
+ if (tic_target_chain_exists(tic, cp))
+ continue;
+
+ /* Build a ccwchain for the next segment */
+ ret = ccwchain_handle_ccw(tic->cda, cp);
if (ret)
return ret;
}
@@ -534,115 +501,90 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
struct channel_program *cp)
{
struct ccw1 *ccw;
- struct pfn_array_table *pat;
+ struct pfn_array *pa;
+ u64 iova;
unsigned long *idaws;
int ret;
+ int bytes = 1;
+ int idaw_nr, idal_len;
+ int i;
ccw = chain->ch_ccw + idx;
- if (!ccw->count) {
- /*
- * We just want the translation result of any direct ccw
- * to be an IDA ccw, so let's add the IDA flag for it.
- * Although the flag will be ignored by firmware.
- */
- ccw->flags |= CCW_FLAG_IDA;
- return 0;
- }
-
- /*
- * Pin data page(s) in memory.
- * The number of pages actually is the count of the idaws which will be
- * needed when translating a direct ccw to a idal ccw.
- */
- pat = chain->ch_pat + idx;
- ret = pfn_array_table_init(pat, 1);
- if (ret)
- goto out_init;
-
- ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
- if (ret < 0)
- goto out_unpin;
+ if (ccw->count)
+ bytes = ccw->count;
- /* Translate this direct ccw to a idal ccw. */
- idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
- if (!idaws) {
- ret = -ENOMEM;
- goto out_unpin;
+ /* Calculate size of IDAL */
+ if (ccw_is_idal(ccw)) {
+ /* Read first IDAW to see if it's 4K-aligned or not. */
+		/* All subsequent IDAWs will be 4K-aligned. */
+ ret = copy_from_iova(cp->mdev, &iova, ccw->cda, sizeof(iova));
+ if (ret)
+ return ret;
+ } else {
+ iova = ccw->cda;
}
- ccw->cda = (__u32) virt_to_phys(idaws);
- ccw->flags |= CCW_FLAG_IDA;
-
- pfn_array_table_idal_create_words(pat, idaws);
-
- return 0;
-
-out_unpin:
- pfn_array_table_unpin_free(pat, cp->mdev);
-out_init:
- ccw->cda = 0;
- return ret;
-}
-
-static int ccwchain_fetch_idal(struct ccwchain *chain,
- int idx,
- struct channel_program *cp)
-{
- struct ccw1 *ccw;
- struct pfn_array_table *pat;
- unsigned long *idaws;
- u64 idaw_iova;
- unsigned int idaw_nr, idaw_len;
- int i, ret;
-
- ccw = chain->ch_ccw + idx;
-
- if (!ccw->count)
- return 0;
-
- /* Calculate size of idaws. */
- ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova));
- if (ret)
- return ret;
- idaw_nr = idal_nr_words((void *)(idaw_iova), ccw->count);
- idaw_len = idaw_nr * sizeof(*idaws);
-
- /* Pin data page(s) in memory. */
- pat = chain->ch_pat + idx;
- ret = pfn_array_table_init(pat, idaw_nr);
- if (ret)
- goto out_init;
+ idaw_nr = idal_nr_words((void *)iova, bytes);
+ idal_len = idaw_nr * sizeof(*idaws);
- /* Translate idal ccw to use new allocated idaws. */
- idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
+ /* Allocate an IDAL from host storage */
+ idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
if (!idaws) {
ret = -ENOMEM;
- goto out_unpin;
+ goto out_init;
}
- ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idaw_len);
- if (ret)
+ /*
+ * Allocate an array of pfn's for pages to pin/translate.
+ * The number of pages is actually the count of the idaws
+	 * required for the data transfer, since we only support
+ * 4K IDAWs today.
+ */
+ pa = chain->ch_pa + idx;
+ ret = pfn_array_alloc(pa, iova, bytes);
+ if (ret < 0)
goto out_free_idaws;
- ccw->cda = virt_to_phys(idaws);
+ if (ccw_is_idal(ccw)) {
+ /* Copy guest IDAL into host IDAL */
+ ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idal_len);
+ if (ret)
+ goto out_unpin;
- for (i = 0; i < idaw_nr; i++) {
- idaw_iova = *(idaws + i);
+ /*
+ * Copy guest IDAWs into pfn_array, in case the memory they
+ * occupy is not contiguous.
+ */
+ for (i = 0; i < idaw_nr; i++)
+ pa->pa_iova_pfn[i] = idaws[i] >> PAGE_SHIFT;
+ } else {
+ /*
+ * No action is required here; the iova addresses in pfn_array
+ * were initialized sequentially in pfn_array_alloc() beginning
+ * with the contents of ccw->cda.
+ */
+ }
- ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev,
- idaw_iova, 1);
+ if (ccw_does_data_transfer(ccw)) {
+ ret = pfn_array_pin(pa, cp->mdev);
if (ret < 0)
- goto out_free_idaws;
+ goto out_unpin;
+ } else {
+ pa->pa_nr = 0;
}
- pfn_array_table_idal_create_words(pat, idaws);
+ ccw->cda = (__u32) virt_to_phys(idaws);
+ ccw->flags |= CCW_FLAG_IDA;
+
+ /* Populate the IDAL with pinned/translated addresses from pfn */
+ pfn_array_idal_create_words(pa, idaws);
return 0;
+out_unpin:
+ pfn_array_unpin_free(pa, cp->mdev);
out_free_idaws:
kfree(idaws);
-out_unpin:
- pfn_array_table_unpin_free(pat, cp->mdev);
out_init:
ccw->cda = 0;
return ret;
@@ -660,15 +602,9 @@ static int ccwchain_fetch_one(struct ccwchain *chain,
{
struct ccw1 *ccw = chain->ch_ccw + idx;
- if (ccw_is_test(ccw) || ccw_is_noop(ccw))
- return 0;
-
if (ccw_is_tic(ccw))
return ccwchain_fetch_tic(chain, idx, cp);
- if (ccw_is_idal(ccw))
- return ccwchain_fetch_idal(chain, idx, cp);
-
return ccwchain_fetch_direct(chain, idx, cp);
}
@@ -691,9 +627,7 @@ static int ccwchain_fetch_one(struct ccwchain *chain,
*/
int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
{
- u64 iova = orb->cmd.cpa;
- struct ccwchain *chain;
- int len, ret;
+ int ret;
/*
* XXX:
@@ -706,28 +640,11 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
memcpy(&cp->orb, orb, sizeof(*orb));
cp->mdev = mdev;
- /* Get chain length. */
- len = ccwchain_calc_length(iova, cp);
- if (len < 0)
- return len;
-
- /* Alloc mem for the head chain. */
- chain = ccwchain_alloc(cp, len);
- if (!chain)
- return -ENOMEM;
- chain->ch_iova = iova;
-
- /* Copy the head chain from guest. */
- ret = copy_ccw_from_iova(cp, chain->ch_ccw, iova, len);
- if (ret) {
- ccwchain_free(chain);
- return ret;
- }
-
- /* Now loop for its TICs. */
- ret = ccwchain_loop_tic(chain, cp);
+ /* Build a ccwchain for the first CCW segment */
+ ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);
if (ret)
- cp_unpin_free(cp);
+ cp_free(cp);
+
	/* It is safe to force: if the flag is not set but idals are used,
	 * ccwchain_calc_length returns an error.
*/
@@ -750,8 +667,20 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
*/
void cp_free(struct channel_program *cp)
{
- if (cp->initialized)
- cp_unpin_free(cp);
+ struct ccwchain *chain, *temp;
+ int i;
+
+ if (!cp->initialized)
+ return;
+
+ cp->initialized = false;
+ list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
+ for (i = 0; i < chain->ch_len; i++) {
+ pfn_array_unpin_free(chain->ch_pa + i, cp->mdev);
+ ccwchain_cda_free(chain, i);
+ }
+ ccwchain_free(chain);
+ }
}
/**
@@ -886,7 +815,11 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
*/
list_for_each_entry(chain, &cp->ccwchain_list, next) {
ccw_head = (u32)(u64)chain->ch_ccw;
- if (is_cpa_within_range(cpa, ccw_head, chain->ch_len)) {
+ /*
+ * On successful execution, cpa points just beyond the end
+ * of the chain.
+ */
+ if (is_cpa_within_range(cpa, ccw_head, chain->ch_len + 1)) {
/*
* (cpa - ccw_head) is the offset value of the host
* physical ccw to its chain head.
@@ -919,8 +852,7 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova)
list_for_each_entry(chain, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++)
- if (pfn_array_table_iova_pinned(chain->ch_pat + i,
- iova))
+ if (pfn_array_iova_pinned(chain->ch_pa + i, iova))
return true;
}
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index 3c20cd208da5..7cdc38049033 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -16,6 +16,12 @@
#include "orb.h"
+/*
+ * Max length for ccw chain.
+ * XXX: Limit to 256, need to check more?
+ */
+#define CCWCHAIN_LEN_MAX 256
+
/**
* struct channel_program - manage information for channel program
* @ccwchain_list: list head of ccwchains
@@ -32,6 +38,7 @@ struct channel_program {
union orb orb;
struct device *mdev;
bool initialized;
+ struct ccw1 *guest_cp;
};
extern int cp_init(struct channel_program *cp, struct device *mdev,
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 9125f7f4e64c..2b90a5ecaeb9 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -95,11 +95,11 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
memcpy(private->io_region->irb_area, irb, sizeof(*irb));
mutex_unlock(&private->io_mutex);
- if (private->io_trigger)
- eventfd_signal(private->io_trigger, 1);
-
if (private->mdev && is_final)
private->state = VFIO_CCW_STATE_IDLE;
+
+ if (private->io_trigger)
+ eventfd_signal(private->io_trigger, 1);
}
/*
@@ -129,6 +129,11 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
if (!private)
return -ENOMEM;
+ private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
+ GFP_KERNEL);
+ if (!private->cp.guest_cp)
+ goto out_free;
+
private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
GFP_KERNEL | GFP_DMA);
if (!private->io_region)
@@ -169,6 +174,7 @@ out_free:
kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
if (private->io_region)
kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ kfree(private->cp.guest_cp);
kfree(private);
return ret;
}
@@ -185,6 +191,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ kfree(private->cp.guest_cp);
kfree(private);
return 0;
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 45eb0c14b880..7f418d2d8cdf 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -690,7 +690,7 @@ int pkey_clr2protkey(u32 keytype,
*/
if (!cpacf_test_func(&pckmo_functions, fc)) {
DEBUG_ERR("%s pckmo functions not available\n", __func__);
- return -EOPNOTSUPP;
+ return -ENODEV;
}
/* prepare param block */
@@ -1695,15 +1695,15 @@ static int __init pkey_init(void)
* are able to work with protected keys.
*/
if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
- return -EOPNOTSUPP;
+ return -ENODEV;
/* check for kmc instructions available */
if (!cpacf_query(CPACF_KMC, &kmc_functions))
- return -EOPNOTSUPP;
+ return -ENODEV;
if (!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256))
- return -EOPNOTSUPP;
+ return -ENODEV;
pkey_debug_init();
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index e9824c35c34f..003662aa8060 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -5,6 +5,7 @@
* Copyright IBM Corp. 2018
*
* Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
*/
#include <linux/module.h>
@@ -40,14 +41,45 @@ static struct ap_device_id ap_queue_ids[] = {
MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids);
+/**
+ * vfio_ap_queue_dev_probe:
+ *
+ * Allocate a vfio_ap_queue structure and associate it
+ * with the device as driver_data.
+ */
static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
{
+ struct vfio_ap_queue *q;
+
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+ dev_set_drvdata(&apdev->device, q);
+ q->apqn = to_ap_queue(&apdev->device)->qid;
+ q->saved_isc = VFIO_AP_ISC_INVALID;
return 0;
}
+/**
+ * vfio_ap_queue_dev_remove:
+ *
+ * Takes the matrix lock to avoid actions on this device while it is
+ * being removed, then frees the associated vfio_ap_queue structure.
+ */
static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
{
- /* Nothing to do yet */
+ struct vfio_ap_queue *q;
+ int apid, apqi;
+
+ mutex_lock(&matrix_dev->lock);
+ q = dev_get_drvdata(&apdev->device);
+ dev_set_drvdata(&apdev->device, NULL);
+ apid = AP_QID_CARD(q->apqn);
+ apqi = AP_QID_QUEUE(q->apqn);
+ vfio_ap_mdev_reset_queue(apid, apqi, 1);
+ vfio_ap_irq_disable(q);
+ kfree(q);
+ mutex_unlock(&matrix_dev->lock);
}
static void vfio_ap_matrix_dev_release(struct device *dev)
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 900b9cf20ca5..2c9fb1423a39 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -24,6 +24,296 @@
#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
+static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
+
+static int match_apqn(struct device *dev, void *data)
+{
+ struct vfio_ap_queue *q = dev_get_drvdata(dev);
+
+ return (q->apqn == *(int *)(data)) ? 1 : 0;
+}
+
+/**
+ * vfio_ap_get_queue: Retrieve a queue with a specific APQN from a list
+ * @matrix_mdev: the associated mediated matrix
+ * @apqn: The queue APQN
+ *
+ * Retrieve a queue with a specific APQN from the list of devices
+ * attached to the vfio_ap driver.
+ * Verify that the APID and the APQI are set in the matrix.
+ *
+ * Returns the pointer to the associated vfio_ap_queue
+ */
+static struct vfio_ap_queue *vfio_ap_get_queue(
+ struct ap_matrix_mdev *matrix_mdev,
+ int apqn)
+{
+ struct vfio_ap_queue *q;
+ struct device *dev;
+
+ if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
+ return NULL;
+ if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
+ return NULL;
+
+ dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
+ &apqn, match_apqn);
+ if (!dev)
+ return NULL;
+ q = dev_get_drvdata(dev);
+ q->matrix_mdev = matrix_mdev;
+ put_device(dev);
+
+ return q;
+}
+
+/**
+ * vfio_ap_wait_for_irqclear
+ * @apqn: The AP Queue number
+ *
+ * Checks the IRQ bit for the status of this APQN using ap_tapq.
+ * Returns if ap_tapq succeeds and the IRQ bit is clear, or if
+ * ap_tapq fails with an invalid, deconfigured or checkstopped AP.
+ * Otherwise retries up to 5 times after waiting 20ms each time.
+ *
+ */
+static void vfio_ap_wait_for_irqclear(int apqn)
+{
+ struct ap_queue_status status;
+ int retry = 5;
+
+ do {
+ status = ap_tapq(apqn, NULL);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ if (!status.irq_enabled)
+ return;
+ /* Fall through */
+ case AP_RESPONSE_BUSY:
+ msleep(20);
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ default:
+ WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
+ status.response_code, apqn);
+ return;
+ }
+ } while (--retry);
+
+ WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
+ __func__, status.response_code, apqn);
+}
+
+/**
+ * vfio_ap_free_aqic_resources
+ * @q: The vfio_ap_queue
+ *
+ * Unregisters the ISC in the GIB when the saved ISC is not invalid.
+ * Unpins the guest's page holding the NIB when one exists.
+ * Resets the saved_pfn and saved_isc to invalid values.
+ * Clears the pointer to the matrix mediated device.
+ *
+ */
+static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
+{
+ if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev)
+ kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
+ if (q->saved_pfn && q->matrix_mdev)
+ vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
+ &q->saved_pfn, 1);
+ q->saved_pfn = 0;
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+ q->matrix_mdev = NULL;
+}
+
+/**
+ * vfio_ap_irq_disable
+ * @q: The vfio_ap_queue
+ *
+ * Uses ap_aqic to disable the interrupt. On success, or when the
+ * interrupt was already disabled or otherwise changed, calls
+ * vfio_ap_wait_for_irqclear() to wait for the IRQ bit to clear and
+ * vfio_ap_free_aqic_resources() to free the resources associated
+ * with AP interrupt handling.
+ *
+ * If the AP is busy or a reset is in progress, retries after 20ms,
+ * up to 5 times.
+ *
+ * Returns after freeing the resources if ap_aqic fails with an
+ * invalid, deconfigured or checkstopped AP.
+ */
+struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
+{
+ struct ap_qirq_ctrl aqic_gisa = {};
+ struct ap_queue_status status;
+ int retries = 5;
+
+ do {
+ status = ap_aqic(q->apqn, aqic_gisa, NULL);
+ switch (status.response_code) {
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ case AP_RESPONSE_NORMAL:
+ vfio_ap_wait_for_irqclear(q->apqn);
+ goto end_free;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ msleep(20);
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_INVALID_ADDRESS:
+ default:
+ /* All remaining cases mean the AP is not operational */
+ WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
+ status.response_code);
+ goto end_free;
+ }
+ } while (retries--);
+
+ WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
+ status.response_code);
+end_free:
+ vfio_ap_free_aqic_resources(q);
+ return status;
+}
+
+/**
+ * vfio_ap_irq_enable: Enable interrupts for an APQN
+ *
+ * @q: the vfio_ap_queue holding AQIC parameters
+ * @isc: the guest ISC to register with the GIB interface
+ * @nib: the guest NIB address whose page is pinned
+ *
+ * Pin the guest page holding the NIB.
+ * Register the guest ISC with the GIB interface and retrieve the
+ * host ISC to issue the host side PQAP/AQIC.
+ *
+ * Response.status may be set to AP_RESPONSE_INVALID_ADDRESS in case
+ * vfio_pin_pages failed.
+ *
+ * Otherwise returns the ap_queue_status returned by ap_aqic();
+ * all retry handling will be done by the guest.
+ */
+static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
+ int isc,
+ unsigned long nib)
+{
+ struct ap_qirq_ctrl aqic_gisa = {};
+ struct ap_queue_status status = {};
+ struct kvm_s390_gisa *gisa;
+ struct kvm *kvm;
+ unsigned long h_nib, g_pfn, h_pfn;
+ int ret;
+
+ g_pfn = nib >> PAGE_SHIFT;
+ ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &h_pfn);
+ switch (ret) {
+ case 1:
+ break;
+ default:
+ status.response_code = AP_RESPONSE_INVALID_ADDRESS;
+ return status;
+ }
+
+ kvm = q->matrix_mdev->kvm;
+ gisa = kvm->arch.gisa_int.origin;
+
+ h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
+ aqic_gisa.gisc = isc;
+ aqic_gisa.isc = kvm_s390_gisc_register(kvm, isc);
+ aqic_gisa.ir = 1;
+ aqic_gisa.gisa = (uint64_t)gisa >> 4;
+
+ status = ap_aqic(q->apqn, aqic_gisa, (void *)h_nib);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ /* Release any previously saved IRQ configuration */
+ vfio_ap_free_aqic_resources(q);
+ q->saved_pfn = g_pfn;
+ q->saved_isc = isc;
+ break;
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ /* We could not modify IRQ settings: clear new configuration */
+ vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1);
+ kvm_s390_gisc_unregister(kvm, isc);
+ break;
+ default:
+ pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
+ status.response_code);
+ vfio_ap_irq_disable(q);
+ break;
+ }
+
+ return status;
+}
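To make the NIB translation in vfio_ap_irq_enable() above concrete: the guest NIB is split into a page frame and an in-page offset, and the offset is reattached to the pinned host frame. Assuming 4 KiB pages (PAGE_SHIFT == 12), nib == 0x12345678 gives guest PFN 0x12345 and offset 0x678; if vfio_pin_pages() maps that to host PFN 0xabcde, the host NIB becomes 0xabcde678. A sketch of the computation:

    /* Sketch of the h_nib arithmetic used above. */
    static unsigned long example_host_nib(unsigned long nib,
                                          unsigned long h_pfn)
    {
            return (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
    }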
+
+/**
+ * handle_pqap: PQAP instruction callback
+ *
+ * @vcpu: The vcpu on which we received the PQAP instruction
+ *
+ * Get the general register contents to initialize internal variables.
+ * REG[0]: APQN
+ * REG[1]: IR and ISC
+ * REG[2]: NIB
+ *
+ * Response.status may be set to one of the following response codes:
+ * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
+ * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
+ * - AP_RESPONSE_NORMAL (0): in case of success
+ * Check vfio_ap_irq_enable() and vfio_ap_irq_disable() for other
+ * possible response codes.
+ * We take the matrix_dev lock to ensure serialization on queues and
+ * mediated device access.
+ *
+ * Returns 0 if the request could be handled inside KVM;
+ * otherwise returns -EOPNOTSUPP to let QEMU handle the fault.
+ */
+static int handle_pqap(struct kvm_vcpu *vcpu)
+{
+ uint64_t status;
+ uint16_t apqn;
+ struct vfio_ap_queue *q;
+ struct ap_queue_status qstatus = {
+ .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
+ struct ap_matrix_mdev *matrix_mdev;
+
+ /* If we do not use the AIV facility, just go to userland */
+ if (!(vcpu->arch.sie_block->eca & ECA_AIV))
+ return -EOPNOTSUPP;
+
+ apqn = vcpu->run->s.regs.gprs[0] & 0xffff;
+ mutex_lock(&matrix_dev->lock);
+
+ if (!vcpu->kvm->arch.crypto.pqap_hook)
+ goto out_unlock;
+ matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
+ struct ap_matrix_mdev, pqap_hook);
+
+ q = vfio_ap_get_queue(matrix_mdev, apqn);
+ if (!q)
+ goto out_unlock;
+
+ status = vcpu->run->s.regs.gprs[1];
+
+ /* If IR bit(16) is set we enable the interrupt */
+ if ((status >> (63 - 16)) & 0x01)
+ qstatus = vfio_ap_irq_enable(q, status & 0x07,
+ vcpu->run->s.regs.gprs[2]);
+ else
+ qstatus = vfio_ap_irq_disable(q);
+
+out_unlock:
+ memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
+ vcpu->run->s.regs.gprs[1] >>= 32;
+ mutex_unlock(&matrix_dev->lock);
+ return 0;
+}
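The register layout handle_pqap() consumes can be summarized in a small decode sketch; the struct and helper names here are illustrative, not part of the patch:

    /* Sketch: the PQAP/AQIC register interface as decoded above.
     * GR0 holds the APQN in its low 16 bits; GR1 holds the IR flag
     * in bit 16 (IBM bit numbering, i.e. shift 63 - 16 = 47) and
     * the ISC in its low 3 bits; GR2 holds the guest NIB address. */
    struct pqap_aqic_regs {
            u16 apqn;
            bool ir;
            u8 isc;
            u64 nib;
    };

    static struct pqap_aqic_regs example_decode(const u64 gprs[3])
    {
            struct pqap_aqic_regs r = {
                    .apqn = gprs[0] & 0xffff,
                    .ir   = (gprs[1] >> (63 - 16)) & 0x01,
                    .isc  = gprs[1] & 0x07,
                    .nib  = gprs[2],
            };
            return r;
    }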
+
static void vfio_ap_matrix_init(struct ap_config_info *info,
struct ap_matrix *matrix)
{
@@ -45,8 +335,11 @@ static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
return -ENOMEM;
}
+ matrix_mdev->mdev = mdev;
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
mdev_set_drvdata(mdev, matrix_mdev);
+ matrix_mdev->pqap_hook.hook = handle_pqap;
+ matrix_mdev->pqap_hook.owner = THIS_MODULE;
mutex_lock(&matrix_dev->lock);
list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
mutex_unlock(&matrix_dev->lock);
@@ -62,6 +355,7 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev)
return -EBUSY;
mutex_lock(&matrix_dev->lock);
+ vfio_ap_mdev_reset_queues(mdev);
list_del(&matrix_mdev->node);
mutex_unlock(&matrix_dev->lock);
@@ -754,11 +1048,42 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
}
matrix_mdev->kvm = kvm;
+ kvm_get_kvm(kvm);
+ kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
mutex_unlock(&matrix_dev->lock);
return 0;
}
+/*
+ * vfio_ap_mdev_iommu_notifier: IOMMU notifier callback
+ *
+ * @nb: The notifier block
+ * @action: Action to be taken
+ * @data: data associated with the request
+ *
+ * For an UNMAP request, unpin the guest IOVA (the NIB guest address we
+ * pinned before). Other requests are ignored.
+ *
+ */
+static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier);
+
+ if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
+ struct vfio_iommu_type1_dma_unmap *unmap = data;
+ unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;
+
+ vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -790,15 +1115,36 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
- unsigned int retry)
+static void vfio_ap_irq_disable_apqn(int apqn)
+{
+ struct device *dev;
+ struct vfio_ap_queue *q;
+
+ dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
+ &apqn, match_apqn);
+ if (dev) {
+ q = dev_get_drvdata(dev);
+ vfio_ap_irq_disable(q);
+ put_device(dev);
+ }
+}
+
+int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+ unsigned int retry)
{
struct ap_queue_status status;
+ int retry2 = 2;
+ int apqn = AP_MKQID(apid, apqi);
do {
- status = ap_zapq(AP_MKQID(apid, apqi));
+ status = ap_zapq(apqn);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
+ while (!status.queue_empty && retry2--) {
+ msleep(20);
+ status = ap_tapq(apqn, NULL);
+ }
+ WARN_ON_ONCE(retry2 <= 0);
return 0;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
@@ -832,6 +1178,7 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
*/
if (ret)
rc = ret;
+ vfio_ap_irq_disable_apqn(AP_MKQID(apid, apqi));
}
}
@@ -858,20 +1205,37 @@ static int vfio_ap_mdev_open(struct mdev_device *mdev)
return ret;
}
- return 0;
+ matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
+ events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &events, &matrix_mdev->iommu_notifier);
+ if (!ret)
+ return ret;
+
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ &matrix_mdev->group_notifier);
+ module_put(THIS_MODULE);
+ return ret;
}
static void vfio_ap_mdev_release(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
- if (matrix_mdev->kvm)
+ mutex_lock(&matrix_dev->lock);
+ if (matrix_mdev->kvm) {
kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+ matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
+ vfio_ap_mdev_reset_queues(mdev);
+ kvm_put_kvm(matrix_mdev->kvm);
+ matrix_mdev->kvm = NULL;
+ }
+ mutex_unlock(&matrix_dev->lock);
- vfio_ap_mdev_reset_queues(mdev);
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &matrix_mdev->iommu_notifier);
vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
&matrix_mdev->group_notifier);
- matrix_mdev->kvm = NULL;
module_put(THIS_MODULE);
}
@@ -900,6 +1264,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
{
int ret;
+ mutex_lock(&matrix_dev->lock);
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
ret = vfio_ap_mdev_get_device_info(arg);
@@ -911,6 +1276,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
ret = -EOPNOTSUPP;
break;
}
+ mutex_unlock(&matrix_dev->lock);
return ret;
}
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index 76b7f98e47e9..f46dde56b464 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -4,6 +4,7 @@
*
* Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
* Halil Pasic <pasic@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
*
* Copyright IBM Corp. 2018
*/
@@ -16,6 +17,7 @@
#include <linux/mdev.h>
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/kvm_host.h>
#include "ap_bus.h"
@@ -80,10 +82,23 @@ struct ap_matrix_mdev {
struct list_head node;
struct ap_matrix matrix;
struct notifier_block group_notifier;
+ struct notifier_block iommu_notifier;
struct kvm *kvm;
+ struct kvm_s390_module_hook pqap_hook;
+ struct mdev_device *mdev;
};
extern int vfio_ap_mdev_register(void);
extern void vfio_ap_mdev_unregister(void);
+int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+ unsigned int retry);
+struct vfio_ap_queue {
+ struct ap_matrix_mdev *matrix_mdev;
+ unsigned long saved_pfn;
+ int apqn;
+#define VFIO_AP_ISC_INVALID 0xff
+ unsigned char saved_isc;
+};
+struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q);
#endif /* _VFIO_AP_PRIVATE_H_ */
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 0cbcc238ef98..12fe9deb265e 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -567,6 +567,10 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
*fcode = payload_hdr->func_val & 0xFFFF;
+ /* enable special processing based on the CPRB's flags special bit */
+ if (msg->cprbx.flags & 0x20)
+ ap_msg->special = 1;
+
return 0;
}
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 7c5a25ddf832..ced896d1534a 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -7,10 +7,10 @@ config LCS
prompt "Lan Channel Station Interface"
depends on CCW && NETDEVICES && (ETHERNET || FDDI)
help
- Select this option if you want to use LCS networking on IBM System z.
- This device driver supports FDDI (IEEE 802.7) and Ethernet.
- To compile as a module, choose M. The module name is lcs.
- If you do not know what it is, it's safe to choose Y.
+ Select this option if you want to use LCS networking on IBM System z.
+ This device driver supports FDDI (IEEE 802.7) and Ethernet.
+ To compile as a module, choose M. The module name is lcs.
+ If you do not know what it is, it's safe to choose Y.
config CTCM
def_tristate m
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 6a3076881321..1a55e5942d36 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -46,9 +46,15 @@ struct vq_config_block {
#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */
+struct vcdev_dma_area {
+ unsigned long indicators;
+ unsigned long indicators2;
+ struct vq_config_block config_block;
+ __u8 status;
+};
+
struct virtio_ccw_device {
struct virtio_device vdev;
- __u8 *status;
__u8 config[VIRTIO_CCW_CONFIG_SIZE];
struct ccw_device *cdev;
__u32 curr_io;
@@ -58,17 +64,24 @@ struct virtio_ccw_device {
spinlock_t lock;
struct mutex io_lock; /* Serializes I/O requests */
struct list_head virtqueues;
- unsigned long indicators;
- unsigned long indicators2;
- struct vq_config_block *config_block;
bool is_thinint;
bool going_away;
bool device_lost;
unsigned int config_ready;
void *airq_info;
- u64 dma_mask;
+ struct vcdev_dma_area *dma_area;
};
+static inline unsigned long *indicators(struct virtio_ccw_device *vcdev)
+{
+ return &vcdev->dma_area->indicators;
+}
+
+static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev)
+{
+ return &vcdev->dma_area->indicators2;
+}
+
struct vq_info_block_legacy {
__u64 queue;
__u32 align;
@@ -127,11 +140,17 @@ static int virtio_ccw_use_airq = 1;
struct airq_info {
rwlock_t lock;
- u8 summary_indicator;
+ u8 summary_indicator_idx;
struct airq_struct airq;
struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
+static u8 *summary_indicators;
+
+static inline u8 *get_summary_indicator(struct airq_info *info)
+{
+ return summary_indicators + info->summary_indicator_idx;
+}
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
@@ -196,7 +215,7 @@ static void virtio_airq_handler(struct airq_struct *airq, bool floating)
break;
vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
}
- info->summary_indicator = 0;
+ *(get_summary_indicator(info)) = 0;
smp_wmb();
/* Walk through indicators field, summary indicator not active. */
for (ai = 0;;) {
@@ -208,7 +227,7 @@ static void virtio_airq_handler(struct airq_struct *airq, bool floating)
read_unlock(&info->lock);
}
-static struct airq_info *new_airq_info(void)
+static struct airq_info *new_airq_info(int index)
{
struct airq_info *info;
int rc;
@@ -217,13 +236,15 @@ static struct airq_info *new_airq_info(void)
if (!info)
return NULL;
rwlock_init(&info->lock);
- info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
+ info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR
+ | AIRQ_IV_CACHELINE);
if (!info->aiv) {
kfree(info);
return NULL;
}
info->airq.handler = virtio_airq_handler;
- info->airq.lsi_ptr = &info->summary_indicator;
+ info->summary_indicator_idx = index;
+ info->airq.lsi_ptr = get_summary_indicator(info);
info->airq.lsi_mask = 0xff;
info->airq.isc = VIRTIO_AIRQ_ISC;
rc = register_adapter_interrupt(&info->airq);
@@ -245,7 +266,7 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
if (!airq_areas[i])
- airq_areas[i] = new_airq_info();
+ airq_areas[i] = new_airq_info(i);
info = airq_areas[i];
if (!info)
return 0;
@@ -326,29 +347,29 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
struct airq_info *airq_info = vcdev->airq_info;
if (vcdev->is_thinint) {
- thinint_area = kzalloc(sizeof(*thinint_area),
- GFP_DMA | GFP_KERNEL);
+ thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*thinint_area));
if (!thinint_area)
return;
thinint_area->summary_indicator =
- (unsigned long) &airq_info->summary_indicator;
+ (unsigned long) get_summary_indicator(airq_info);
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->count = sizeof(*thinint_area);
ccw->cda = (__u32)(unsigned long) thinint_area;
} else {
/* payload is the address of the indicators */
- indicatorp = kmalloc(sizeof(&vcdev->indicators),
- GFP_DMA | GFP_KERNEL);
+ indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(indicators(vcdev)));
if (!indicatorp)
return;
*indicatorp = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
- ccw->count = sizeof(&vcdev->indicators);
+ ccw->count = sizeof(indicators(vcdev));
ccw->cda = (__u32)(unsigned long) indicatorp;
}
/* Deregister indicators from host. */
- vcdev->indicators = 0;
+ *indicators(vcdev) = 0;
ccw->flags = 0;
ret = ccw_io_helper(vcdev, ccw,
vcdev->is_thinint ?
@@ -359,8 +380,8 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
"Failed to deregister indicators (%d)\n", ret);
else if (vcdev->is_thinint)
virtio_ccw_drop_indicators(vcdev);
- kfree(indicatorp);
- kfree(thinint_area);
+ ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev)));
+ ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
}
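The conversion in this hunk, kzalloc(GFP_DMA | GFP_KERNEL)/kfree() to ccw_device_dma_zalloc()/ccw_device_dma_free(), repeats throughout the file: channel-program data now comes from a per-device DMA area that stays addressable below 2G and remains accessible under protected virtualization. A minimal sketch of the pattern for a single CCW:

    /* Sketch: allocate channel-program memory from the device's DMA
     * area; the matching free must pass back the same size. */
    static int example_run_ccw(struct ccw_device *cdev)
    {
            struct ccw1 *ccw;

            ccw = ccw_device_dma_zalloc(cdev, sizeof(*ccw));
            if (!ccw)
                    return -ENOMEM;
            /* ... fill cmd_code/flags/count/cda and start the I/O ... */
            ccw_device_dma_free(cdev, ccw, sizeof(*ccw));
            return 0;
    }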
static inline long __do_kvm_notify(struct subchannel_id schid,
@@ -407,15 +428,15 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
{
int ret;
- vcdev->config_block->index = index;
+ vcdev->dma_area->config_block.index = index;
ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
ccw->flags = 0;
ccw->count = sizeof(struct vq_config_block);
- ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
+ ccw->cda = (__u32)(unsigned long)(&vcdev->dma_area->config_block);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
if (ret)
return ret;
- return vcdev->config_block->num ?: -ENOENT;
+ return vcdev->dma_area->config_block.num ?: -ENOENT;
}
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
@@ -460,7 +481,8 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
ret, index);
vring_del_virtqueue(vq);
- kfree(info->info_block);
+ ccw_device_dma_free(vcdev->cdev, info->info_block,
+ sizeof(*info->info_block));
kfree(info);
}
@@ -470,7 +492,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev)
struct ccw1 *ccw;
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
@@ -479,7 +501,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev)
list_for_each_entry_safe(vq, n, &vdev->vqs, list)
virtio_ccw_del_vq(vq, ccw);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
@@ -502,8 +524,8 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
err = -ENOMEM;
goto out_err;
}
- info->info_block = kzalloc(sizeof(*info->info_block),
- GFP_DMA | GFP_KERNEL);
+ info->info_block = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*info->info_block));
if (!info->info_block) {
dev_warn(&vcdev->cdev->dev, "no info block\n");
err = -ENOMEM;
@@ -567,7 +589,8 @@ out_err:
if (vq)
vring_del_virtqueue(vq);
if (info) {
- kfree(info->info_block);
+ ccw_device_dma_free(vcdev->cdev, info->info_block,
+ sizeof(*info->info_block));
}
kfree(info);
return ERR_PTR(err);
@@ -581,7 +604,8 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
struct virtio_thinint_area *thinint_area = NULL;
struct airq_info *info;
- thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
+ thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*thinint_area));
if (!thinint_area) {
ret = -ENOMEM;
goto out;
@@ -596,7 +620,7 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
}
info = vcdev->airq_info;
thinint_area->summary_indicator =
- (unsigned long) &info->summary_indicator;
+ (unsigned long) get_summary_indicator(info);
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->flags = CCW_FLAG_SLI;
@@ -617,7 +641,7 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
virtio_ccw_drop_indicators(vcdev);
}
out:
- kfree(thinint_area);
+ ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
return ret;
}
@@ -633,7 +657,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
int ret, i, queue_idx = 0;
struct ccw1 *ccw;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return -ENOMEM;
@@ -657,10 +681,11 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
* We need a data area under 2G to communicate. Our payload is
* the address of the indicators.
*/
- indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
+ indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(indicators(vcdev)));
if (!indicatorp)
goto out;
- *indicatorp = (unsigned long) &vcdev->indicators;
+ *indicatorp = (unsigned long) indicators(vcdev);
if (vcdev->is_thinint) {
ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
if (ret)
@@ -669,32 +694,36 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
}
if (!vcdev->is_thinint) {
/* Register queue indicators with host. */
- vcdev->indicators = 0;
+ *indicators(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
ccw->flags = 0;
- ccw->count = sizeof(&vcdev->indicators);
+ ccw->count = sizeof(indicators(vcdev));
ccw->cda = (__u32)(unsigned long) indicatorp;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
if (ret)
goto out;
}
/* Register indicators2 with host for config changes */
- *indicatorp = (unsigned long) &vcdev->indicators2;
- vcdev->indicators2 = 0;
+ *indicatorp = (unsigned long) indicators2(vcdev);
+ *indicators2(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_CONF_IND;
ccw->flags = 0;
- ccw->count = sizeof(&vcdev->indicators2);
+ ccw->count = sizeof(indicators2(vcdev));
ccw->cda = (__u32)(unsigned long) indicatorp;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
if (ret)
goto out;
- kfree(indicatorp);
- kfree(ccw);
+ if (indicatorp)
+ ccw_device_dma_free(vcdev->cdev, indicatorp,
+ sizeof(indicators(vcdev)));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return 0;
out:
- kfree(indicatorp);
- kfree(ccw);
+ if (indicatorp)
+ ccw_device_dma_free(vcdev->cdev, indicatorp,
+ sizeof(indicators(vcdev)));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
virtio_ccw_del_vqs(vdev);
return ret;
}
@@ -704,12 +733,12 @@ static void virtio_ccw_reset(struct virtio_device *vdev)
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct ccw1 *ccw;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
/* Zero status bits. */
- *vcdev->status = 0;
+ vcdev->dma_area->status = 0;
/* Send a reset ccw on device. */
ccw->cmd_code = CCW_CMD_VDEV_RESET;
@@ -717,7 +746,7 @@ static void virtio_ccw_reset(struct virtio_device *vdev)
ccw->count = 0;
ccw->cda = 0;
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static u64 virtio_ccw_get_features(struct virtio_device *vdev)
@@ -728,11 +757,11 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
u64 rc;
struct ccw1 *ccw;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return 0;
- features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
if (!features) {
rc = 0;
goto out_free;
@@ -765,8 +794,8 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
rc |= (u64)le32_to_cpu(features->features) << 32;
out_free:
- kfree(features);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return rc;
}
@@ -791,11 +820,11 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
return -EINVAL;
}
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return -ENOMEM;
- features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
if (!features) {
ret = -ENOMEM;
goto out_free;
@@ -830,8 +859,8 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
out_free:
- kfree(features);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return ret;
}
@@ -845,11 +874,12 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
void *config_area;
unsigned long flags;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
- config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+ config_area = ccw_device_dma_zalloc(vcdev->cdev,
+ VIRTIO_CCW_CONFIG_SIZE);
if (!config_area)
goto out_free;
@@ -871,8 +901,8 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
memcpy(buf, config_area + offset, len);
out_free:
- kfree(config_area);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static void virtio_ccw_set_config(struct virtio_device *vdev,
@@ -884,11 +914,12 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
void *config_area;
unsigned long flags;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
- config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+ config_area = ccw_device_dma_zalloc(vcdev->cdev,
+ VIRTIO_CCW_CONFIG_SIZE);
if (!config_area)
goto out_free;
@@ -907,61 +938,61 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
out_free:
- kfree(config_area);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- u8 old_status = *vcdev->status;
+ u8 old_status = vcdev->dma_area->status;
struct ccw1 *ccw;
if (vcdev->revision < 1)
- return *vcdev->status;
+ return vcdev->dma_area->status;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return old_status;
ccw->cmd_code = CCW_CMD_READ_STATUS;
ccw->flags = 0;
- ccw->count = sizeof(*vcdev->status);
- ccw->cda = (__u32)(unsigned long)vcdev->status;
+ ccw->count = sizeof(vcdev->dma_area->status);
+ ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
/*
* If the channel program failed (should only happen if the device
* was hotunplugged, and then we clean up via the machine check
- * handler anyway), vcdev->status was not overwritten and we just
+ * handler anyway), vcdev->dma_area->status was not overwritten and we just
* return the old status, which is fine.
*/
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
- return *vcdev->status;
+ return vcdev->dma_area->status;
}
static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- u8 old_status = *vcdev->status;
+ u8 old_status = vcdev->dma_area->status;
struct ccw1 *ccw;
int ret;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
/* Write the status to the host. */
- *vcdev->status = status;
+ vcdev->dma_area->status = status;
ccw->cmd_code = CCW_CMD_WRITE_STATUS;
ccw->flags = 0;
ccw->count = sizeof(status);
- ccw->cda = (__u32)(unsigned long)vcdev->status;
+ ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
/* Write failed? We assume status is unchanged. */
if (ret)
- *vcdev->status = old_status;
- kfree(ccw);
+ vcdev->dma_area->status = old_status;
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
@@ -994,8 +1025,8 @@ static void virtio_ccw_release_dev(struct device *_d)
struct virtio_device *dev = dev_to_virtio(_d);
struct virtio_ccw_device *vcdev = to_vc_device(dev);
- kfree(vcdev->status);
- kfree(vcdev->config_block);
+ ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
+ sizeof(*vcdev->dma_area));
kfree(vcdev);
}
@@ -1093,17 +1124,17 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
vcdev->err = -EIO;
}
virtio_ccw_check_activity(vcdev, activity);
- for_each_set_bit(i, &vcdev->indicators,
- sizeof(vcdev->indicators) * BITS_PER_BYTE) {
+ for_each_set_bit(i, indicators(vcdev),
+ sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
/* The bit clear must happen before the vring kick. */
- clear_bit(i, &vcdev->indicators);
+ clear_bit(i, indicators(vcdev));
barrier();
vq = virtio_ccw_vq_by_ind(vcdev, i);
vring_interrupt(0, vq);
}
- if (test_bit(0, &vcdev->indicators2)) {
+ if (test_bit(0, indicators2(vcdev))) {
virtio_config_changed(&vcdev->vdev);
- clear_bit(0, &vcdev->indicators2);
+ clear_bit(0, indicators2(vcdev));
}
}
@@ -1203,12 +1234,12 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
struct ccw1 *ccw;
int ret;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return -ENOMEM;
- rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
+ rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev));
if (!rev) {
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return -ENOMEM;
}
@@ -1238,8 +1269,8 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
}
} while (ret == -EOPNOTSUPP);
- kfree(ccw);
- kfree(rev);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+ ccw_device_dma_free(vcdev->cdev, rev, sizeof(*rev));
return ret;
}
@@ -1255,24 +1286,11 @@ static int virtio_ccw_online(struct ccw_device *cdev)
ret = -ENOMEM;
goto out_free;
}
-
vcdev->vdev.dev.parent = &cdev->dev;
- cdev->dev.dma_mask = &vcdev->dma_mask;
- /* we are fine with common virtio infrastructure using 64 bit DMA */
- ret = dma_set_mask_and_coherent(&cdev->dev, DMA_BIT_MASK(64));
- if (ret) {
- dev_warn(&cdev->dev, "Failed to enable 64-bit DMA.\n");
- goto out_free;
- }
-
- vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
- GFP_DMA | GFP_KERNEL);
- if (!vcdev->config_block) {
- ret = -ENOMEM;
- goto out_free;
- }
- vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
- if (!vcdev->status) {
+ vcdev->cdev = cdev;
+ vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*vcdev->dma_area));
+ if (!vcdev->dma_area) {
ret = -ENOMEM;
goto out_free;
}
@@ -1281,7 +1299,6 @@ static int virtio_ccw_online(struct ccw_device *cdev)
vcdev->vdev.dev.release = virtio_ccw_release_dev;
vcdev->vdev.config = &virtio_ccw_config_ops;
- vcdev->cdev = cdev;
init_waitqueue_head(&vcdev->wait_q);
INIT_LIST_HEAD(&vcdev->virtqueues);
spin_lock_init(&vcdev->lock);
@@ -1312,8 +1329,8 @@ out_put:
return ret;
out_free:
if (vcdev) {
- kfree(vcdev->status);
- kfree(vcdev->config_block);
+ ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
+ sizeof(*vcdev->dma_area));
}
kfree(vcdev);
return ret;
@@ -1483,8 +1500,17 @@ static void __init no_auto_parse(void)
static int __init virtio_ccw_init(void)
{
+ int rc;
+
/* parse no_auto string before we do anything further */
no_auto_parse();
- return ccw_driver_register(&virtio_ccw_driver);
+
+ summary_indicators = cio_dma_zalloc(MAX_AIRQ_AREAS);
+ if (!summary_indicators)
+ return -ENOMEM;
+ rc = ccw_driver_register(&virtio_ccw_driver);
+ if (rc)
+ cio_dma_free(summary_indicators, MAX_AIRQ_AREAS);
+ return rc;
}
device_initcall(virtio_ccw_init);
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index ecee4b3ff073..377b07b2feeb 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -763,6 +763,7 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
struct pvscsi_adapter *adapter = shost_priv(host);
struct pvscsi_ctx *ctx;
unsigned long flags;
+ unsigned char op;
spin_lock_irqsave(&adapter->hw_lock, flags);
@@ -775,13 +776,14 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
}
cmd->scsi_done = done;
+ op = cmd->cmnd[0];
dev_dbg(&cmd->device->sdev_gendev,
- "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
+ "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op);
spin_unlock_irqrestore(&adapter->hw_lock, flags);
- pvscsi_kick_io(adapter, cmd->cmnd[0]);
+ pvscsi_kick_io(adapter, op);
return 0;
}
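Caching op before the unlock is the point of this fix: once the adapter lock is dropped, the command may complete on another CPU and be freed, so cmd->cmnd[0] must be read while the lock still protects it. A sketch of the pattern, mirroring the hunk above:

    /* Sketch: copy needed fields out of a shared object before
     * releasing the lock that keeps it alive; afterwards touch only
     * the local copy. */
    spin_lock_irqsave(&adapter->hw_lock, flags);
    op = cmd->cmnd[0];                  /* cmd valid only under lock */
    spin_unlock_irqrestore(&adapter->hw_lock, flags);
    pvscsi_kick_io(adapter, op);        /* no dereference of cmd */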
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 524ecdc2a9bb..2ec355003524 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_SOC_SAMSUNG) += samsung/
obj-y += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
-obj-$(CONFIG_SOC_TI) += ti/
+obj-y += ti/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_PLAT_VERSATILE) += versatile/
obj-y += xilinx/
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index ea0859f7b185..d7d50d48d05d 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -75,10 +75,10 @@ config TI_SCI_PM_DOMAINS
called ti_sci_pm_domains. Note this is needed early in boot before
rootfs may be available.
+endif # SOC_TI
+
config TI_SCI_INTA_MSI_DOMAIN
bool
select GENERIC_MSI_IRQ_DOMAIN
help
Driver to enable Interrupt Aggregator specific MSI Domain.
-
-endif # SOC_TI
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index b6e4862cc242..51ddca2033e0 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -81,6 +81,12 @@ out:
return CHAP_DIGEST_UNKNOWN;
}
+static void chap_close(struct iscsi_conn *conn)
+{
+ kfree(conn->auth_protocol);
+ conn->auth_protocol = NULL;
+}
+
static struct iscsi_chap *chap_server_open(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
@@ -118,7 +124,7 @@ static struct iscsi_chap *chap_server_open(
case CHAP_DIGEST_UNKNOWN:
default:
pr_err("Unsupported CHAP_A value\n");
- kfree(conn->auth_protocol);
+ chap_close(conn);
return NULL;
}
@@ -133,19 +139,13 @@ static struct iscsi_chap *chap_server_open(
* Generate Challenge.
*/
if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) {
- kfree(conn->auth_protocol);
+ chap_close(conn);
return NULL;
}
return chap;
}
-static void chap_close(struct iscsi_conn *conn)
-{
- kfree(conn->auth_protocol);
- conn->auth_protocol = NULL;
-}
-
static int chap_server_compute_md5(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index f4a075303e9a..6949ea8bc387 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -502,7 +502,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
/* Always in 512 byte units for Linux/Block */
block_lba += sg->length >> SECTOR_SHIFT;
- sectors -= 1;
+ sectors -= sg->length >> SECTOR_SHIFT;
}
iblock_submit_bios(&list);
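This one-line fix keeps the two counters in step: block_lba advances by sg->length >> SECTOR_SHIFT sectors per scatterlist entry, so sectors must shrink by the same amount. With 512-byte sectors (SECTOR_SHIFT == 9) and a 4096-byte sg entry, each pass covers 4096 >> 9 = 8 sectors; decrementing by 1 made the loop run eight times too long and write past the requested range. A sketch of the corrected per-entry accounting:

    /* Sketch: advance the LBA cursor and shrink the remaining-sector
     * count by the same per-entry amount. */
    static sector_t example_advance(sector_t lba, u32 sg_len,
                                    u32 *sectors)
    {
            u32 step = sg_len >> 9;     /* 4096-byte entry -> 8 sectors */

            *sectors -= step;
            return lba + step;
    }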